gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
"""
Creates distance restraints to lock several chains together. Useful to avoid unnatural
flexibility or movement due to sequence/numbering gaps.
"""
import logging
import sys
import random
import itertools
# Set random seed to have reproducibility
random.seed(917)
# Functions/Methods
def calc_euclidean(i, j):
    """Return the euclidean distance between two (x, y, z) coordinate tuples."""
    return sum((b - a) ** 2 for a, b in zip(i, j)) ** 0.5
def read_structure(pdbf, exclude=None):
    """
    Parse a PDB file and return its backbone reference atoms.

    Keeps only ATOM records for CA (protein alpha-carbon) and P (nucleic
    acid backbone phosphorous), first altloc only. The chain identifier
    falls back to the segID column when the chain column is blank.

    Args:
        pdbf: path to the PDB file.
        exclude: optional iterable of chain/segID identifiers to skip.

    Returns:
        List of (chain, resi, aname, (x, y, z)) tuples.

    Exits the process with status 1 if no matching atoms are found.
    """
    backbone_names = {'CA', 'P'}  # Alpha-Carbon (Prot), Backbone Phosphorous (DNA)
    allowed_altloc = {' ', 'A'}
    excluded = set(exclude) if exclude else set()

    parsed = []
    with open(pdbf, 'r') as pdb_handle:
        for line in pdb_handle:
            if line[0:4] != 'ATOM':
                continue
            name = line[12:16].strip()
            # Chain ID, or segID when the chain column is empty.
            chain_id = line[21] if line[21].strip() else line[72:76].strip()
            if chain_id in excluded or name not in backbone_names or line[16] not in allowed_altloc:
                continue
            resid = int(line[22:26])
            xyz = (float(line[30:38]), float(line[38:46]), float(line[46:54]))
            parsed.append((chain_id, resid, name, xyz))

    if not parsed:
        logging.critical('[!] PDB File seems empty or no CA/P atoms found: {0}'.format(pdbf))
        sys.exit(1)
    return parsed
def get_bodies(atom_lst, prot_threshold=4.0, dna_threshold=7.5):
    """
    Split an atom list into continuous fragments ("bodies").

    A new body starts whenever two consecutive atoms belong to different
    chains or molecule types, or are further apart than the distance
    threshold for their atom type.

    Args:
        atom_lst: list of (chain, resi, aname, (x, y, z)) tuples, as
            produced by read_structure().
        prot_threshold: maximum CA-CA distance within one body.
        dna_threshold: maximum P-P distance within one body.

    Returns:
        List of (start_index, end_index) tuples into atom_lst.
        Both bounds are inclusive.
    """
    bodies = []
    threshold = {'CA': prot_threshold, 'P': dna_threshold}
    body_start = 0
    i = 0
    for i, atom in enumerate(atom_lst[1:], start=1):
        p_atom = atom_lst[i - 1]
        chain, resi, aname, xyz = atom
        p_chain, p_resi, p_aname, p_xyz = p_atom
        if (chain == p_chain) and (aname == p_aname):
            # Same molecule: break only on an unusually large internal gap.
            d_xyz = calc_euclidean(xyz, p_xyz)
            if d_xyz >= threshold[aname]:
                logging.debug('[+++] (Internal) Body: {0}:{1}'.format(body_start, i - 1))
                bodies.append((body_start, i - 1))
                body_start = i  # Set new beginning
        else:
            # Different chain or molecule type always starts a new body.
            logging.debug('[+++] Body: {0}:{1}'.format(body_start, i - 1))
            bodies.append((body_start, i - 1))
            body_start = i  # Set new beginning
    if not bodies:  # Single continuous molecule
        # Bug fix: the end bound is the last inclusive *index*, not the
        # list length; the old value made callers index out of range.
        bodies.append((0, len(atom_lst) - 1))
    else:
        logging.debug('[+++] Body: {0}:{1}'.format(body_start, i))
        bodies.append((body_start, i))  # Last body
    logging.info('[++] Found {0} bodies'.format(len(bodies)))
    return bodies
def build_restraints(bodies):
    """
    Generates distance restraints to maintain the relative
    orientation of the different bodies during the simulations.
    Generates two unique restraints per pair of bodies.
    Each restraint is created using two random atoms on each body
    and using their exact euclidean distance as target distance.

    Args:
        bodies: list of (start_index, end_index) tuples (inclusive bounds)
            into the atom list, as returned by get_bodies().

    Returns:
        List of (atom_index_i, atom_index_j) tuples; indices refer to the
        original atom list. Two tuples are produced per pair of bodies.
    """
    def pick_residues(body, max_trials=10):
        # Pick two random residues in each body
        # Make sure they are far apart from each other
        # (more than 3 positions apart in the atom list). After
        # max_trials failed attempts, the last (possibly close) pair
        # is returned as a best-effort fallback.
        n_trials = 0
        while 1:
            try:
                res_i, res_ii = random.sample(body, 2)
            except ValueError:
                # Likely, sample size is 1
                logging.warning('[!] One-sized body found. This may lead to problems..')
                return body[0], body[0]
            logging.debug('[+++] Trial {0}: {1} & {2}'.format(n_trials, res_i, res_ii))
            if abs(res_i - res_ii) > 3:
                logging.info('[++] Picked residues {0} & {1}'.format(res_i, res_ii))
                return res_i, res_ii
            n_trials += 1
            if n_trials == max_trials:
                msg = '[!] Could not pick two unique distant residues in body after {0} tries'
                logging.info(msg.format(max_trials))
                return res_i, res_ii

    restraints = []
    # Restrain every unordered pair of bodies with two restraints each.
    n_bodies = range(len(bodies))
    combinations = itertools.combinations(n_bodies, 2)
    for pair_bodies in combinations:
        body_i, body_j = pair_bodies
        logging.debug('[+++] Restraining body {0} to body {1}'.format(body_i, body_j))
        st_body_i, en_body_i = bodies[body_i]
        st_body_j, en_body_j = bodies[body_j]
        # end bounds are inclusive, hence the +1 for range().
        res_i, res_ii = pick_residues(range(st_body_i, en_body_i+1))
        res_j, res_jj = pick_residues(range(st_body_j, en_body_j+1))
        logging.info('[++] Created restraint: {0}:{1} <--> {2}:{3}'.format(body_i, res_i, body_j, res_j))
        restraints.append((res_i, res_j))
        logging.info('[++] Created restraint: {0}:{1} <--> {2}:{3}'.format(body_i, res_ii, body_j, res_jj))
        restraints.append((res_ii, res_jj))
    return restraints
def generate_tbl(atom_lst, restraints):
    """
    Print the restraints in CNS/HADDOCK TBL format, one per line.

    The target distance of each restraint is the current euclidean
    distance between the two atoms; both error bounds are 0.0.
    """
    for idx_i, idx_j in restraints:
        atom_i = atom_lst[idx_i]
        atom_j = atom_lst[idx_j]
        dist_ij = calc_euclidean(atom_i[3], atom_j[3])
        sel_i = "(segid {0[0]} and resi {0[1]} and name {0[2]}) ".format(atom_i)
        sel_j = "(segid {0[0]} and resi {0[1]} and name {0[2]}) ".format(atom_j)
        print("assign " + sel_i + sel_j + "{0:3.3f} 0.0 0.0".format(dist_ij))
def generate_pml(atom_lst, restraints):
    """
    Print the restraints as PyMOL `distance` commands, one per line,
    numbered restraint_1, restraint_2, ...
    """
    for n, (idx_i, idx_j) in enumerate(restraints, start=1):
        atom_i = atom_lst[idx_i]
        atom_j = atom_lst[idx_j]
        parts = [
            "distance restraint_{0}, ".format(n),
            "(chain {0[0]} and resi {0[1]} and name {0[2]}), ".format(atom_i),
            "(chain {0[0]} and resi {0[1]} and name {0[2]}) ".format(atom_j),
        ]
        print("".join(parts))
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Command-line interface: one or more PDB files, optional chain exclusion,
    # verbosity via repeated -v flags.
    ap = ArgumentParser(description=__doc__)
    ap.add_argument('structures', nargs='+', help='PDB structures to restraint')
    ap.add_argument('--exclude', '-e', nargs='+', help='Chains to exclude from the calculation')
    ap.add_argument('--verbose', '-v', default=0, action='count')
    args = ap.parse_args()

    # Set Logger: -v => INFO, -vv or more => DEBUG, default WARNING.
    if args.verbose == 1:
        level = logging.INFO
    elif args.verbose > 1:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(level=level, format='%(message)s')
    logger = logging.getLogger(__name__)  # NOTE(review): unused; module-level logging calls are used instead.

    # Main logic: concatenate backbone atoms of all structures, detect the
    # rigid bodies, create two restraints per body pair and print them in
    # TBL format to stdout.
    atom_lst = []
    for s in args.structures:
        atom_lst += read_structure(s, exclude=args.exclude)
    bodies = get_bodies(atom_lst)
    restraints = build_restraints(bodies)
    generate_tbl(atom_lst, restraints)
| |
"""Implements a Deep Boltzmann Machine."""
from neuralnet import *
class DBM(NeuralNet):
  """Deep Boltzmann Machine.

  Extends NeuralNet with the two-phase Boltzmann-machine training loop:
  mean-field inference in the positive phase and Gibbs sampling (CD or
  PCD, chosen by the training op) in the negative phase.

  NOTE(review): Python 2 code. `cm` is presumably the cudamat GPU matrix
  library and `deepnet_pb2` the protobuf definitions, both imported via
  `from neuralnet import *` -- confirm against that module.
  """

  def __init__(self, *args, **kwargs):
    super(DBM, self).__init__(*args, **kwargs)
    # Optional feed-forward net used to initialize layer states
    # (set up in LoadModelOnGPU, used in PositivePhase).
    self.initializer_net = None
    # True when training with Contrastive Divergence, False otherwise (e.g. PCD).
    self.cd = self.t_op.optimizer == deepnet_pb2.Operation.CD

  @staticmethod
  def AreInputs(l):
    # True iff every layer in l is an input layer (Python 2 builtin `reduce`).
    return reduce(lambda a, x: x.is_input and a, l, True)

  def SetPhase(self, layer, pos=True):
    """Setup required before starting a phase.
    This method makes 'state' and 'sample' point to the right variable depending
    on the phase.
    """
    if pos:
      layer.state = layer.pos_state
      layer.sample = layer.pos_sample
    else:
      layer.state = layer.neg_state
      layer.sample = layer.neg_sample

  def DumpModelState(self, step):
    """Dump the state of every node to an npz file (debugging aid)."""
    # NOTE(review): hardcoded, user-specific output path.
    state_dict = dict([(node.name, node.state.asarray().T) for node in self.node_list])
    filename = '/ais/gobi3/u/nitish/flickr/states/%s_%d' % (self.net.name, step)
    print 'Dumping state at step %d to %s' % (step, filename)
    np.savez(filename, **state_dict)

  def Sort(self):
    """Sort layers into useful orders.
    After this method is done:
    pos_phase_order: Order in which nodes have to be updated in the positive
      phase.
    neg_phase_order: Order in which nodes have to be updated in the negative
      phase.
    node_list: List of all nodes. All input nodes occur before non input ones.
    """
    non_input_nodes = []
    node_list = list(self.input_datalayer)
    for node in self.layer:
      if not node.is_input:
        non_input_nodes.append(node)
    node_list.extend(non_input_nodes)
    # Phase orders come from the model proto when given, otherwise default
    # to "all non-input nodes" (positive) / "all nodes" (negative).
    if self.net.positive_phase_order:
      self.pos_phase_order = [self.GetLayerByName(x) for x in self.net.positive_phase_order]
      self.pos_phase_order.extend([self.GetLayerByName(x) for x in self.unclamped_layer])
    else:
      self.pos_phase_order = non_input_nodes
    if self.net.negative_phase_order:
      self.neg_phase_order = [self.GetLayerByName(x) for x in self.net.negative_phase_order]
    else:
      self.neg_phase_order = node_list
    return node_list

  def ComputeUnnormalizedLogProb(self):
    # Not implemented for DBMs (the partition function is intractable).
    pass

  def ComputeUp(self, layer, train=False, compute_input=False, step=0,
                maxsteps=0, use_samples=False, neg_phase=False):
    """
    Computes the state of a layer, given the state of its incoming neighbours.
    Args:
      train: True if this computation is happening during training, False during
        evaluation.
      compute_input: If True, the state of the input layer will be computed.
        Otherwise, it will be loaded as data.
      step: Training step.
      maxsteps: Maximum number of steps that will be taken (Some hyperparameters
        may depend on this.)
      use_samples: Use neighbours' samples to update the layer's state.
      neg_phase: True during the negative phase; reuses the dropout mask
        drawn in the positive phase instead of sampling a fresh one.
    """
    if layer.is_input and not compute_input:
      layer.GetData()
    else:
      # Accumulate weighted input from every incoming edge.
      for i, edge in enumerate(layer.incoming_edge):
        neighbour = layer.incoming_neighbour[i]
        if use_samples:
          inputs = neighbour.sample
        else:
          inputs = neighbour.state
        # Orient the weight matrix depending on which end of the edge
        # this layer sits on, and apply the matching up/down factor.
        if edge.node2 == layer:
          w = edge.params['weight'].T
          factor = edge.proto.up_factor
        else:
          w = edge.params['weight']
          factor = edge.proto.down_factor
        if i == 0:
          # First edge overwrites the state; later edges accumulate.
          cm.dot(w, inputs, target=layer.state)
          if factor != 1:
            layer.state.mult(factor)
        else:
          layer.state.add_dot(w, inputs, mult=factor)
      b = layer.params['bias']
      if layer.replicated_neighbour is None:
        layer.state.add_col_vec(b)
      else:
        layer.state.add_dot(b, layer.replicated_neighbour.NN)
      layer.ApplyActivation()
    if layer.hyperparams.dropout:
      if train and maxsteps - step >= layer.hyperparams.stop_dropout_for_last:
        # Randomly set states to zero.
        if not neg_phase:
          layer.mask.fill_with_rand()
          layer.mask.greater_than(layer.hyperparams.dropout_prob)
        layer.state.mult(layer.mask)
      else:
        # Produce expected output.
        layer.state.mult(1.0 - layer.hyperparams.dropout_prob)

  def PositivePhase(self, train=False, evaluate=False, step=0):
    """Perform the positive phase.
    This method computes the sufficient statistics under the data distribution.

    Returns:
      A list of Metrics protos (losses); empty when nothing is measured.
    """
    # Do a forward pass in the initializer net, if set.
    if self.initializer_net:
      self.initializer_net.ForwardPropagate(train=train, step=step)
    # Initialize layers.
    for node in self.node_list:
      if node.is_input:
        # Load data into input nodes.
        self.ComputeUp(node, train=train)
      elif node.is_initialized:
        node.state.assign(node.initialization_source.state)
      else:
        # Initialize other nodes to zero.
        node.ResetState(rand=False)
    # Starting MF (mean-field updates over the positive phase order).
    for i in range(self.net.hyperparams.mf_steps):
      for node in self.pos_phase_order:
        self.ComputeUp(node, train=train, step=step, maxsteps=self.train_stop_steps)
    # End of MF.
    losses = []
    if train:
      for node in self.layer:
        r = node.CollectSufficientStatistics()
        if r is not None:  # This is true only if sparsity is active.
          perf = deepnet_pb2.Metrics()
          perf.MergeFrom(node.proto.performance_stats)
          perf.count = 1
          perf.sparsity = r
          losses.append(perf)
      for edge in self.edge:
        edge.CollectSufficientStatistics()
    # Evaluation
    # If CD, then this step would be performed by the negative phase anyways,
    # So the loss is measured in the negative phase instead. Return []
    # Otherwise, reconstruct the input given the other layers and report
    # the loss.
    if not self.cd or evaluate:
      for node in self.input_datalayer:
        self.ComputeUp(node, compute_input=True, step=step, maxsteps=self.train_stop_steps)
        losses.append(node.GetLoss())
    return losses

  def InitializeNegPhase(self, to_pos=False):
    """Initialize negative particles.
    Copies the pos state and samples it to initialize the negative particles.
    """
    for layer in self.layer:
      self.SetPhase(layer, pos=False)
      if to_pos:
        layer.state.assign(layer.pos_state)
      else:
        layer.ResetState(rand=True)
      layer.Sample()
      self.SetPhase(layer, pos=True)

  def NegativePhase(self, step=0, train=True, gibbs_steps=-1):
    """Perform the negative phase.
    This method computes the sufficient statistics under the model distribution.
    Args:
      step: Training step
      train: If true, then this computation is happening during training.
      gibbs_steps: Number of gibbs steps to take. If -1, use default.
    """
    losses = []
    if self.cd:
      # CD: start the chain from the data (sample the hidden layers).
      for node in self.node_list:
        if not node.is_input:
          node.Sample()
    else:
      # PCD: switch every layer to its persistent negative particles.
      for node in self.layer:
        self.SetPhase(node, pos=False)
    if gibbs_steps < 0:
      h = self.net.hyperparams
      # Optionally step up the number of Gibbs steps as training proceeds.
      # NOTE(review): Python 2 integer division -- presumably the
      # hyperparameters are ints; confirm before porting.
      start_after = h.start_step_up_cd_after
      if start_after > 0 and start_after < step:
        gibbs_steps = h.gibbs_steps + 1 + (step - start_after) / h.step_up_cd_after
      else:
        gibbs_steps = h.gibbs_steps
    for i in range(gibbs_steps):
      for node in self.neg_phase_order:
        self.ComputeUp(node, train=train, step=step,
                       maxsteps=self.train_stop_steps, use_samples=True,
                       compute_input=True, neg_phase=True)
        if i == 0 and node.is_input and self.cd:
          # For CD the reconstruction loss is measured here (first step).
          losses.append(node.GetLoss())
        if node.is_input:
          if node.sample_input and node.hyperparams.sample_input_after <= step:
            node.Sample()
          else:
            # Not sampling inputs usually makes learning faster.
            node.sample.assign(node.state)
        else:
          node.Sample()
    # End of Gibbs Sampling.
    if train:
      for node in self.layer:
        node.CollectSufficientStatistics(neg=True)
        self.UpdateLayerParams(node, step=step)
      for edge in self.edge:
        edge.CollectSufficientStatistics(neg=True)
        self.UpdateEdgeParams(edge, step=step)
    if not self.cd:
      # PCD: switch back to the positive-phase state variables.
      for node in self.layer:
        self.SetPhase(node, pos=True)
    return losses

  def UpdateLayerParams(self, layer, step=0):
    """Update parameters associated with this layer."""
    layer.gradient.add_mult(layer.suff_stats, -1.0 / layer.batchsize)
    if layer.tied_to:
      # Accumulate the gradient into the layer this one is tied to.
      layer.tied_to.gradient.add(layer.gradient)
      layer.gradient.assign(0)
      layer = layer.tied_to
    layer.num_grads_received += 1
    # Apply the update only once all tied copies have contributed.
    if layer.num_grads_received == layer.num_shares:
      layer.Update('bias', step, no_reg=True)  # By default, do not regularize bias.

  def UpdateEdgeParams(self, edge, step):
    """Update the parameters associated with this edge."""
    numcases = edge.node1.batchsize
    edge.gradient.add_mult(edge.suff_stats, -1.0/numcases)
    if edge.tied_to:
      # Accumulate the gradient into the edge this one is tied to.
      edge.tied_to.gradient.add(edge.gradient)
      edge.gradient.assign(0)
      edge = edge.tied_to
    edge.num_grads_received += 1
    # Apply the update only once all tied copies have contributed.
    if edge.num_grads_received == edge.num_shares:
      edge.Update('weight', step)

  def GetBatch(self, handler=None):
    """Fetch the next data batch, keeping the initializer net in sync."""
    super(DBM, self).GetBatch(handler=handler)
    if self.initializer_net:
      self.initializer_net.GetBatch()

  def TrainOneBatch(self, step):
    """Run one positive + negative phase pair; returns the collected losses."""
    losses1 = self.PositivePhase(train=True, step=step)
    if step == 0 and self.t_op.optimizer == deepnet_pb2.Operation.PCD:
      # PCD: seed the persistent chains from the first data batch.
      self.InitializeNegPhase(to_pos=True)
    losses2 = self.NegativePhase(step, train=True)
    losses1.extend(losses2)
    return losses1

  def EvaluateOneBatch(self):
    """Evaluate the current batch (positive phase only, with reconstruction)."""
    losses = self.PositivePhase(train=False, evaluate=True)
    return losses

  def SetUpData(self, *args, **kwargs):
    """Set up data handlers, including those of the initializer net."""
    super(DBM, self).SetUpData(*args, **kwargs)
    # Set up data for initializer net.
    if self.initializer_net:
      for node in self.initializer_net.layer:
        try:
          # Find the DBM layer with the same name, if any.
          matching_dbm_node = next(l for l in self.layer \
                                   if l.name == node.name)
        except StopIteration:
          matching_dbm_node = None
        if matching_dbm_node:
          if node.is_input or node.is_output:
            # Initializer net reads its data from the DBM's layer.
            self.initializer_net.tied_datalayer.append(node)
            node.tied_to = matching_dbm_node
          elif matching_dbm_node.is_initialized:
            # DBM layer gets initialized from the initializer net's layer.
            matching_dbm_node.initialization_source = node

  def LoadModelOnGPU(self, batchsize=-1):
    """Load model parameters on the GPU; also builds the initializer net."""
    super(DBM, self).LoadModelOnGPU(batchsize=batchsize)
    if self.net.initializer_net:
      self.initializer_net = NeuralNet(self.net.initializer_net, self.t_op,
                                       self.e_op)
      self.initializer_net.LoadModelOnGPU(batchsize=batchsize)

  def Reconstruct(self, layername, numbatches, inputlayername=[],
                  validation=True):
    """Reconstruct from the model.
    Args:
      layername: Name of the layer which is to be reconstructed.
      numbatches: Number of batches to reconstruct.
      inputlayername: List of input layers whose states will be returned.
        NOTE(review): mutable default argument -- safe only because it is
        never mutated here.
      validation: If True, reconstruct the validation set,
        else reconstruct test set.
    Returns:
      The reconstruction for layer 'layername' and inputs in layers
      'inputlayername'
    """
    step = 0
    self.recon = []
    self.inputs = []
    self.recon_pos = 0
    inputlayer = []
    layer_to_tap = self.GetLayerByName(layername, down=True)
    self.recon = np.zeros((numbatches * self.e_op.batchsize,
                           layer_to_tap.state.shape[0]))
    for i, lname in enumerate(inputlayername):
      l = self.GetLayerByName(lname)
      inputlayer.append(l)
      self.inputs.append(np.zeros((numbatches * self.e_op.batchsize,
                                   l.state.shape[0])))
    if validation:
      datagetter = self.GetValidationBatch
    else:
      datagetter = self.GetTestBatch
    for batch in range(numbatches):
      datagetter()
      self.ReconstructOneBatch(layer_to_tap, inputlayer)
    return self.recon, self.inputs

  def GetAllRepresentations(self, numbatches, validation=True):
    """Get representations at all layers.
    Returns:
      A dictionary with the name of the layer as the key and its state as the
      value.
    """
    if validation:
      datagetter = self.GetValidationBatch
    else:
      datagetter = self.GetTestBatch
    rep_list = []
    names = []
    # state is (numdims, batchsize); outputs are stored transposed as
    # (cases, numdims).
    for node in self.node_list:
      rep_list.append(np.zeros((numbatches * node.state.shape[1],
                                node.state.shape[0]), dtype='float32'))
      names.append(node.name)
    for batch in range(numbatches):
      datagetter()
      self.PositivePhase(train=False, evaluate=False)
      for i, node in enumerate(self.node_list):
        rep_list[i][batch*node.batchsize:(batch+1)*node.batchsize,:] =\
            node.state.asarray().T
    return dict(zip(names, rep_list))

  def WriteRepresentationToDisk(self, layernames, output_dir, memory='1G',
                                dataset='test', input_recon=False):
    """Stream the states of `layernames` over a whole dataset to disk.

    NOTE(review): if `dataset` is not one of train/validation/test,
    `numbatches`/`size` are unbound and the DataWriter call raises.
    """
    layers = [self.GetLayerByName(lname) for lname in layernames]
    numdim_list = [layer.state.shape[0] for layer in layers]
    if dataset == 'train':
      datagetter = self.GetTrainBatch
      if self.train_data_handler is None:
        return
      numbatches = self.train_data_handler.num_batches
      size = numbatches * self.train_data_handler.batchsize
    elif dataset == 'validation':
      datagetter = self.GetValidationBatch
      if self.validation_data_handler is None:
        return
      numbatches = self.validation_data_handler.num_batches
      size = numbatches * self.validation_data_handler.batchsize
    elif dataset == 'test':
      datagetter = self.GetTestBatch
      if self.test_data_handler is None:
        return
      numbatches = self.test_data_handler.num_batches
      size = numbatches * self.test_data_handler.batchsize
    datawriter = DataWriter(layernames, output_dir, memory, numdim_list, size)
    for batch in range(numbatches):
      datagetter()
      sys.stdout.write('\r%d' % (batch+1))
      sys.stdout.flush()
      self.PositivePhase(train=False, evaluate=input_recon)
      reprs = [l.state.asarray().T for l in layers]
      datawriter.Submit(reprs)
    sys.stdout.write('\n')
    size = datawriter.Commit()
    return size

  def GetRepresentation(self, layername, numbatches, inputlayername=[],
                        validation=True):
    """Get the representation at layer 'layername'.

    Same batching scheme as Reconstruct(); returns (rep, inputs).
    """
    step = 0
    self.rep_pos = 0
    inputlayer = []
    self.inputs = []
    layer_to_tap = self.GetLayerByName(layername)
    self.rep = np.zeros((numbatches * self.e_op.batchsize, layer_to_tap.state.shape[0]))
    for i, lname in enumerate(inputlayername):
      l = self.GetLayerByName(lname)
      inputlayer.append(l)
      self.inputs.append(np.zeros((numbatches * self.e_op.batchsize,
                                   l.state.shape[0])))
    if validation:
      datagetter = self.GetValidationBatch
    else:
      datagetter = self.GetTestBatch
    for batch in range(numbatches):
      datagetter()
      self.GetRepresentationOneBatch(layer_to_tap, inputlayer)
    return self.rep, self.inputs

  def GetLayerByName(self, layername, down=False):
    """Return the layer named `layername`, or None if absent.

    NOTE(review): `down` is accepted for interface compatibility but unused.
    """
    try:
      l = next(l for l in self.layer if l.name == layername)
    except StopIteration:
      l = None
    return l

  def Inference(self, steps, layernames, unclamped_layers, output_dir, memory='1G', dataset='test', method='gibbs'):
    """Infer the states of `layernames` with some layers unclamped.

    Runs `steps` rounds of Gibbs sampling or mean-field over the positive
    phase order and streams the inferred states to disk.
    NOTE(review): mutates `is_input` of the unclamped layers and appends
    them to pos_phase_order without restoring either afterwards.
    """
    layers_to_infer = [self.GetLayerByName(l) for l in layernames]
    layers_to_unclamp = [self.GetLayerByName(l) for l in unclamped_layers]
    numdim_list = [layer.state.shape[0] for layer in layers_to_infer]
    for l in layers_to_unclamp:
      l.is_input = False
      self.pos_phase_order.append(l)
    if dataset == 'train':
      datagetter = self.GetTrainBatch
      if self.train_data_handler is None:
        return
      numbatches = self.train_data_handler.num_batches
      size = numbatches * self.train_data_handler.batchsize
    elif dataset == 'validation':
      datagetter = self.GetValidationBatch
      if self.validation_data_handler is None:
        return
      numbatches = self.validation_data_handler.num_batches
      size = numbatches * self.validation_data_handler.batchsize
    elif dataset == 'test':
      datagetter = self.GetTestBatch
      if self.test_data_handler is None:
        return
      numbatches = self.test_data_handler.num_batches
      size = numbatches * self.test_data_handler.batchsize
    dw = DataWriter(layernames, output_dir, memory, numdim_list, size)
    gibbs = method == 'gibbs'
    mf = method == 'mf'
    for batch in range(numbatches):
      sys.stdout.write('\r%d' % (batch+1))
      sys.stdout.flush()
      datagetter()
      for node in self.node_list:
        if node.is_input or node.is_initialized:
          node.GetData()
        else:
          node.ResetState(rand=False)
        if gibbs:
          node.sample.assign(node.state)
      for i in range(steps):
        for node in self.pos_phase_order:
          self.ComputeUp(node, use_samples=gibbs)
          if gibbs:
            node.Sample()
      output = [l.state.asarray().T for l in layers_to_infer]
      dw.Submit(output)
    sys.stdout.write('\n')
    size = dw.Commit()
    return size[0]

  def ReconstructOneBatch(self, layer, inputlayers):
    """Reconstruct one batch and append it to self.recon / self.inputs."""
    self.PositivePhase(train=False, evaluate=True)
    self.recon[self.recon_pos:self.recon_pos + self.e_op.batchsize,:] =\
        layer.state.asarray().T
    for i, l in enumerate(inputlayers):
      self.inputs[i][self.recon_pos:self.recon_pos + self.e_op.batchsize,:] =\
          l.data.asarray().T
    self.recon_pos += self.e_op.batchsize

  def GetRepresentationOneBatch(self, layer, inputlayers):
    """Run the positive phase for one batch and record the tapped states."""
    self.PositivePhase(train=False, evaluate=False)
    # For input layers record the raw data, not the (dropout-scaled) state.
    if layer.proto.is_input:
      self.rep[self.rep_pos:self.rep_pos + self.e_op.batchsize,:] =\
          layer.data.asarray().T
    else:
      self.rep[self.rep_pos:self.rep_pos + self.e_op.batchsize,:] =\
          layer.state.asarray().T
    for i, l in enumerate(inputlayers):
      self.inputs[i][self.rep_pos:self.rep_pos + self.e_op.batchsize,:] =\
          l.data.asarray().T
    self.rep_pos += self.e_op.batchsize

  def UnclampLayer(self, layername):
    """Unclamps the layer 'layername'.
    Most useful when called just after calling the constructor.
    """
    for l in self.net.layer:
      if l.name == layername:
        print 'Unclamping %s' % layername
        l.is_input = False
        self.unclamped_layer.append(l.name)
| |
import numpy as np
import cPickle
from collections import defaultdict
import sys, re
import pandas as pd
from multiprocessing import Process
import random
# transcript: "after_other_data/new_test_after_pairs1","after_other_data/new_test_other_pairs1"
# epoch: 17, training time: 465.99 secs, train perf: 99.97 %, val perf: 83.24 %
#
def build_data_cv(train_data_folder, test_data_folder, candidate_pool_file, cv, clean_string=True):
"""
Loads data and split into 10 folds.
"""
revs = []
pos_file = train_data_folder[0]
neg_file = train_data_folder[1]
oth_file = train_data_folder[2]
vocab = defaultdict(float)
pos_count = 0
with open(pos_file, "rb") as f:
for line in f:
#words = line.split()
#if int (words[0]) != cv:
# continue
pos_count += 1
text = line
count = 0
#for word in words:
# if word == '|':
# count += 1
# continue
# if count == 2:
# text += word + ' '
rev = []
rev.append(text.strip())
if clean_string:
orig_rev = clean_str(" ".join(rev))
else:
orig_rev = " ".join(rev).lower()
words = set(orig_rev.split())
for word in words:
vocab[word] += 1
datum = {"y":0,
"text": orig_rev,
"num_words": len(orig_rev.split()),
"split": 1}
#"split": np.random.randint(0,cv)}
revs.append(datum)
neg_count = 0
with open(neg_file, "rb") as f:
for line in f:
#words = line.split()
#if int (words[0]) != cv:
# continue
neg_count += 1
text = line
count = 0
#for word in words:
# if word == '|':
# count += 1
# continue
# if count == 2:
# text += word + ' '
rev = []
rev.append(text.strip())
if clean_string:
orig_rev = clean_str(" ".join(rev))
else:
orig_rev = " ".join(rev).lower()
words = set(orig_rev.split())
for word in words:
vocab[word] += 1
datum = {"y":1,
"text": orig_rev,
"num_words": len(orig_rev.split()),
"split": 1}
#"split": np.random.randint(0,cv)}
revs.append(datum)
print "after_count: ", pos_count
print "before_count: ", neg_count
with open(oth_file, "rb") as f:
oth_len = sum(1 for line in f)
# make other training instances two times as before and after sum
#ratio = int(oth_len / (2 *(pos_count + neg_count)) )
#print "ratio: ", ratio
random.seed(int(cv))
f.seek(0)
for line in f:
#if random.randint(0, ratio) != 0:
# continue
rev = []
rev.append(line.strip())
if clean_string:
orig_rev = clean_str(" ".join(rev))
else:
orig_rev = " ".join(rev).lower()
words = set(orig_rev.split())
for word in words:
vocab[word] += 1
datum = {"y":2,
"text": orig_rev,
"num_words": len(orig_rev.split()),
"split": 1}
#"split": np.random.randint(0,cv)}
revs.append(datum)
# test part
pos_file = test_data_folder[0]
neg_file = test_data_folder[1]
oth_file = test_data_folder[2]
with open(pos_file, "rb") as f:
for line in f:
rev = []
rev.append(line.strip())
if clean_string:
orig_rev = clean_str(" ".join(rev))
else:
orig_rev = " ".join(rev).lower()
words = set(orig_rev.split())
for word in words:
vocab[word] += 1
datum = {"y":0,
"text": orig_rev,
"num_words": len(orig_rev.split()),
"split": 0}
#"split": np.random.randint(0,cv)}
revs.append(datum)
with open(neg_file, "rb") as f:
for line in f:
rev = []
rev.append(line.strip())
if clean_string:
orig_rev = clean_str(" ".join(rev))
else:
orig_rev = " ".join(rev).lower()
words = set(orig_rev.split())
for word in words:
vocab[word] += 1
datum = {"y":1,
"text": orig_rev,
"num_words": len(orig_rev.split()),
"split": 0}
#"split": np.random.randint(0,cv)}
revs.append(datum)
with open(oth_file, "rb") as f:
for line in f:
rev = []
rev.append(line.strip())
if clean_string:
orig_rev = clean_str(" ".join(rev))
else:
orig_rev = " ".join(rev).lower()
words = set(orig_rev.split())
for word in words:
vocab[word] += 1
datum = {"y":2,
"text": orig_rev,
"num_words": len(orig_rev.split()),
"split": 0}
#"split": np.random.randint(0,cv)}
revs.append(datum)
# candidate pool part
with open(candidate_pool_file, "rb") as f:
for line in f:
rev = []
rev.append(line.strip())
if clean_string:
orig_rev = clean_str(" ".join(rev))
else:
orig_rev = " ".join(rev).lower()
words = set(orig_rev.split())
for word in words:
vocab[word] += 1
datum = {"y":2,
"text": orig_rev,
"num_words": len(orig_rev.split()),
"split": 2}
#"split": np.random.randint(0,cv)}
revs.append(datum)
return revs, vocab
def get_W(word_vecs, k=300):
    """
    Get word matrix. W[i] is the vector for word indexed by i.
    Row 0 is reserved as an all-zero padding vector.

    Returns:
        (W, word_idx_map): W is a float32 array of shape (len(word_vecs)+1, k);
        word_idx_map maps each word to its row index (starting at 1).
    """
    W = np.zeros((len(word_vecs) + 1, k), dtype='float32')
    word_idx_map = {}
    for idx, word in enumerate(word_vecs, start=1):
        W[idx] = word_vecs[word]
        word_idx_map[word] = idx
    return W, word_idx_map
def load_bin_vec(fname, vocab):
    """
    Loads 300x1 word vecs from Google (Mikolov) word2vec

    Parses the word2vec binary format: a text header "<vocab_size> <dim>"
    followed by, for each word, the space-terminated word string and `dim`
    packed float32 values. Only words present in `vocab` are kept.

    NOTE(review): Python 2 code -- compares bytes read from a binary file
    to str literals, and uses xrange/np.fromstring; needs porting for
    Python 3.
    """
    word_vecs = {}
    with open(fname, "rb") as f:
        header = f.readline()
        vocab_size, layer1_size = map(int, header.split())
        binary_len = np.dtype('float32').itemsize * layer1_size
        for line in xrange(vocab_size):
            # Read the word one byte at a time until the separating space;
            # stray newlines between entries are skipped.
            word = []
            while True:
                ch = f.read(1)
                if ch == ' ':
                    word = ''.join(word)
                    break
                if ch != '\n':
                    word.append(ch)
            if word in vocab:
                word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32')
            else:
                # Skip the vector of out-of-vocabulary words.
                f.read(binary_len)
    return word_vecs
def add_unknown_words(word_vecs, vocab, min_df=1, k=300):
    """
    For words that occur in at least min_df documents, create a separate word vector.
    0.25 is chosen so the unknown vectors have (approximately) same variance as pre-trained ones.

    Mutates `word_vecs` in place; existing entries are never overwritten.
    """
    missing = (w for w in vocab if w not in word_vecs and vocab[w] >= min_df)
    for w in missing:
        word_vecs[w] = np.random.uniform(-0.25, 0.25, k)
def clean_str(string, TREC=False):
    """
    Tokenization/string cleaning for all datasets except for SST.

    Strips characters outside a small whitelist, splits common English
    contractions into separate tokens, pads punctuation with spaces and
    collapses runs of whitespace. Every dataset is lower cased except
    for TREC.
    """
    rules = [
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
    ]
    for pattern, replacement in rules:
        string = re.sub(pattern, replacement, string)
    return string.strip() if TREC else string.strip().lower()
def clean_str_sst(string):
    """
    Tokenization/string cleaning for the SST dataset: strip characters
    outside a small whitelist, collapse whitespace and lower-case.
    """
    cleaned = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    cleaned = re.sub(r"\s{2,}", " ", cleaned)
    return cleaned.strip().lower()
def create_database(iteration_i, context_flag, context_flag2, argv1, cluster_label):
    """Build word-vector features for one cluster label and pickle them.

    Loads train/test/candidate-pool sentence files, builds the vocabulary,
    loads pre-trained word2vec vectors (plus random vectors for unknown
    words), and dumps [revs, W, W2, word_idx_map, vocab] to
    mr_folder_<context_flag>_event_words/<iteration_i>/mr<cluster_label>.p

    NOTE(review): Python 2 code; relies on hardcoded relative paths.
    """
    #w2v_file = sys.argv[1]
    w2v_file = argv1
    #type = "without"
    # Input file locations are derived from the flags.
    folder = context_flag + "_event_words/" + iteration_i + "/"
    train_data_folder = [folder + "train_after_pairs", folder + "train_before_pairs", folder + "train_other_pairs"]
    #test_data_folder = [folder + "test_after_pairs", folder + "test_before_pairs", folder + "test_other_pairs"]
    test_data_folder = []
    test_data_folder = ["../TempEval3/new_preprocess/TempEval_after_" + context_flag2, "../TempEval3/new_preprocess/TempEval_before_" + context_flag2, "../TempEval3/new_preprocess/TempEval_other_" + context_flag2]
    #test_data_folder = ["../TempEval3/TempEval_after_with_event", "../TempEval3/TempEval_before_with_event", "../TempEval3/TempEval_other_with_event"]
    candidate_pool_file = folder + "test_other_pairs"
    print "loading training and validating data...",
    revs, vocab = build_data_cv(train_data_folder, test_data_folder, candidate_pool_file, cluster_label, clean_string=True)
    """
    In [37]: d = {'one' : [1., 2., 3., 4.], 'two' : [4., 3., 2., 1.]}
    In [38]: pd.DataFrame(d)
    Out[38]:
    one two
    0 1 4
    1 2 3
    2 3 2
    3 4 1
    In [39]: pd.DataFrame(d, index=['a', 'b', 'c', 'd'])
    Out[39]:
    one two
    a 1 4
    b 2 3
    c 3 2
    d 4 1
    """
    print "*************** cluster_label ", cluster_label, " ******************\n"
    max_l = np.max(pd.DataFrame(revs)["num_words"]) # np.max: maximum of the flattened array
    print "data loaded!"
    #print "number of training sentences: " + str(train_val_boundary)
    print "number of sentences: " + str(len(revs))
    print "vocab size: " + str(len(vocab))
    print "max sentence length: " + str(max_l)
    print "loading word2vec vectors...",
    w2v = load_bin_vec(w2v_file, vocab)
    print "word2vec loaded!"
    print "num words already in word2vec: " + str(len(w2v))
    # W: pre-trained vectors (random for unknown words); W2: all random.
    add_unknown_words(w2v, vocab)
    W, word_idx_map = get_W(w2v)
    rand_vecs = {}
    add_unknown_words(rand_vecs, vocab)
    W2, _ = get_W(rand_vecs)
    """
    # Save a dictionary into a pickle file.
    import pickle
    favorite_color = { "lion": "yellow", "kitty": "red" }
    pickle.dump( favorite_color, open( "save.p", "wb" ) )
    # Load the dictionary back from the pickle file.
    import pickle
    favorite_color = pickle.load( open( "save.p", "rb" ) )
    # favorite_color is now { "lion": "yellow", "kitty": "red" }
    """
    cPickle.dump([revs, W, W2, word_idx_map, vocab], open("mr_folder_" + context_flag + "_event_words/" + iteration_i + "/mr" + str(cluster_label) + ".p", "wb"))
    print "dataset created!"
def classifier_process_data_main(iteration_i, context_flag, context_flag2, w2v_file):
    """Entry point: build and pickle the dataset for cluster label 0."""
    create_database(iteration_i, context_flag, context_flag2, w2v_file, 0)
| |
"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The PropertyList (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in strings, you can use readPlistFromString()
and writePlistToString().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries, Data or datetime.datetime objects. String values (including
dictionary keys) may be unicode strings -- they will be written out as
UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python string.
Generate Plist example:
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = Data("<binary gunk>"),
someMoreData = Data("<lots of binary gunk>" * 10),
aDate = datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# unicode keys are possible, but a little awkward to use:
pl[u'\xc5benraa'] = "That was a unicode key."
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print pl["aKey"]
"""
# Public API of this module.
__all__ = [
    "readPlist", "writePlist", "readPlistFromString", "writePlistToString",
    "readPlistFromResource", "writePlistToResource",
    "Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
from cStringIO import StringIO
import re
try:
    from datetime import datetime
except ImportError:
    # We're running on Python < 2.3, we don't support dates here,
    # yet we provide a stub class so type dispatching works.
    class datetime(object):
        """Placeholder standing in for datetime.datetime; raises on any use."""
        def __init__(self, *args, **kwargs):
            raise ValueError("datetime is not supported")
def readPlist(pathOrFile):
    """Read a .plist file. 'pathOrFile' may either be a file name or a
    (readable) file object. Return the unpacked root object (which
    usually is a dictionary).
    """
    # Only close the stream if we opened it ourselves.
    shouldClose = isinstance(pathOrFile, (str, unicode))
    if shouldClose:
        pathOrFile = open(pathOrFile)
    parser = PlistParser()
    rootObject = parser.parse(pathOrFile)
    if shouldClose:
        pathOrFile.close()
    return rootObject
def writePlist(rootObject, pathOrFile):
    """Write 'rootObject' to a .plist file. 'pathOrFile' may either be a
    file name or a (writable) file object.
    """
    # Only close the stream if we opened it ourselves.
    shouldClose = isinstance(pathOrFile, (str, unicode))
    if shouldClose:
        pathOrFile = open(pathOrFile, "w")
    writer = PlistWriter(pathOrFile)
    writer.writeln("<plist version=\"1.0\">")
    writer.writeValue(rootObject)
    writer.writeln("</plist>")
    if shouldClose:
        pathOrFile.close()
def readPlistFromString(data):
    """Read a plist data from a string. Return the root object.
    """
    # Wrap the string in a file-like object so readPlist can stream it.
    return readPlist(StringIO(data))
def writePlistToString(rootObject):
    """Return 'rootObject' as a plist-formatted string.
    """
    # Serialize into an in-memory buffer and hand back its contents.
    f = StringIO()
    writePlist(rootObject, f)
    return f.getvalue()
def readPlistFromResource(path, restype='plst', resid=0):
    """Read plst resource from the resource fork of path.

    Mac-only: relies on the Carbon bindings, imported lazily so the rest
    of the module works on other platforms.
    """
    from Carbon.File import FSRef, FSGetResourceForkName
    from Carbon.Files import fsRdPerm
    from Carbon import Res
    fsRef = FSRef(path)
    # Open the file's resource fork read-only and fetch the raw resource.
    resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
    Res.UseResFile(resNum)
    plistData = Res.Get1Resource(restype, resid).data
    Res.CloseResFile(resNum)
    return readPlistFromString(plistData)
def writePlistToResource(rootObject, path, restype='plst', resid=0):
    """Write 'rootObject' as a plst resource to the resource fork of path.

    Mac-only: relies on the Carbon bindings, imported lazily so the rest
    of the module works on other platforms.
    """
    from Carbon.File import FSRef, FSGetResourceForkName
    from Carbon.Files import fsRdWrPerm
    from Carbon import Res
    plistData = writePlistToString(rootObject)
    fsRef = FSRef(path)
    resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdWrPerm)
    Res.UseResFile(resNum)
    # Remove any existing resource with the same type/id before writing.
    try:
        Res.Get1Resource(restype, resid).RemoveResource()
    except Res.Error:
        pass
    res = Res.Resource(plistData)
    res.AddResource(restype, resid, '')
    res.WriteResource()
    Res.CloseResFile(resNum)
class DumbXMLWriter:
    """Minimal writer emitting indented XML elements to a file object."""

    def __init__(self, file, indentLevel=0, indent="\t"):
        self.file = file
        self.stack = []
        self.indentLevel = indentLevel
        self.indent = indent

    def beginElement(self, element):
        # Remember the open element so endElement() can verify pairing.
        self.stack.append(element)
        self.writeln("<%s>" % element)
        self.indentLevel += 1

    def endElement(self, element):
        # Sanity-check that begin/end calls are properly nested.
        assert self.indentLevel > 0
        assert self.stack.pop() == element
        self.indentLevel -= 1
        self.writeln("</%s>" % element)

    def simpleElement(self, element, value=None):
        # An element without a value is emitted in self-closing form.
        if value is None:
            self.writeln("<%s/>" % element)
        else:
            self.writeln("<%s>%s</%s>" % (element, _escapeAndEncode(value), element))

    def writeln(self, line):
        # An empty line is written without indentation.
        if not line:
            self.file.write("\n")
        else:
            self.file.write(self.indentLevel * self.indent + line + "\n")
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z")
def _dateFromString(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime(*lst)
def _dateToString(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text.encode("utf-8") # encode as UTF-8
PLISTHEADER = """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
class PlistWriter(DumbXMLWriter):
    """Serializes a Python object tree to plist XML via DumbXMLWriter."""
    def __init__(self, file, indentLevel=0, indent="\t", writeHeader=1):
        # writeHeader may be suppressed when embedding a plist fragment.
        if writeHeader:
            file.write(PLISTHEADER)
        DumbXMLWriter.__init__(self, file, indentLevel, indent)
    def writeValue(self, value):
        """Dispatch *value* to the matching plist element writer by type."""
        if isinstance(value, (str, unicode)):
            self.simpleElement("string", value)
        elif isinstance(value, bool):
            # must switch for bool before int, as bool is a
            # subclass of int...
            if value:
                self.simpleElement("true")
            else:
                self.simpleElement("false")
        elif isinstance(value, (int, long)):
            self.simpleElement("integer", "%d" % value)
        elif isinstance(value, float):
            self.simpleElement("real", repr(value))
        elif isinstance(value, dict):
            self.writeDict(value)
        elif isinstance(value, Data):
            self.writeData(value)
        elif isinstance(value, datetime):
            self.simpleElement("date", _dateToString(value))
        elif isinstance(value, (tuple, list)):
            self.writeArray(value)
        else:
            raise TypeError("unsuported type: %s" % type(value))
    def writeData(self, data):
        """Emit a <data> element with base64 content wrapped to fit the indent."""
        self.beginElement("data")
        # Base64 lines are written one indent level out so they fit in 76 cols.
        self.indentLevel -= 1
        maxlinelength = 76 - len(self.indent.replace("\t", " " * 8) *
                                 self.indentLevel)
        for line in data.asBase64(maxlinelength).split("\n"):
            if line:
                self.writeln(line)
        self.indentLevel += 1
        self.endElement("data")
    def writeDict(self, d):
        """Emit a <dict>; keys are sorted for deterministic output."""
        self.beginElement("dict")
        items = d.items()
        items.sort()
        for key, value in items:
            if not isinstance(key, (str, unicode)):
                raise TypeError("keys must be strings")
            self.simpleElement("key", key)
            self.writeValue(value)
        self.endElement("dict")
    def writeArray(self, array):
        """Emit an <array> containing each value of the sequence."""
        self.beginElement("array")
        for value in array:
            self.writeValue(value)
        self.endElement("array")
class _InternalDict(dict):
    # This class is needed while Dict is scheduled for deprecation:
    # we only need to warn when a *user* instantiates Dict or when
    # the "attribute notation for dict keys" is used.
    # NOTE: the `raise AttributeError, attr` form is Python 2 syntax.
    def __getattr__(self, attr):
        # Fall back from d.attr to d[attr], warning about the deprecated style.
        try:
            value = self[attr]
        except KeyError:
            raise AttributeError, attr
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", PendingDeprecationWarning)
        return value
    def __setattr__(self, attr, value):
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", PendingDeprecationWarning)
        self[attr] = value
    def __delattr__(self, attr):
        try:
            del self[attr]
        except KeyError:
            raise AttributeError, attr
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", PendingDeprecationWarning)
class Dict(_InternalDict):
    """Deprecated dict subclass; use a builtin dict instead."""
    def __init__(self, **kwargs):
        from warnings import warn
        warn("The plistlib.Dict class is deprecated, use builtin dict instead",
            PendingDeprecationWarning)
        super(Dict, self).__init__(**kwargs)
class Plist(_InternalDict):
    """This class has been deprecated. Use readPlist() and writePlist()
    functions instead, together with regular dict objects.
    """
    def __init__(self, **kwargs):
        from warnings import warn
        warn("The Plist class is deprecated, use the readPlist() and "
             "writePlist() functions instead", PendingDeprecationWarning)
        super(Plist, self).__init__(**kwargs)
    def fromFile(cls, pathOrFile):
        """Deprecated. Use the readPlist() function instead."""
        rootObject = readPlist(pathOrFile)
        plist = cls()
        plist.update(rootObject)
        return plist
    # classmethod() call instead of decorator keeps Python < 2.4 compatibility.
    fromFile = classmethod(fromFile)
    def write(self, pathOrFile):
        """Deprecated. Use the writePlist() function instead."""
        writePlist(self, pathOrFile)
def _encodeBase64(s, maxlinelength=76):
    """Base64-encode *s*, wrapping output lines at *maxlinelength*.

    Copied from base64.encodestring(), with an added maxlinelength argument.
    """
    # b2a_base64 appends a newline per call, so feed it chunks whose encoded
    # form fits on one output line: 3 input bytes yield 4 output characters.
    maxbinsize = (maxlinelength // 4) * 3
    return "".join(
        binascii.b2a_base64(s[pos:pos + maxbinsize])
        for pos in range(0, len(s), maxbinsize)
    )
class Data:
    """Wrapper for binary data."""
    def __init__(self, data):
        self.data = data
    def fromBase64(cls, data):
        # base64.decodestring just calls binascii.a2b_base64;
        # it seems overkill to use both base64 and binascii.
        return cls(binascii.a2b_base64(data))
    # classmethod() call instead of decorator keeps Python < 2.4 compatibility.
    fromBase64 = classmethod(fromBase64)
    def asBase64(self, maxlinelength=76):
        """Return the wrapped data base64-encoded, wrapped at maxlinelength."""
        return _encodeBase64(self.data, maxlinelength)
    def __cmp__(self, other):
        # Python 2 three-way comparison: compare raw data when types allow.
        if isinstance(other, self.__class__):
            return cmp(self.data, other.data)
        elif isinstance(other, str):
            return cmp(self.data, other)
        else:
            # Different types: fall back to an arbitrary but stable ordering.
            return cmp(id(self), id(other))
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
    """Expat-driven parser that rebuilds the object tree from plist XML."""
    def __init__(self):
        # Stack of open containers (dicts/lists) being filled.
        self.stack = []
        # Pending <key> text waiting for the next value in a dict.
        self.currentKey = None
        self.root = None
    def parse(self, fileobj):
        """Parse plist XML from *fileobj* and return the root object."""
        from xml.parsers.expat import ParserCreate
        parser = ParserCreate()
        parser.StartElementHandler = self.handleBeginElement
        parser.EndElementHandler = self.handleEndElement
        parser.CharacterDataHandler = self.handleData
        parser.ParseFile(fileobj)
        return self.root
    def handleBeginElement(self, element, attrs):
        # Reset the character-data accumulator for this element.
        self.data = []
        # Dispatch to begin_<element> if such a handler is defined below.
        handler = getattr(self, "begin_" + element, None)
        if handler is not None:
            handler(attrs)
    def handleEndElement(self, element):
        # Dispatch to end_<element> if such a handler is defined below.
        handler = getattr(self, "end_" + element, None)
        if handler is not None:
            handler()
    def handleData(self, data):
        # Expat may deliver text in fragments; collect them all.
        self.data.append(data)
    def addObject(self, value):
        """Attach *value* to the container currently under construction."""
        if self.currentKey is not None:
            self.stack[-1][self.currentKey] = value
            self.currentKey = None
        elif not self.stack:
            # this is the root object
            self.root = value
        else:
            self.stack[-1].append(value)
    def getData(self):
        """Join and return accumulated character data, clearing the buffer."""
        data = "".join(self.data)
        try:
            # Prefer a plain (byte) string when the text is pure ASCII.
            data = data.encode("ascii")
        except UnicodeError:
            pass
        self.data = []
        return data
    # element handlers
    def begin_dict(self, attrs):
        d = _InternalDict()
        self.addObject(d)
        self.stack.append(d)
    def end_dict(self):
        self.stack.pop()
    def end_key(self):
        self.currentKey = self.getData()
    def begin_array(self, attrs):
        a = []
        self.addObject(a)
        self.stack.append(a)
    def end_array(self):
        self.stack.pop()
    def end_true(self):
        self.addObject(True)
    def end_false(self):
        self.addObject(False)
    def end_integer(self):
        self.addObject(int(self.getData()))
    def end_real(self):
        self.addObject(float(self.getData()))
    def end_string(self):
        self.addObject(self.getData())
    def end_data(self):
        self.addObject(Data.fromBase64(self.getData()))
    def end_date(self):
        self.addObject(_dateFromString(self.getData()))
# cruft to support booleans in Python <= 2.3
import sys
if sys.version_info[:2] < (2, 3):
    # Python 2.2 and earlier: no booleans
    # Python 2.2.x: booleans are ints
    # NOTE: assigning to True/False below is only legal on Python 2.
    class bool(int):
        """Imitation of the Python 2.3 bool object."""
        def __new__(cls, value):
            # "not not" normalizes any truthy/falsy value to 1 or 0.
            return int.__new__(cls, not not value)
        def __repr__(self):
            if self:
                return "True"
            else:
                return "False"
    True = bool(1)
    False = bool(0)
| |
"""Provide functionality to interact with Cast devices on the network."""
import asyncio
from datetime import timedelta
import functools as ft
import json
import logging
from typing import Optional
import pychromecast
from pychromecast.controllers.homeassistant import HomeAssistantController
from pychromecast.controllers.multizone import MultizoneManager
from pychromecast.quick_play import quick_play
from pychromecast.socket_client import (
CONNECTION_STATUS_CONNECTED,
CONNECTION_STATUS_DISCONNECTED,
)
import voluptuous as vol
from homeassistant.auth.models import RefreshToken
from homeassistant.components import media_source, zeroconf
from homeassistant.components.http.auth import async_sign_path
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_EXTRA,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.network import NoURLAvailableError, get_url
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
from homeassistant.util.logging import async_create_catching_coro
from .const import (
ADDED_CAST_DEVICES_KEY,
CAST_MULTIZONE_MANAGER_KEY,
DOMAIN as CAST_DOMAIN,
KNOWN_CHROMECAST_INFO_KEY,
SIGNAL_CAST_DISCOVERED,
SIGNAL_HASS_CAST_SHOW_VIEW,
)
from .discovery import setup_internal_discovery
from .helpers import CastStatusListener, ChromecastInfo, ChromeCastZeroconf
_LOGGER = logging.getLogger(__name__)
CONF_IGNORE_CEC = "ignore_cec"
CONF_UUID = "uuid"
CAST_SPLASH = "https://www.home-assistant.io/images/cast/splash.png"
SUPPORT_CAST = (
SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
)
ENTITY_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_UUID): cv.string,
vol.Optional(CONF_IGNORE_CEC): vol.All(cv.ensure_list, [cv.string]),
}
),
)
@callback
def _async_create_cast_device(hass: HomeAssistantType, info: ChromecastInfo):
    """Create a CastDevice Entity from the chromecast object.

    Returns None if the cast device has already been added.
    """
    _LOGGER.debug("_async_create_cast_device: %s", info)
    if info.uuid is None:
        # Without a UUID the device cannot be deduplicated; refuse to add it.
        _LOGGER.error("_async_create_cast_device uuid none: %s", info)
        return None
    # Found a cast with UUID
    added_casts = hass.data[ADDED_CAST_DEVICES_KEY]
    if info.uuid in added_casts:
        # Already added this one, the entity will take care of moved hosts
        # itself
        return None
    # -> New cast device
    added_casts.add(info.uuid)
    return CastDevice(info)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Cast from a config entry.

    Launches one _async_setup_platform task per configured item and raises
    PlatformNotReady if any of them failed, so Home Assistant retries later.
    """
    config = hass.data[CAST_DOMAIN].get("media_player") or {}
    if not isinstance(config, list):
        config = [config]
    # no pending task
    done, _ = await asyncio.wait(
        [
            _async_setup_platform(hass, ENTITY_SCHEMA(cfg), async_add_entities)
            for cfg in config
        ]
    )
    if any(task.exception() for task in done):
        # Log each failure (exc_info=None entries are tasks that succeeded).
        exceptions = [task.exception() for task in done]
        for exception in exceptions:
            _LOGGER.debug("Failed to setup chromecast", exc_info=exception)
        raise PlatformNotReady
async def _async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities
):
    """Set up the cast platform."""
    # Import CEC IGNORE attributes
    pychromecast.IGNORE_CEC += config.get(CONF_IGNORE_CEC, [])
    hass.data.setdefault(ADDED_CAST_DEVICES_KEY, set())
    hass.data.setdefault(KNOWN_CHROMECAST_INFO_KEY, {})
    info = None
    if CONF_UUID in config:
        # A UUID in the config restricts this platform to one specific device.
        info = ChromecastInfo(uuid=config[CONF_UUID], services=None)
    @callback
    def async_cast_discovered(discover: ChromecastInfo) -> None:
        """Handle discovery of a new chromecast."""
        # If info is set, we're handling a specific cast device identified by UUID
        if info is not None and (info.uuid is not None and info.uuid != discover.uuid):
            # UUID not matching, this is not it.
            return
        cast_device = _async_create_cast_device(hass, discover)
        if cast_device is not None:
            async_add_entities([cast_device])
    async_dispatcher_connect(hass, SIGNAL_CAST_DISCOVERED, async_cast_discovered)
    # Re-play the callback for all past chromecasts, store the objects in
    # a list to avoid concurrent modification resulting in exception.
    for chromecast in hass.data[KNOWN_CHROMECAST_INFO_KEY].values():
        async_cast_discovered(chromecast)
    ChromeCastZeroconf.set_zeroconf(await zeroconf.async_get_instance(hass))
    # Discovery runs blocking zeroconf work, so push it onto the executor.
    hass.async_add_executor_job(setup_internal_discovery, hass)
class CastDevice(MediaPlayerEntity):
"""Representation of a Cast device on the network.
This class is the holder of the pychromecast.Chromecast object and its
socket client. It therefore handles all reconnects and audio group changing
"elected leader" itself.
"""
    def __init__(self, cast_info: ChromecastInfo):
        """Initialize the cast device."""
        self._cast_info = cast_info
        self.services = cast_info.services
        self._chromecast: Optional[pychromecast.Chromecast] = None
        self.cast_status = None  # latest cast status from pychromecast
        self.media_status = None  # latest media status from pychromecast
        self.media_status_received = None  # utcnow() when media_status arrived
        self.mz_media_status = {}  # group uuid -> media status
        self.mz_media_status_received = {}  # group uuid -> utcnow() of status
        self.mz_mgr = None  # shared MultizoneManager, set in async_set_cast_info
        self._available = False
        self._status_listener: Optional[CastStatusListener] = None
        self._hass_cast_controller: Optional[HomeAssistantController] = None
        self._add_remove_handler = None  # dispatcher unsubscribe callback
        self._cast_view_remove_handler = None  # dispatcher unsubscribe callback
async def async_added_to_hass(self):
"""Create chromecast object when added to hass."""
self._add_remove_handler = async_dispatcher_connect(
self.hass, SIGNAL_CAST_DISCOVERED, self._async_cast_discovered
)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._async_stop)
self.hass.async_create_task(
async_create_catching_coro(self.async_set_cast_info(self._cast_info))
)
self._cast_view_remove_handler = async_dispatcher_connect(
self.hass, SIGNAL_HASS_CAST_SHOW_VIEW, self._handle_signal_show_view
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect Chromecast object when removed."""
await self._async_disconnect()
if self._cast_info.uuid is not None:
# Remove the entity from the added casts so that it can dynamically
# be re-added again.
self.hass.data[ADDED_CAST_DEVICES_KEY].remove(self._cast_info.uuid)
if self._add_remove_handler:
self._add_remove_handler()
self._add_remove_handler = None
if self._cast_view_remove_handler:
self._cast_view_remove_handler()
self._cast_view_remove_handler = None
async def async_set_cast_info(self, cast_info):
"""Set the cast information and set up the chromecast object."""
self._cast_info = cast_info
if self._chromecast is not None:
# Only setup the chromecast once, added elements to services
# will automatically be picked up.
return
_LOGGER.debug(
"[%s %s] Connecting to cast device by service %s",
self.entity_id,
self._cast_info.friendly_name,
self.services,
)
chromecast = await self.hass.async_add_executor_job(
pychromecast.get_chromecast_from_service,
(
self.services,
cast_info.uuid,
cast_info.model_name,
cast_info.friendly_name,
None,
None,
),
ChromeCastZeroconf.get_zeroconf(),
)
self._chromecast = chromecast
if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:
self.hass.data[CAST_MULTIZONE_MANAGER_KEY] = MultizoneManager()
self.mz_mgr = self.hass.data[CAST_MULTIZONE_MANAGER_KEY]
self._status_listener = CastStatusListener(self, chromecast, self.mz_mgr)
self._available = False
self.cast_status = chromecast.status
self.media_status = chromecast.media_controller.status
self._chromecast.start()
self.async_write_ha_state()
    async def _async_disconnect(self):
        """Disconnect Chromecast object if it is set."""
        if self._chromecast is None:
            # Can't disconnect if not connected.
            return
        _LOGGER.debug(
            "[%s %s] Disconnecting from chromecast socket",
            self.entity_id,
            self._cast_info.friendly_name,
        )
        # Mark unavailable before the (blocking) disconnect so the UI updates.
        self._available = False
        self.async_write_ha_state()
        # disconnect() blocks on the socket, so run it in the executor.
        await self.hass.async_add_executor_job(self._chromecast.disconnect)
        self._invalidate()
        self.async_write_ha_state()
def _invalidate(self):
"""Invalidate some attributes."""
self._chromecast = None
self.cast_status = None
self.media_status = None
self.media_status_received = None
self.mz_media_status = {}
self.mz_media_status_received = {}
self.mz_mgr = None
self._hass_cast_controller = None
if self._status_listener is not None:
self._status_listener.invalidate()
self._status_listener = None
# ========== Callbacks ==========
def new_cast_status(self, cast_status):
"""Handle updates of the cast status."""
self.cast_status = cast_status
self.schedule_update_ha_state()
def new_media_status(self, media_status):
"""Handle updates of the media status."""
if (
media_status
and media_status.player_is_idle
and media_status.idle_reason == "ERROR"
):
external_url = None
internal_url = None
tts_base_url = None
url_description = ""
if "tts" in self.hass.config.components:
try:
tts_base_url = self.hass.components.tts.get_base_url(self.hass)
except KeyError:
# base_url not configured, ignore
pass
try:
external_url = get_url(self.hass, allow_internal=False)
except NoURLAvailableError:
# external_url not configured, ignore
pass
try:
internal_url = get_url(self.hass, allow_external=False)
except NoURLAvailableError:
# internal_url not configured, ignore
pass
if media_status.content_id:
if tts_base_url and media_status.content_id.startswith(tts_base_url):
url_description = f" from tts.base_url ({tts_base_url})"
if external_url and media_status.content_id.startswith(external_url):
url_description = f" from external_url ({external_url})"
if internal_url and media_status.content_id.startswith(internal_url):
url_description = f" from internal_url ({internal_url})"
_LOGGER.error(
"Failed to cast media %s%s. Please make sure the URL is: "
"Reachable from the cast device and either a publicly resolvable "
"hostname or an IP address",
media_status.content_id,
url_description,
)
self.media_status = media_status
self.media_status_received = dt_util.utcnow()
self.schedule_update_ha_state()
def new_connection_status(self, connection_status):
"""Handle updates of connection status."""
_LOGGER.debug(
"[%s %s] Received cast device connection status: %s",
self.entity_id,
self._cast_info.friendly_name,
connection_status.status,
)
if connection_status.status == CONNECTION_STATUS_DISCONNECTED:
self._available = False
self._invalidate()
self.schedule_update_ha_state()
return
new_available = connection_status.status == CONNECTION_STATUS_CONNECTED
if new_available != self._available:
# Connection status callbacks happen often when disconnected.
# Only update state when availability changed to put less pressure
# on state machine.
_LOGGER.debug(
"[%s %s] Cast device availability changed: %s",
self.entity_id,
self._cast_info.friendly_name,
connection_status.status,
)
self._available = new_available
self.schedule_update_ha_state()
def multizone_new_media_status(self, group_uuid, media_status):
"""Handle updates of audio group media status."""
_LOGGER.debug(
"[%s %s] Multizone %s media status: %s",
self.entity_id,
self._cast_info.friendly_name,
group_uuid,
media_status,
)
self.mz_media_status[group_uuid] = media_status
self.mz_media_status_received[group_uuid] = dt_util.utcnow()
self.schedule_update_ha_state()
# ========== Service Calls ==========
def _media_controller(self):
"""
Return media controller.
First try from our own cast, then groups which our cast is a member in.
"""
media_status = self.media_status
media_controller = self._chromecast.media_controller
if media_status is None or media_status.player_state == "UNKNOWN":
groups = self.mz_media_status
for k, val in groups.items():
if val and val.player_state != "UNKNOWN":
media_controller = self.mz_mgr.get_multizone_mediacontroller(k)
break
return media_controller
    def turn_on(self):
        """Turn on the cast device."""
        if not self._chromecast.is_idle:
            # Already turned on
            return
        if self._chromecast.app_id is not None:
            # Quit the previous app before starting splash screen
            self._chromecast.quit_app()
        # The only way we can turn the Chromecast is on is by launching an app
        # (here: showing the Home Assistant splash image).
        self._chromecast.play_media(CAST_SPLASH, pychromecast.STREAM_TYPE_BUFFERED)
def turn_off(self):
"""Turn off the cast device."""
self._chromecast.quit_app()
def mute_volume(self, mute):
"""Mute the volume."""
self._chromecast.set_volume_muted(mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._chromecast.set_volume(volume)
def media_play(self):
"""Send play command."""
media_controller = self._media_controller()
media_controller.play()
def media_pause(self):
"""Send pause command."""
media_controller = self._media_controller()
media_controller.pause()
def media_stop(self):
"""Send stop command."""
media_controller = self._media_controller()
media_controller.stop()
def media_previous_track(self):
"""Send previous track command."""
media_controller = self._media_controller()
media_controller.queue_prev()
def media_next_track(self):
"""Send next track command."""
media_controller = self._media_controller()
media_controller.queue_next()
def media_seek(self, position):
"""Seek the media to a specific location."""
media_controller = self._media_controller()
media_controller.seek(position)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
result = await media_source.async_browse_media(self.hass, media_content_id)
return result
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
# Handle media_source
if media_source.is_media_source_id(media_id):
sourced_media = await media_source.async_resolve_media(self.hass, media_id)
media_type = sourced_media.mime_type
media_id = sourced_media.url
# If media ID is a relative URL, we serve it from HA.
# Create a signed path.
if media_id[0] == "/":
# Sign URL with Home Assistant Cast User
config_entries = self.hass.config_entries.async_entries(CAST_DOMAIN)
user_id = config_entries[0].data["user_id"]
user = await self.hass.auth.async_get_user(user_id)
if user.refresh_tokens:
refresh_token: RefreshToken = list(user.refresh_tokens.values())[0]
media_id = async_sign_path(
self.hass,
refresh_token.id,
media_id,
timedelta(minutes=5),
)
# prepend external URL
hass_url = get_url(self.hass, prefer_external=True)
media_id = f"{hass_url}{media_id}"
await self.hass.async_add_executor_job(
ft.partial(self.play_media, media_type, media_id, **kwargs)
)
def play_media(self, media_type, media_id, **kwargs):
"""Play media from a URL."""
# We do not want this to be forwarded to a group
if media_type == CAST_DOMAIN:
try:
app_data = json.loads(media_id)
except json.JSONDecodeError:
_LOGGER.error("Invalid JSON in media_content_id")
raise
# Special handling for passed `app_id` parameter. This will only launch
# an arbitrary cast app, generally for UX.
if "app_id" in app_data:
app_id = app_data.pop("app_id")
_LOGGER.info("Starting Cast app by ID %s", app_id)
self._chromecast.start_app(app_id)
if app_data:
_LOGGER.warning(
"Extra keys %s were ignored. Please use app_name to cast media",
app_data.keys(),
)
return
app_name = app_data.pop("app_name")
try:
quick_play(self._chromecast, app_name, app_data)
except NotImplementedError:
_LOGGER.error("App %s not supported", app_name)
else:
self._chromecast.media_controller.play_media(
media_id, media_type, **kwargs.get(ATTR_MEDIA_EXTRA, {})
)
# ========== Properties ==========
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._cast_info.friendly_name
@property
def device_info(self):
"""Return information about the device."""
cast_info = self._cast_info
if cast_info.model_name == "Google Cast Group":
return None
return {
"name": cast_info.friendly_name,
"identifiers": {(CAST_DOMAIN, cast_info.uuid.replace("-", ""))},
"model": cast_info.model_name,
"manufacturer": cast_info.manufacturer,
}
def _media_status(self):
"""
Return media status.
First try from our own cast, then groups which our cast is a member in.
"""
media_status = self.media_status
media_status_received = self.media_status_received
if media_status is None or media_status.player_state == "UNKNOWN":
groups = self.mz_media_status
for k, val in groups.items():
if val and val.player_state != "UNKNOWN":
media_status = val
media_status_received = self.mz_media_status_received[k]
break
return (media_status, media_status_received)
@property
def state(self):
"""Return the state of the player."""
media_status, _ = self._media_status()
if media_status is None:
return None
if media_status.player_is_playing:
return STATE_PLAYING
if media_status.player_is_paused:
return STATE_PAUSED
if media_status.player_is_idle:
return STATE_IDLE
if self._chromecast is not None and self._chromecast.is_idle:
return STATE_OFF
return None
@property
def available(self):
"""Return True if the cast device is connected."""
return self._available
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self.cast_status.volume_level if self.cast_status else None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self.cast_status.volume_muted if self.cast_status else None
@property
def media_content_id(self):
"""Content ID of current playing media."""
media_status, _ = self._media_status()
return media_status.content_id if media_status else None
@property
def media_content_type(self):
"""Content type of current playing media."""
media_status, _ = self._media_status()
if media_status is None:
return None
if media_status.media_is_tvshow:
return MEDIA_TYPE_TVSHOW
if media_status.media_is_movie:
return MEDIA_TYPE_MOVIE
if media_status.media_is_musictrack:
return MEDIA_TYPE_MUSIC
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
media_status, _ = self._media_status()
return media_status.duration if media_status else None
@property
def media_image_url(self):
"""Image url of current playing media."""
media_status, _ = self._media_status()
if media_status is None:
return None
images = media_status.images
return (
images[0].url.replace("http://", "//") if images and images[0].url else None
)
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return True
@property
def media_title(self):
"""Title of current playing media."""
media_status, _ = self._media_status()
return media_status.title if media_status else None
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
media_status, _ = self._media_status()
return media_status.artist if media_status else None
@property
def media_album_name(self):
"""Album of current playing media (Music track only)."""
media_status, _ = self._media_status()
return media_status.album_name if media_status else None
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
media_status, _ = self._media_status()
return media_status.album_artist if media_status else None
@property
def media_track(self):
"""Track number of current playing media (Music track only)."""
media_status, _ = self._media_status()
return media_status.track if media_status else None
@property
def media_series_title(self):
"""Return the title of the series of current playing media."""
media_status, _ = self._media_status()
return media_status.series_title if media_status else None
@property
def media_season(self):
"""Season of current playing media (TV Show only)."""
media_status, _ = self._media_status()
return media_status.season if media_status else None
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
media_status, _ = self._media_status()
return media_status.episode if media_status else None
@property
def app_id(self):
"""Return the ID of the current running app."""
return self._chromecast.app_id if self._chromecast else None
@property
def app_name(self):
"""Name of the current running app."""
return self._chromecast.app_display_name if self._chromecast else None
@property
def supported_features(self):
"""Flag media player features that are supported."""
support = SUPPORT_CAST
media_status, _ = self._media_status()
if media_status:
if media_status.supports_queue_next:
support |= SUPPORT_PREVIOUS_TRACK
if media_status.supports_queue_next:
support |= SUPPORT_NEXT_TRACK
if media_status.supports_seek:
support |= SUPPORT_SEEK
if "media_source" in self.hass.config.components:
support |= SUPPORT_BROWSE_MEDIA
return support
@property
def media_position(self):
    """Position of current playing media in seconds."""
    media_status, _ = self._media_status()
    if media_status is None:
        return None
    # Position is only meaningful while the player is active.
    active = (
        media_status.player_is_playing
        or media_status.player_is_paused
        or media_status.player_is_idle
    )
    return media_status.current_time if active else None
@property
def media_position_updated_at(self):
    """When was the position of the current playing media valid.

    Returns value from homeassistant.util.dt.utcnow().
    """
    _, media_status_received = self._media_status()
    return media_status_received
@property
def unique_id(self) -> Optional[str]:
    """Return a unique ID for this cast device."""
    cast_info = self._cast_info
    return cast_info.uuid
async def _async_cast_discovered(self, discover: ChromecastInfo):
    """Handle discovery of new Chromecast."""
    own_uuid = self._cast_info.uuid
    # We can't handle empty UUIDs, and other devices are not our concern.
    if own_uuid is None or own_uuid != discover.uuid:
        return
    _LOGGER.debug("Discovered chromecast with same UUID: %s", discover)
    await self.async_set_cast_info(discover)
async def _async_stop(self, event):
    """Tear down the socket connection when Home Assistant stops."""
    await self._async_disconnect()
def _handle_signal_show_view(
    self,
    controller: HomeAssistantController,
    entity_id: str,
    view_path: str,
    url_path: Optional[str],
):
    """Handle a show view signal."""
    if entity_id != self.entity_id:
        # The signal is addressed to a different media player entity.
        return
    if self._hass_cast_controller is None:
        # Lazily register the Home Assistant cast controller once.
        self._hass_cast_controller = controller
        self._chromecast.register_handler(controller)
    self._hass_cast_controller.show_lovelace_view(view_path, url_path)
| |
__author__ = 'abdul'
import pymongo
import config
from bson import DBRef
from errors import MongoctlException
from mongoctl_logging import log_warning, log_verbose, log_info, log_exception
from mongo_uri_tools import parse_mongo_uri
from utils import (
resolve_class, document_pretty_string, is_valid_member_address, listify
)
from mongo_version import is_supported_mongo_version, is_valid_version
from mongo_uri_tools import is_cluster_mongo_uri, mask_mongo_uri
from pymongo.errors import ConnectionFailure
DEFAULT_SERVERS_FILE = "servers.config"
DEFAULT_CLUSTERS_FILE = "clusters.config"
DEFAULT_SERVERS_COLLECTION = "servers"
DEFAULT_CLUSTERS_COLLECTION = "clusters"
DEFAULT_ACTIVITY_COLLECTION = "logs.server-activity"
LOOKUP_TYPE_REPLICA_MEMBER = "replicaMembers"
LOOKUP_TYPE_CONFIG_SVR = "configServers"
LOOKUP_TYPE_SHARDS = "shards"
LOOKUP_TYPE_ANY = [LOOKUP_TYPE_CONFIG_SVR, LOOKUP_TYPE_REPLICA_MEMBER,
LOOKUP_TYPE_SHARDS]
###############################################################################
# Global variable: mongoctl's mongodb object
__mongoctl_db__ = None
###############################################################################
def get_mongoctl_database():
    """Return mongoctl's own MongoDB database object, connecting lazily.

    Caches the connection in the module global __mongoctl_db__. On a
    connection failure the global is set to the string sentinel "OFFLINE"
    so subsequent calls do not retry the connection.
    """
    # if not using db then return
    if not has_db_repository():
        return
    global __mongoctl_db__
    if __mongoctl_db__ is not None:
        # Cached value: either a db handle or the "OFFLINE" sentinel.
        return __mongoctl_db__
    log_verbose("Connecting to mongoctl db...")
    try:
        (conn, dbname) = _db_repo_connect()
        __mongoctl_db__ = conn[dbname]
        return __mongoctl_db__
    except ConnectionFailure, e:
        log_exception(e)
        # Remember the failure so we do not retry on every lookup.
        __mongoctl_db__ = "OFFLINE"
        log_verbose("\n*************\n"
                    "Will not be using database repository for configurations"
                    " at this time!"
                    "\nREASON: Could not establish a database"
                    " connection to mongoctl's database repository."
                    "\nCAUSE: %s."
                    "\n*************" % e)
###############################################################################
def has_db_repository():
    """Return True when a database repository is configured."""
    db_conf = config.get_database_repository_conf()
    return db_conf is not None
###############################################################################
def has_file_repository():
    """Return True when a file repository is configured."""
    file_conf = config.get_file_repository_conf()
    return file_conf is not None
###############################################################################
def consulting_db_repository():
    """Return truthy when the db repository is configured and reachable."""
    if not has_db_repository():
        return False
    return is_db_repository_online()
###############################################################################
def is_db_repository_online():
    """Return truthy when the mongoctl database connection is usable."""
    mongoctl_db = get_mongoctl_database()
    if not mongoctl_db:
        return mongoctl_db
    return mongoctl_db != "OFFLINE"
###############################################################################
def _db_repo_connect():
    """Connect to the configured database repository.

    Returns a (connection, database-name) pair.
    """
    db_conf = config.get_database_repository_conf()
    uri = db_conf["databaseURI"]
    dbname = parse_mongo_uri(uri).database
    conn = pymongo.Connection(uri)
    return conn, dbname
###############################################################################
def validate_repositories():
    """Raise MongoctlException unless at least one repository is configured."""
    if has_file_repository() or has_db_repository():
        return
    raise MongoctlException("Invalid 'mongoctl.config': No fileRepository"
                            " or databaseRepository configured. At least"
                            " one repository has to be configured.")
###############################################################################
# Server lookup functions
###############################################################################
def lookup_server(server_id):
    """Find a server config by id; db repository first, then file repo."""
    validate_repositories()
    server = db_lookup_server(server_id) if consulting_db_repository() else None
    # Fall back to the file repository when the db lookup found nothing.
    if server is None and has_file_repository():
        server = config_lookup_server(server_id)
    return server
###############################################################################
def lookup_and_validate_server(server_id):
    """Look up a server by id; raise when missing or invalid."""
    server = lookup_server(server_id)
    if server is None:
        raise MongoctlException("Cannot find configuration for a server "
                                "with _id of '%s'." % server_id)
    validation_errors = validate_server(server)
    if validation_errors:
        raise MongoctlException(
            "Server '%s' configuration is not valid. Please fix errors below"
            " and try again.\n%s" % (server_id,"\n".join(validation_errors)))
    return server
###############################################################################
def db_lookup_server(server_id):
    """Fetch a server doc from the db repo; return a Server or None."""
    collection = get_mongoctl_server_db_collection()
    doc = collection.find_one({"_id": server_id})
    return new_server(doc) if doc else None
###############################################################################
## Looks up the server from config file
def config_lookup_server(server_id):
    """Look up a server by id from the file repository."""
    return get_configured_servers().get(server_id)
###############################################################################
# returns all servers configured in both DB and config file
def lookup_all_servers():
    """Return all servers configured in both the db and the config file."""
    validate_repositories()
    all_servers = {}
    if consulting_db_repository():
        all_servers = db_lookup_all_servers()
    if has_file_repository():
        # File-repo entries are overridden by db entries with the same id.
        merged = dict(get_configured_servers().items() + all_servers.items())
        all_servers = merged
    return all_servers.values()
###############################################################################
# returns servers saved in the db collection of servers
def db_lookup_all_servers():
    """Return the servers saved in the db collection of servers."""
    collection = get_mongoctl_server_db_collection()
    return new_servers_dict(collection.find())
###############################################################################
# Cluster lookup functions
###############################################################################
def lookup_and_validate_cluster(cluster_id):
    """Look up a cluster by id and validate it; raise when unknown."""
    cluster = lookup_cluster(cluster_id)
    if cluster is not None:
        validate_cluster(cluster)
        return cluster
    raise MongoctlException("Unknown cluster: %s" % cluster_id)
###############################################################################
# Lookup by cluster id
def lookup_cluster(cluster_id):
    """Find a cluster config by id; db repository first, then file repo."""
    validate_repositories()
    cluster = (db_lookup_cluster(cluster_id)
               if consulting_db_repository() else None)
    # Fall back to the file repository when the db lookup found nothing.
    if cluster is None and has_file_repository():
        cluster = config_lookup_cluster(cluster_id)
    return cluster
###############################################################################
# Looks up the server from config file
def config_lookup_cluster(cluster_id):
    """Look up a cluster by id from the file repository."""
    return get_configured_clusters().get(cluster_id)
###############################################################################
def db_lookup_cluster(cluster_id):
    """Fetch a cluster doc from the db repo; return a cluster or None."""
    doc = get_mongoctl_cluster_db_collection().find_one({"_id": cluster_id})
    if doc is None:
        return None
    return new_cluster(doc)
###############################################################################
# returns all clusters configured in both DB and config file
def lookup_all_clusters():
    """Return all clusters configured in both the db and the config file."""
    validate_repositories()
    all_clusters = {}
    if consulting_db_repository():
        all_clusters = db_lookup_all_clusters()
    if has_file_repository():
        # File-repo entries are overridden by db entries with the same id.
        merged = dict(get_configured_clusters().items() +
                      all_clusters.items())
        all_clusters = merged
    return all_clusters.values()
###############################################################################
# returns a dictionary of (cluster_id, cluster) looked up from DB
def db_lookup_all_clusters():
    """Return a dict of (cluster_id, cluster) looked up from the db."""
    collection = get_mongoctl_cluster_db_collection()
    return new_replicaset_clusters_dict(collection.find())
###############################################################################
# Lookup by server id
def db_lookup_cluster_by_server(server, lookup_type=LOOKUP_TYPE_ANY):
    """Find the db cluster referencing server.id under any lookup_type key."""
    collection = get_mongoctl_cluster_db_collection()
    # Build one $or clause per requested membership type.
    or_clauses = [{"%s.server.$id" % t: server.id}
                  for t in listify(lookup_type)]
    doc = collection.find_one({"$or": or_clauses})
    if doc is None:
        return None
    return new_cluster(doc)
###############################################################################
# Lookup by server id
def db_lookup_cluster_by_shard(shard):
    """Find the db sharded cluster whose shards reference shard.id."""
    collection = get_mongoctl_cluster_db_collection()
    doc = collection.find_one({"shards.cluster.$id": shard.id})
    if doc is None:
        return None
    return new_cluster(doc)
###############################################################################
def config_lookup_cluster_by_server(server, lookup_type=LOOKUP_TYPE_ANY):
    """Find the first configured cluster that references the given server."""
    clusters = get_configured_clusters().values()
    # Membership predicate for each supported lookup type.
    predicates = {
        LOOKUP_TYPE_REPLICA_MEMBER:
            lambda c: c.has_member_server(server),
        LOOKUP_TYPE_CONFIG_SVR:
            lambda c: cluster_has_config_server(c, server),
        LOOKUP_TYPE_SHARDS:
            lambda c: cluster_has_shard(c, server)
    }
    for t in listify(lookup_type):
        predicate = predicates.get(t)
        if predicate is None:
            continue
        for cluster in clusters:
            if predicate(cluster):
                return cluster
###############################################################################
def config_lookup_cluster_by_shard(shard):
    """Find the first configured cluster containing the given shard."""
    for cluster in get_configured_clusters().values():
        if cluster_has_shard(cluster, shard):
            return cluster
###############################################################################
def cluster_has_config_server(cluster, server):
    """Return the cluster when server is one of its configServers."""
    for server_doc in cluster.get_property("configServers") or []:
        ref = server_doc["server"]
        if isinstance(ref, DBRef) and ref.id == server.id:
            return cluster
###############################################################################
def cluster_has_shard(cluster, shard):
    """Return the cluster when shard (a Server or a cluster) is a member."""
    from objects.server import Server
    # Server shards are referenced by "server", cluster shards by "cluster".
    key = "server" if isinstance(shard, Server) else "cluster"
    for shard_doc in cluster.get_property("shards") or []:
        ref = shard_doc.get(key)
        if isinstance(ref, DBRef) and ref.id == shard.id:
            return cluster
###############################################################################
# Global variable: lazy loaded map that holds servers read from config file
__configured_servers__ = None
###############################################################################
def get_configured_servers():
    """Lazily load and cache the servers defined in the file repository."""
    global __configured_servers__
    if __configured_servers__ is not None:
        return __configured_servers__
    # Initialize the cache before reading so a failed read is not retried.
    __configured_servers__ = {}
    file_repo_conf = config.get_file_repository_conf()
    servers_path_or_url = file_repo_conf.get("servers", DEFAULT_SERVERS_FILE)
    server_documents = config.read_config_json("servers", servers_path_or_url)
    if not isinstance(server_documents, list):
        raise MongoctlException("Server list in '%s' must be an array" %
                                servers_path_or_url)
    for document in server_documents:
        server = new_server(document)
        __configured_servers__[server.id] = server
    return __configured_servers__
###############################################################################
# Global variable: lazy loaded map that holds clusters read from config file
__configured_clusters__ = None
###############################################################################
def get_configured_clusters():
    """Lazily load and cache the clusters defined in the file repository."""
    global __configured_clusters__
    if __configured_clusters__ is not None:
        return __configured_clusters__
    # Initialize the cache before reading so a failed read is not retried.
    __configured_clusters__ = {}
    file_repo_conf = config.get_file_repository_conf()
    clusters_path_or_url = file_repo_conf.get("clusters",
                                              DEFAULT_CLUSTERS_FILE)
    cluster_documents = config.read_config_json("clusters",
                                                clusters_path_or_url)
    if not isinstance(cluster_documents, list):
        raise MongoctlException("Cluster list in '%s' must be an array" %
                                clusters_path_or_url)
    for document in cluster_documents:
        cluster = new_cluster(document)
        __configured_clusters__[cluster.id] = cluster
    return __configured_clusters__
###############################################################################
def validate_cluster(cluster):
    """Validate a cluster config; raise MongoctlException on any errors."""
    log_info("Validating cluster '%s'..." % cluster.id)
    errors = []
    if isinstance(cluster, replicaset_cluster_type()):
        errors += validate_replicaset_cluster(cluster)
    elif isinstance(cluster, sharded_cluster_type()):
        errors += validate_sharded_cluster(cluster)
    if errors:
        raise MongoctlException("Cluster %s configuration is not valid. "
                                "Please fix errors below and try again.\n%s" %
                                (cluster.id, "\n".join(errors)))
    return cluster
###############################################################################
def validate_replicaset_cluster(cluster):
    """Return validation errors for a replica-set cluster (none checked)."""
    return []
###############################################################################
def validate_sharded_cluster(cluster):
    """Return validation errors; sharded clusters need 1 or 3 config servers."""
    errors = []
    config_members = cluster.config_members
    if not config_members or len(config_members) not in (1, 3):
        errors.append("Need 1 or 3 configServers configured in your cluster")
    return errors
###############################################################################
def lookup_validate_cluster_by_server(server):
    """Look up the server's cluster and validate it when found."""
    cluster = lookup_cluster_by_server(server)
    if cluster is None:
        return None
    validate_cluster(cluster)
    return cluster
###############################################################################
def lookup_cluster_by_server(server, lookup_type=LOOKUP_TYPE_ANY):
    """Find the cluster referencing server; db repo first, then file repo."""
    validate_repositories()
    cluster = None
    if consulting_db_repository():
        cluster = db_lookup_cluster_by_server(server, lookup_type=lookup_type)
    # Fall back to the file repository when the db lookup found nothing.
    if cluster is None and has_file_repository():
        cluster = config_lookup_cluster_by_server(server,
                                                  lookup_type=lookup_type)
    return cluster
###############################################################################
def lookup_cluster_by_shard(shard):
    """Find the cluster containing shard; db repo first, then file repo."""
    validate_repositories()
    cluster = (db_lookup_cluster_by_shard(shard)
               if consulting_db_repository() else None)
    # Fall back to the file repository when the db lookup found nothing.
    if cluster is None and has_file_repository():
        cluster = config_lookup_cluster_by_shard(shard)
    return cluster
###############################################################################
def validate_server(server):
    """Return a list of configuration errors for server (empty when valid)."""
    errors = []
    version = server.get_mongo_version()
    if version is None:
        # Missing versions are acceptable.
        return errors
    if not is_valid_version(version):
        errors.append("** Invalid mongoVersion value '%s'" % version)
    elif not is_supported_mongo_version(version):
        errors.append("** mongoVersion '%s' is not supported. Please refer"
                      " to mongoctl documentation for supported"
                      " versions." % version)
    return errors
###############################################################################
def get_mongoctl_server_db_collection():
    """Return the collection holding server configs in the db repository."""
    mongoctl_db = get_mongoctl_database()
    conf = config.get_database_repository_conf()
    name = conf.get("servers", DEFAULT_SERVERS_COLLECTION)
    return mongoctl_db[name]
###############################################################################
def get_mongoctl_cluster_db_collection():
    """Return the collection holding cluster configs in the db repository."""
    mongoctl_db = get_mongoctl_database()
    conf = config.get_database_repository_conf()
    name = conf.get("clusters", DEFAULT_CLUSTERS_COLLECTION)
    return mongoctl_db[name]
###############################################################################
def get_activity_collection():
    """Return the collection used to record server activity."""
    mongoctl_db = get_mongoctl_database()
    name = config.get_mongoctl_config_val(
        'activityCollectionName', DEFAULT_ACTIVITY_COLLECTION)
    return mongoctl_db[name]
###############################################################################
# Factory Functions
###############################################################################
def new_server(server_doc):
    """Instantiate the right Server subclass for server_doc's _type."""
    _type = server_doc.get("_type")
    # A missing _type defaults to a mongod server.
    type_map = {
        None: "mongoctl.objects.mongod.MongodServer",
        "mongod": "mongoctl.objects.mongod.MongodServer",
        "mongos": "mongoctl.objects.mongos.MongosServer"
    }
    server_type = type_map.get(_type)
    if server_type is None:
        raise MongoctlException("Unknown server _type '%s' for server:\n%s" %
                                (_type, document_pretty_string(server_doc)))
    clazz = resolve_class(server_type)
    return clazz(server_doc)
###############################################################################
def build_server_from_address(address):
    """Build a barebones server from "host:port"; None when invalid."""
    if not is_valid_member_address(address):
        return None
    port = int(address.split(":")[1])
    return new_server({"_id": address,
                       "address": address,
                       "cmdOptions": {
                           "port": port
                       }})
###############################################################################
def build_server_from_uri(uri):
    """Build a server from a mongodb URI, wiring in login credentials."""
    uri_wrapper = parse_mongo_uri(uri)
    node = uri_wrapper.node_list[0]
    address = "%s:%s" % (node[0], node[1])
    server = build_server_from_address(address)
    # set login user if specified
    username = uri_wrapper.username
    if username:
        database = uri_wrapper.database or "admin"
        server.set_login_user(database, username, uri_wrapper.password)
    return server
###############################################################################
def build_cluster_from_uri(uri):
    """Build a replica-set cluster object from a mongodb URI."""
    uri_wrapper = parse_mongo_uri(uri)
    member_doc_list = [{"host": "%s:%s" % (node[0], node[1])}
                       for node in uri_wrapper.node_list]
    cluster = new_cluster({
        "_id": mask_mongo_uri(uri),
        "members": member_doc_list
    })
    # set login user if specified
    username = uri_wrapper.username
    if username:
        database = uri_wrapper.database or "admin"
        password = uri_wrapper.password
        for member in cluster.get_members():
            member.get_server().set_login_user(database, username, password)
    return cluster
###############################################################################
def build_server_or_cluster_from_uri(uri):
    """Build a cluster for cluster URIs, otherwise a single server."""
    builder = (build_cluster_from_uri if is_cluster_mongo_uri(uri)
               else build_server_from_uri)
    return builder(uri)
###############################################################################
def new_servers_dict(docs):
    """Map each server document's _id to a constructed server object.

    Uses a plain loop instead of map() with a side-effecting lambda,
    which is clearer and avoids building a throwaway list.
    """
    d = {}
    for doc in docs:
        d[doc['_id']] = new_server(doc)
    return d
###############################################################################
def new_cluster(cluster_doc):
    """Instantiate the right cluster class for cluster_doc's _type."""
    _type = cluster_doc.get("_type")
    # A missing _type defaults to a replica-set cluster.
    if _type in (None, "ReplicaSetCluster"):
        clazz = replicaset_cluster_type()
    elif _type == "ShardedCluster":
        clazz = sharded_cluster_type()
    else:
        raise MongoctlException("Unknown cluster _type '%s' for server:\n%s" %
                                (_type, document_pretty_string(cluster_doc)))
    return clazz(cluster_doc)
###############################################################################
def new_replicaset_clusters_dict(docs):
    """Map each cluster document's _id to a constructed cluster object.

    Uses a plain loop instead of map() with a side-effecting lambda,
    which is clearer and avoids building a throwaway list.
    """
    d = {}
    for doc in docs:
        d[doc['_id']] = new_cluster(doc)
    return d
###############################################################################
def new_replicaset_cluster_member(cluster_mem_doc):
    """Construct a ReplicaSetClusterMember from its document."""
    clazz = resolve_class("mongoctl.objects.replicaset_cluster."
                          "ReplicaSetClusterMember")
    return clazz(cluster_mem_doc)
###############################################################################
def new_replicaset_cluster_member_list(docs_iteratable):
    """Construct a member object for every document in the iterable."""
    return [new_replicaset_cluster_member(doc) for doc in docs_iteratable]
def replicaset_cluster_type():
    """Resolve and return the ReplicaSetCluster class."""
    return resolve_class(
        "mongoctl.objects.replicaset_cluster.ReplicaSetCluster")
def sharded_cluster_type():
    """Resolve and return the ShardedCluster class."""
    clazz_name = "mongoctl.objects.sharded_cluster.ShardedCluster"
    return resolve_class(clazz_name)
| |
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Choose the profile for a board that has been or is being setup."""
from __future__ import print_function
import functools
import os
import sys
import six
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
from chromite.lib import sysroot_lib
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# Default value constants.
_DEFAULT_PROFILE = 'base'
def PathPrefixDecorator(f):
  """Add a prefix to the path or paths returned by the decorated function.

  Will not prepend the prefix if the path already starts with the prefix, so
  the decorator may be applied to functions that have mixed sources that may
  or may not already have applied them. This is especially useful for allowing
  tests and CLI args a little more leniency in how paths are provided.
  """
  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    result = f(*args, **kwargs)
    prefix = PathPrefixDecorator.prefix

    if not prefix or not result:
      # Nothing to do.
      return result
    elif not isinstance(result, str):
      # This module asserts Python 3.6+ at import time, so a plain
      # isinstance(result, str) check replaces six.string_types.
      # Transform each path in the collection.
      new_result = []
      for path in result:
        prefixed_path = os.path.join(prefix, path.lstrip(os.sep))
        new_result.append(path if path.startswith(prefix) else prefixed_path)

      return new_result
    elif not result.startswith(prefix):
      # Add the prefix.
      return os.path.join(prefix, result.lstrip(os.sep))

    # An already prefixed path.
    return result

  return wrapper

PathPrefixDecorator.prefix = None
class Error(Exception):
  """Base exception for all errors raised by this script."""
class InvalidArgumentsError(Error):
  """Raised when the provided arguments are invalid."""
class MakeProfileIsNotLinkError(Error):
  """Raised when the make profile exists but is not a symlink."""
class ProfileDirectoryNotFoundError(Error):
  """Raised when the profile directory cannot be located."""
def ChooseProfile(board, profile):
  """Make the link to choose the profile, print relevant warnings.

  Args:
    board: Board - the board being used.
    profile: Profile - the profile being used.

  Raises:
    OSError when the board's make_profile path exists and is not a link.
  """
  if not os.path.isfile(os.path.join(profile.directory, 'parent')):
    logging.warning("Portage profile directory %s has no 'parent' file. "
                    'This likely means your profile directory is invalid and '
                    'build_packages will fail.', profile.directory)

  current_profile = None
  if os.path.exists(board.make_profile):
    # Only read the link target when the path exists; a non-link is an error.
    try:
      current_profile = os.readlink(board.make_profile)
    except OSError:
      raise MakeProfileIsNotLinkError('%s is not a link.' % board.make_profile)

  if current_profile == profile.directory:
    # The existing link is what we were going to make, so nothing to do.
    return

  if current_profile is not None:
    # It exists and is changing, emit warning.
    fmt = {'board': board.board_variant, 'profile': profile.name}
    msg = ('You are switching profiles for a board that is already setup. This '
           'can cause trouble for Portage. If you experience problems with '
           'build_packages you may need to run:\n'
           "\t'setup_board --board %(board)s --force --profile %(profile)s'\n"
           '\nAlternatively, you can correct the dependency graph by using '
           "'emerge-%(board)s -c' or 'emerge-%(board)s -C <ebuild>'.")
    logging.warning(msg, fmt)

  # Make the symlink, overwrites existing link if one already exists.
  osutils.SafeSymlink(profile.directory, board.make_profile, sudo=True)

  # Update the profile override value.
  if profile.override:
    board.profile_override = profile.override
class Profile(object):
  """Simple data container class for the profile data."""

  def __init__(self, name, directory, override):
    self.name = name
    self.override = override
    self._directory = directory

  @property
  @PathPrefixDecorator
  def directory(self):
    """The profile directory, with the filesystem prefix applied."""
    return self._directory
def _GetProfile(opts, board):
  """Build the Profile selected by the arguments.

  Args:
    opts: The parsed command line options.
    board: Board - the board whose profile is being chosen.

  Returns:
    Profile - the selected profile.

  Raises:
    ProfileDirectoryNotFoundError when no profile directory can be found.
  """
  # Determine the override value - which profile is being selected.
  override = opts.profile if opts.profile else board.profile_override

  profile = _DEFAULT_PROFILE
  profile_directory = None

  if override and os.path.exists(override):
    profile_directory = os.path.abspath(override)
    profile = os.path.basename(profile_directory)
  elif override:
    profile = override

  if profile_directory is None:
    # Build profile directories in reverse order so we can search from most to
    # least specific.
    profile_dirs = ['%s/profiles/%s' % (overlay, profile) for overlay in
                    reversed(board.overlays)]

    for candidate in profile_dirs:
      if os.path.isdir(candidate):
        profile_directory = candidate
        break
    else:
      searched = ', '.join(profile_dirs)
      raise ProfileDirectoryNotFoundError(
          'Profile directory not found, searched in (%s).' % searched)

  return Profile(profile, profile_directory, override)
class Board(object):
  """Manage the board arguments and configs."""

  # Files located on the board.
  MAKE_PROFILE = '%(board_root)s/etc/portage/make.profile'

  def __init__(self, board=None, variant=None, board_root=None):
    """Board constructor.

    board [+ variant] is given preference when both board and board_root are
    provided.

    Preconditions:
      Either board and build_root are not None, or board_root is not None.
      With board + build_root [+ variant] we can construct the board root.
      With the board root we can have the board[_variant] directory.

    Args:
      board: str|None - The board name.
      variant: str|None - The variant name.
      board_root: str|None - The boards fully qualified build directory path.
    """
    if not board and not board_root:
      # Enforce preconditions.
      raise InvalidArgumentsError('Either board or board_root must be '
                                  'provided.')
    elif board:
      # The board and variant can be specified separately, or can both be
      # contained in the board name, separated by an underscore.
      board_split = board.split('_')
      variant_default = variant
      self._board_root = None
    else:
      self._board_root = os.path.normpath(board_root)
      board_split = os.path.basename(self._board_root).split('_')
      variant_default = None

    # First underscore-delimited component is the board name; the second, if
    # present, is the variant unless an explicit variant was supplied.
    self.board = board_split.pop(0)
    self.variant = board_split.pop(0) if board_split else variant_default

    if self.variant:
      self.board_variant = '%s_%s' % (self.board, self.variant)
    else:
      self.board_variant = self.board

    self.make_profile = self.MAKE_PROFILE % {'board_root': self.root}
    # This must come after the arguments required to build each variant of the
    # build root have been processed.
    self._sysroot_config = sysroot_lib.Sysroot(self.root)

  @property
  @PathPrefixDecorator
  def root(self):
    # Explicit board root wins; otherwise derive it from the board variant.
    if self._board_root:
      return self._board_root

    return os.path.join(cros_build_lib.GetSysroot(self.board_variant))

  @property
  @PathPrefixDecorator
  def overlays(self):
    # Overlay list is stored in the sysroot's standard BOARD_OVERLAY field.
    return self._sysroot_config.GetStandardField(
        sysroot_lib.STANDARD_FIELD_BOARD_OVERLAY).split()

  @property
  def profile_override(self):
    # Cached sysroot field; may be None when no override has been stored.
    return self._sysroot_config.GetCachedField('PROFILE_OVERRIDE')

  @profile_override.setter
  def profile_override(self, value):
    self._sysroot_config.SetCachedField('PROFILE_OVERRIDE', value)
def _GetBoard(opts):
  """Factory method to build a Board from the parsed CLI arguments."""
  return Board(board=opts.board,
               variant=opts.variant,
               board_root=opts.board_root)
def GetParser():
  """ArgumentParser builder and argument definitions."""
  parser = commandline.ArgumentParser(description=__doc__)
  parser.add_argument('-b', '--board',
                      default=os.environ.get('DEFAULT_BOARD'),
                      help='The name of the board to set up.')
  parser.add_argument('-r', '--board-root',
                      type='path',
                      help='Board root where the profile should be created.')
  parser.add_argument('-p', '--profile',
                      help='The portage configuration profile to use.')
  parser.add_argument('--variant', help='Board variant.')

  advanced = parser.add_argument_group('Advanced options')
  advanced.add_argument('--filesystem-prefix',
                        type='path',
                        help='Force filesystem accesses to be prefixed by the '
                             'given path.')
  return parser
def ParseArgs(argv):
  """Parse and validate the arguments."""
  parser = GetParser()
  opts = parser.parse_args(argv)

  # See Board.__init__ Preconditions.
  has_board = opts.board is not None
  has_board_root = bool(opts.board_root and os.path.exists(opts.board_root))
  if not has_board and not has_board_root:
    parser.error('Either board or board_root must be provided.')

  PathPrefixDecorator.prefix = opts.filesystem_prefix
  del opts.filesystem_prefix

  opts.Freeze()
  return opts
def main(argv):
  """Script entry point: select and link the profile for a board."""
  opts = ParseArgs(argv)

  board = _GetBoard(opts)
  if not os.path.exists(board.root):
    cros_build_lib.Die('The board has not been setup, please run setup_board '
                       'first.')

  try:
    profile = _GetProfile(opts, board)
  except ProfileDirectoryNotFoundError as e:
    cros_build_lib.Die(e)

  # Change the profile to the selected.
  logging.info('Selecting profile: %s for %s', profile.directory, board.root)
  try:
    ChooseProfile(board, profile)
  except MakeProfileIsNotLinkError as e:
    cros_build_lib.Die(e)
| |
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import webob
from jacket.api.compute.openstack.compute.legacy_v2.contrib import security_groups as \
secgroups_v2
from jacket.api.compute.openstack.compute import security_groups as \
secgroups_v21
from jacket.api.compute.openstack import wsgi
from jacket.compute import cloud
from jacket.compute.cloud import power_state
from jacket import context as context_maker
import jacket.db.compute
from jacket.compute import exception
from jacket.objects import compute
from jacket.compute import quota
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
from jacket.tests.compute.unit import fake_instance
from jacket.tests.compute import uuidsentinel as uuids
CONF = cfg.CONF
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
UUID_SERVER = uuids.server
class AttrDict(dict):
    """Dict subclass that exposes its keys as attributes (test fixture)."""

    def __getattr__(self, name):
        return self[name]
def security_group_request_template(**kwargs):
    """Build a security group request body, filling in default fields."""
    sg = dict(kwargs)
    sg.setdefault('name', 'test')
    sg.setdefault('description', 'test-description')
    return sg
def security_group_template(**kwargs):
    """Build a security group dict with default tenant/name/description."""
    sg = dict(kwargs)
    sg.setdefault('tenant_id', '123')
    sg.setdefault('name', 'test')
    sg.setdefault('description', 'test-description')
    return sg
def security_group_db(security_group, id=None):
    """Convert a security group template into a fake DB-style record."""
    attrs = dict(security_group)
    if 'tenant_id' in attrs:
        # The DB layer stores the tenant under project_id.
        attrs['project_id'] = attrs.pop('tenant_id')
    if id is not None:
        attrs['id'] = id
    attrs.setdefault('rules', [])
    attrs.setdefault('instances', [])
    return AttrDict(attrs)
def security_group_rule_template(**kwargs):
    """Build a security group rule dict defaulting to TCP port 22."""
    rule = dict(kwargs)
    rule.setdefault('ip_protocol', 'tcp')
    rule.setdefault('from_port', 22)
    rule.setdefault('to_port', 22)
    rule.setdefault('parent_group_id', 2)
    return rule
def security_group_rule_db(rule, id=None):
    """Convert a rule template into a DB-row-style AttrDict.

    Renames 'ip_protocol' to 'protocol' and, when given, sets the row id.
    Previously the ``id`` argument was accepted but silently ignored,
    inconsistent with security_group_db(); it is now honored (no existing
    caller passes it, so behavior is backward compatible).
    """
    attrs = rule.copy()
    if 'ip_protocol' in attrs:
        attrs['protocol'] = attrs.pop('ip_protocol')
    if id is not None:
        attrs['id'] = id
    return AttrDict(attrs)
def return_server(context, server_id,
                  columns_to_join=None, use_slave=False):
    """Stub for instance_get: a RUNNING fake instance with the given id."""
    values = {
        'id': 1,
        'power_state': 0x01,
        'host': 'localhost',
        'uuid': server_id,
        'name': 'asdf',
    }
    return fake_instance.fake_db_instance(**values)
def return_server_by_uuid(context, server_uuid,
                          columns_to_join=None,
                          use_slave=False):
    """Stub for instance_get_by_uuid: a RUNNING fake instance."""
    values = {
        'id': 1,
        'power_state': 0x01,
        'host': 'localhost',
        'uuid': server_uuid,
        'name': 'asdf',
    }
    return fake_instance.fake_db_instance(**values)
def return_non_running_server(context, server_id, columns_to_join=None):
    """Stub for instance_get: a SHUTDOWN (non-running) fake instance."""
    values = {
        'id': 1,
        'power_state': power_state.SHUTDOWN,
        'uuid': server_id,
        'host': 'localhost',
        'name': 'asdf',
    }
    return fake_instance.fake_db_instance(**values)
def return_security_group_by_name(context, project_id, group_name):
    """Stub: security group that already has one instance (UUID_SERVER)."""
    instance = {'id': 1, 'uuid': UUID_SERVER}
    return {'id': 1,
            'name': group_name,
            'instances': [instance]}
def return_security_group_without_instances(context, project_id, group_name):
    """Stub: security group with no instances attached."""
    return dict(id=1, name=group_name)
def return_server_nonexistent(context, server_id, columns_to_join=None):
    # Stub for instance_get/instance_get_by_uuid that simulates a missing
    # instance by raising InstanceNotFound.
    raise exception.InstanceNotFound(instance_id=server_id)
class TestSecurityGroupsV21(test.TestCase):
    """Tests for the v2.1 security-group API controllers.

    Exercises the group controller (CRUD), the per-server controller
    (index) and the action controller (add/removeSecurityGroup) against
    stubbed DB/compute layers.  Subclasses override the *_cls attributes
    to re-run the suite against other controller implementations.
    """
    secgrp_ctl_cls = secgroups_v21.SecurityGroupController
    server_secgrp_ctl_cls = secgroups_v21.ServerSecurityGroupController
    secgrp_act_ctl_cls = secgroups_v21.SecurityGroupActionController
    def setUp(self):
        super(TestSecurityGroupsV21, self).setUp()
        self.controller = self.secgrp_ctl_cls()
        self.server_controller = self.server_secgrp_ctl_cls()
        self.manager = self.secgrp_act_ctl_cls()
        # This needs to be done here to set fake_id because the derived
        # class needs to be called first if it wants to set
        # 'security_group_api' and this setUp method needs to be called.
        if self.controller.security_group_api.id_is_uuid:
            self.fake_id = '11111111-1111-1111-1111-111111111111'
        else:
            self.fake_id = '11111111'
        self.req = fakes.HTTPRequest.blank('')
        self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
    def _assert_no_security_groups_reserved(self, context):
        """Check that no reservations are leaked during tests."""
        result = quota.QUOTAS.get_project_quotas(context, context.project_id)
        self.assertEqual(result['security_groups']['reserved'], 0)
    def _assert_security_groups_in_use(self, project_id, user_id, in_use):
        # Assert the per-user quota usage counter for security groups.
        context = context_maker.get_admin_context()
        result = quota.QUOTAS.get_user_quotas(context, project_id, user_id)
        self.assertEqual(result['security_groups']['in_use'], in_use)
    def test_create_security_group(self):
        sg = security_group_request_template()
        res_dict = self.controller.create(self.req, {'security_group': sg})
        self.assertEqual(res_dict['security_group']['name'], 'test')
        self.assertEqual(res_dict['security_group']['description'],
                         'test-description')
    def test_create_security_group_with_no_name(self):
        sg = security_group_request_template()
        del sg['name']
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req,
                          {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_with_no_description(self):
        sg = security_group_request_template()
        del sg['description']
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_with_empty_description(self):
        sg = security_group_request_template()
        sg['description'] = ""
        try:
            self.controller.create(self.req, {'security_group': sg})
            self.fail('Should have raised BadRequest exception')
        except webob.exc.HTTPBadRequest as exc:
            self.assertEqual('description has a minimum character requirement'
                             ' of 1.', exc.explanation)
        except exception.InvalidInput:
            self.fail('Should have raised BadRequest exception instead of')
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_with_blank_name(self):
        sg = security_group_request_template(name='')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_with_whitespace_name(self):
        sg = security_group_request_template(name=' ')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_with_blank_description(self):
        sg = security_group_request_template(description='')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_with_whitespace_description(self):
        sg = security_group_request_template(description=' ')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_with_duplicate_name(self):
        sg = security_group_request_template()
        # FIXME: Stub out _get instead of creating twice
        self.controller.create(self.req, {'security_group': sg})
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_with_no_body(self):
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, None)
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_with_no_security_group(self):
        body = {'no-securityGroup': None}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body)
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_above_255_characters_name(self):
        sg = security_group_request_template(name='1234567890' * 26)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_above_255_characters_description(self):
        sg = security_group_request_template(description='1234567890' * 26)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_non_string_name(self):
        sg = security_group_request_template(name=12)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_non_string_description(self):
        sg = security_group_request_template(description=12)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['compute.context'])
    def test_create_security_group_quota_limit(self):
        # Fill up to the quota (the 'default' group counts as one already).
        for num in range(1, CONF.quota_security_groups):
            name = 'test%s' % num
            sg = security_group_request_template(name=name)
            res_dict = self.controller.create(self.req, {'security_group': sg})
            self.assertEqual(res_dict['security_group']['name'], name)
        sg = security_group_request_template()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          self.req, {'security_group': sg})
    def test_get_security_group_list(self):
        groups = []
        for i, name in enumerate(['default', 'test']):
            sg = security_group_template(id=i + 1,
                                         name=name,
                                         description=name + '-desc',
                                         rules=[])
            groups.append(sg)
        expected = {'security_groups': groups}
        def return_security_groups(context, project_id):
            return [security_group_db(sg) for sg in groups]
        # NOTE(review): stub targets in this class mix the 'cloud.db.' and
        # 'jacket.cloud.' prefixes — presumably aliases of the same layer;
        # confirm both resolve before relying on a given test.
        self.stub_out('cloud.db.security_group_get_by_project',
                      return_security_groups)
        res_dict = self.controller.index(self.req)
        self.assertEqual(res_dict, expected)
    def test_get_security_group_list_missing_group_id_rule(self):
        groups = []
        rule1 = security_group_rule_template(cidr='10.2.3.124/24',
                                             parent_group_id=1,
                                             group_id={}, id=88,
                                             protocol='TCP')
        rule2 = security_group_rule_template(cidr='10.2.3.125/24',
                                             parent_group_id=1,
                                             id=99, protocol=88,
                                             group_id='HAS_BEEN_DELETED')
        sg = security_group_template(id=1,
                                     name='test',
                                     description='test-desc',
                                     rules=[rule1, rule2])
        groups.append(sg)
        # An expected rule here needs to be created as the api returns
        # different attributes on the rule for a response than what was
        # passed in. For example:
        # "cidr": "0.0.0.0/0" ->"ip_range": {"cidr": "0.0.0.0/0"}
        expected_rule = security_group_rule_template(
            ip_range={'cidr': '10.2.3.124/24'}, parent_group_id=1,
            group={}, id=88, ip_protocol='TCP')
        expected = security_group_template(id=1,
                                           name='test',
                                           description='test-desc',
                                           rules=[expected_rule])
        expected = {'security_groups': [expected]}
        def return_security_groups(context, project, search_opts):
            return [security_group_db(sg) for sg in groups]
        self.stubs.Set(self.controller.security_group_api, 'list',
                       return_security_groups)
        res_dict = self.controller.index(self.req)
        self.assertEqual(res_dict, expected)
    def test_get_security_group_list_all_tenants(self):
        all_groups = []
        tenant_groups = []
        for i, name in enumerate(['default', 'test']):
            sg = security_group_template(id=i + 1,
                                         name=name,
                                         description=name + '-desc',
                                         rules=[])
            all_groups.append(sg)
            if name == 'default':
                tenant_groups.append(sg)
        # NOTE(review): 'all' shadows the builtin all() within this test.
        all = {'security_groups': all_groups}
        tenant_specific = {'security_groups': tenant_groups}
        def return_all_security_groups(context):
            return [security_group_db(sg) for sg in all_groups]
        self.stub_out('jacket.cloud.security_group_get_all',
                      return_all_security_groups)
        def return_tenant_security_groups(context, project_id):
            return [security_group_db(sg) for sg in tenant_groups]
        self.stub_out('jacket.cloud.security_group_get_by_project',
                      return_tenant_security_groups)
        path = '/v2/fake/os-security-groups'
        req = fakes.HTTPRequest.blank(path, use_admin_context=True)
        res_dict = self.controller.index(req)
        self.assertEqual(res_dict, tenant_specific)
        req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
                                      use_admin_context=True)
        res_dict = self.controller.index(req)
        self.assertEqual(res_dict, all)
    def test_get_security_group_by_instance(self):
        groups = []
        for i, name in enumerate(['default', 'test']):
            sg = security_group_template(id=i + 1,
                                         name=name,
                                         description=name + '-desc',
                                         rules=[])
            groups.append(sg)
        expected = {'security_groups': groups}
        def return_instance(context, server_id,
                            columns_to_join=None, use_slave=False):
            self.assertEqual(server_id, FAKE_UUID1)
            return return_server_by_uuid(context, server_id)
        self.stub_out('jacket.cloud.instance_get_by_uuid',
                      return_instance)
        def return_security_groups(context, instance_uuid):
            self.assertEqual(instance_uuid, FAKE_UUID1)
            return [security_group_db(sg) for sg in groups]
        self.stub_out('cloud.db.security_group_get_by_instance',
                      return_security_groups)
        res_dict = self.server_controller.index(self.req, FAKE_UUID1)
        self.assertEqual(res_dict, expected)
    @mock.patch('jacket.cloud.instance_get_by_uuid')
    @mock.patch('cloud.db.security_group_get_by_instance', return_value=[])
    def test_get_security_group_empty_for_instance(self, mock_sec_group,
                                                   mock_db_get_ins):
        expected = {'security_groups': []}
        def return_instance(context, server_id,
                            columns_to_join=None, use_slave=False):
            self.assertEqual(server_id, FAKE_UUID1)
            return return_server_by_uuid(context, server_id)
        mock_db_get_ins.side_effect = return_instance
        res_dict = self.server_controller.index(self.req, FAKE_UUID1)
        self.assertEqual(expected, res_dict)
        mock_sec_group.assert_called_once_with(
            self.req.environ['compute.context'], FAKE_UUID1)
    def test_get_security_group_by_instance_non_existing(self):
        self.stub_out('jacket.cloud.instance_get', return_server_nonexistent)
        self.stub_out('jacket.cloud.instance_get_by_uuid',
                      return_server_nonexistent)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.server_controller.index, self.req, '1')
    def test_get_security_group_by_instance_invalid_id(self):
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.server_controller.index, self.req, 'invalid')
    def test_get_security_group_by_id(self):
        sg = security_group_template(id=2, rules=[])
        def return_security_group(context, group_id):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stub_out('cloud.db.security_group_get',
                      return_security_group)
        res_dict = self.controller.show(self.req, '2')
        expected = {'security_group': sg}
        self.assertEqual(res_dict, expected)
    def test_get_security_group_by_invalid_id(self):
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          self.req, 'invalid')
    def test_get_security_group_by_non_existing_id(self):
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          self.req, self.fake_id)
    def test_update_security_group(self):
        sg = security_group_template(id=2, rules=[])
        sg_update = security_group_template(id=2, rules=[],
                        name='update_name', description='update_desc')
        def return_security_group(context, group_id):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        def return_update_security_group(context, group_id, values,
                                         columns_to_join=None):
            self.assertEqual(sg_update['id'], group_id)
            self.assertEqual(sg_update['name'], values['name'])
            self.assertEqual(sg_update['description'], values['description'])
            return security_group_db(sg_update)
        self.stub_out('cloud.db.security_group_update',
                      return_update_security_group)
        self.stub_out('cloud.db.security_group_get',
                      return_security_group)
        res_dict = self.controller.update(self.req, '2',
                                          {'security_group': sg_update})
        expected = {'security_group': sg_update}
        self.assertEqual(res_dict, expected)
    def test_update_security_group_name_to_default(self):
        sg = security_group_template(id=2, rules=[], name='default')
        def return_security_group(context, group_id):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stub_out('jacket.cloud.security_group_get',
                      return_security_group)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, '2', {'security_group': sg})
    def test_update_default_security_group_fail(self):
        sg = security_group_template()
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, '1', {'security_group': sg})
    def test_delete_security_group_by_id(self):
        sg = security_group_template(id=1, project_id='fake_project',
                                     user_id='fake_user', rules=[])
        self.called = False
        def security_group_destroy(context, id):
            self.called = True
        def return_security_group(context, group_id):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stub_out('cloud.db.security_group_destroy',
                      security_group_destroy)
        self.stub_out('jacket.cloud.security_group_get',
                      return_security_group)
        self.controller.delete(self.req, '1')
        self.assertTrue(self.called)
    def test_delete_security_group_by_admin(self):
        sg = security_group_request_template()
        self.controller.create(self.req, {'security_group': sg})
        context = self.req.environ['compute.context']
        # Ensure quota usage for security group is correct.
        self._assert_security_groups_in_use(context.project_id,
                                            context.user_id, 2)
        # Delete the security group by admin.
        self.controller.delete(self.admin_req, '2')
        # Ensure quota for security group in use is released.
        self._assert_security_groups_in_use(context.project_id,
                                            context.user_id, 1)
    def test_delete_security_group_by_invalid_id(self):
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          self.req, 'invalid')
    def test_delete_security_group_by_non_existing_id(self):
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          self.req, self.fake_id)
    def test_delete_security_group_in_use(self):
        sg = security_group_template(id=1, rules=[])
        def security_group_in_use(context, id):
            return True
        def return_security_group(context, group_id):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stub_out('cloud.db.security_group_in_use',
                      security_group_in_use)
        self.stub_out('jacket.cloud.security_group_get',
                      return_security_group)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          self.req, '1')
    def test_associate_by_non_existing_security_group_name(self):
        self.stub_out('cloud.db.instance_get', return_server)
        self.assertEqual(return_server(None, '1'),
                         jacket.db.compute.instance_get(None, '1'))
        body = dict(addSecurityGroup=dict(name='non-existing'))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_by_invalid_server_id(self):
        body = dict(addSecurityGroup=dict(name='test'))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._addSecurityGroup, self.req,
                          'invalid', body)
    def test_associate_without_body(self):
        self.stub_out('jacket.cloud.instance_get', return_server)
        body = dict(addSecurityGroup=None)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_no_security_group_name(self):
        self.stub_out('cloud.db.instance_get', return_server)
        body = dict(addSecurityGroup=dict())
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_security_group_name_with_whitespaces(self):
        self.stub_out('cloud.db.instance_get', return_server)
        body = dict(addSecurityGroup=dict(name="   "))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_non_existing_instance(self):
        self.stub_out('jacket.cloud.instance_get', return_server_nonexistent)
        self.stub_out('jacket.cloud.instance_get_by_uuid',
                      return_server_nonexistent)
        body = dict(addSecurityGroup=dict(name="test"))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_non_running_instance(self):
        self.stub_out('cloud.db.instance_get', return_non_running_server)
        self.stub_out('jacket.cloud.instance_get_by_uuid',
                      return_non_running_server)
        self.stub_out('jacket.cloud.security_group_get_by_name',
                      return_security_group_without_instances)
        body = dict(addSecurityGroup=dict(name="test"))
        self.manager._addSecurityGroup(self.req, UUID_SERVER, body)
    def test_associate_already_associated_security_group_to_instance(self):
        self.stub_out('cloud.db.instance_get', return_server)
        self.stub_out('jacket.cloud.instance_get_by_uuid',
                      return_server_by_uuid)
        self.stub_out('jacket.cloud.security_group_get_by_name',
                      return_security_group_by_name)
        body = dict(addSecurityGroup=dict(name="test"))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._addSecurityGroup, self.req,
                          UUID_SERVER, body)
    def test_associate(self):
        # Happy path: mox verifies instance_add_security_group is called.
        self.stub_out('jacket.cloud.instance_get', return_server)
        self.stub_out('jacket.cloud.instance_get_by_uuid',
                      return_server_by_uuid)
        self.mox.StubOutWithMock(jacket.db.compute, 'instance_add_security_group')
        jacket.db.compute.instance_add_security_group(mox.IgnoreArg(),
                                            mox.IgnoreArg(),
                                            mox.IgnoreArg())
        self.stub_out('cloud.db.security_group_get_by_name',
                      return_security_group_without_instances)
        self.mox.ReplayAll()
        body = dict(addSecurityGroup=dict(name="test"))
        self.manager._addSecurityGroup(self.req, UUID_SERVER, body)
    def test_disassociate_by_non_existing_security_group_name(self):
        self.stub_out('cloud.db.instance_get', return_server)
        self.assertEqual(return_server(None, '1'),
                         jacket.db.compute.instance_get(None, '1'))
        body = dict(removeSecurityGroup=dict(name='non-existing'))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._removeSecurityGroup, self.req,
                          UUID_SERVER, body)
    def test_disassociate_by_invalid_server_id(self):
        self.stub_out('cloud.db.security_group_get_by_name',
                      return_security_group_by_name)
        body = dict(removeSecurityGroup=dict(name='test'))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._removeSecurityGroup, self.req,
                          'invalid', body)
    def test_disassociate_without_body(self):
        self.stub_out('jacket.cloud.instance_get', return_server)
        body = dict(removeSecurityGroup=None)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, self.req,
                          '1', body)
    def test_disassociate_no_security_group_name(self):
        self.stub_out('jacket.cloud.instance_get', return_server)
        body = dict(removeSecurityGroup=dict())
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, self.req,
                          '1', body)
    def test_disassociate_security_group_name_with_whitespaces(self):
        self.stub_out('cloud.db.instance_get', return_server)
        body = dict(removeSecurityGroup=dict(name="   "))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, self.req,
                          '1', body)
    def test_disassociate_non_existing_instance(self):
        self.stub_out('cloud.db.instance_get', return_server_nonexistent)
        self.stub_out('cloud.db.security_group_get_by_name',
                      return_security_group_by_name)
        body = dict(removeSecurityGroup=dict(name="test"))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._removeSecurityGroup,
                          self.req, '1', body)
    def test_disassociate_non_running_instance(self):
        self.stub_out('cloud.db.instance_get', return_non_running_server)
        self.stub_out('cloud.db.instance_get_by_uuid',
                      return_non_running_server)
        self.stub_out('cloud.db.security_group_get_by_name',
                      return_security_group_by_name)
        body = dict(removeSecurityGroup=dict(name="test"))
        self.manager._removeSecurityGroup(self.req, UUID_SERVER, body)
    def test_disassociate_already_associated_security_group_to_instance(self):
        self.stub_out('jacket.cloud.instance_get', return_server)
        self.stub_out('cloud.db.instance_get_by_uuid',
                      return_server_by_uuid)
        self.stub_out('cloud.db.security_group_get_by_name',
                      return_security_group_without_instances)
        body = dict(removeSecurityGroup=dict(name="test"))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, self.req,
                          UUID_SERVER, body)
    def test_disassociate(self):
        # Happy path: mox verifies instance_remove_security_group is called.
        self.stub_out('cloud.db.instance_get', return_server)
        self.stub_out('cloud.db.instance_get_by_uuid',
                      return_server_by_uuid)
        self.mox.StubOutWithMock(jacket.db.compute, 'instance_remove_security_group')
        jacket.db.compute.instance_remove_security_group(mox.IgnoreArg(),
                                               mox.IgnoreArg(),
                                               mox.IgnoreArg())
        self.stub_out('jacket.cloud.security_group_get_by_name',
                      return_security_group_by_name)
        self.mox.ReplayAll()
        body = dict(removeSecurityGroup=dict(name="test"))
        self.manager._removeSecurityGroup(self.req, UUID_SERVER, body)
class TestSecurityGroupsV2(TestSecurityGroupsV21):
    """Re-runs the whole v2.1 suite against the legacy v2 extension
    controllers by swapping in the secgroups_v2 controller classes."""
    secgrp_ctl_cls = secgroups_v2.SecurityGroupController
    server_secgrp_ctl_cls = secgroups_v2.ServerSecurityGroupController
    secgrp_act_ctl_cls = secgroups_v2.SecurityGroupActionController
class TestSecurityGroupRulesV21(test.TestCase):
secgrp_ctl_cls = secgroups_v21.SecurityGroupRulesController
def setUp(self):
super(TestSecurityGroupRulesV21, self).setUp()
self.controller = self.secgrp_ctl_cls()
if self.controller.security_group_api.id_is_uuid:
id1 = '11111111-1111-1111-1111-111111111111'
id2 = '22222222-2222-2222-2222-222222222222'
self.invalid_id = '33333333-3333-3333-3333-333333333333'
else:
id1 = 1
id2 = 2
self.invalid_id = '33333333'
self.sg1 = security_group_template(id=id1)
self.sg2 = security_group_template(
id=id2, name='authorize_revoke',
description='authorize-revoke testing')
db1 = security_group_db(self.sg1)
db2 = security_group_db(self.sg2)
def return_security_group(context, group_id, columns_to_join=None):
if group_id == db1['id']:
return db1
if group_id == db2['id']:
return db2
raise exception.SecurityGroupNotFound(security_group_id=group_id)
self.stub_out('cloud.db.security_group_get',
return_security_group)
self.parent_security_group = db2
self.req = fakes.HTTPRequest.blank('')
def test_create_by_cidr(self):
rule = security_group_rule_template(cidr='10.2.3.124/24',
parent_group_id=self.sg2['id'])
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"10.2.3.124/24")
def test_create_by_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg2['id'])
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
def test_create_by_same_group_id(self):
rule1 = security_group_rule_template(group_id=self.sg1['id'],
from_port=80, to_port=80,
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
rule2 = security_group_rule_template(group_id=self.sg1['id'],
from_port=81, to_port=81,
parent_group_id=self.sg2['id'])
res_dict = self.controller.create(self.req,
{'security_group_rule': rule2})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEqual(security_group_rule['from_port'], 81)
self.assertEqual(security_group_rule['to_port'], 81)
def test_create_none_value_from_to_port(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id']}
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertIsNone(security_group_rule['from_port'])
self.assertIsNone(security_group_rule['to_port'])
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_icmp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'ICMP'}
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'ICMP')
self.assertEqual(security_group_rule['from_port'], -1)
self.assertEqual(security_group_rule['to_port'], -1)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_tcp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'TCP'}
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'TCP')
self.assertEqual(security_group_rule['from_port'], 1)
self.assertEqual(security_group_rule['to_port'], 65535)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_by_invalid_cidr_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=22,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/2433")
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_by_invalid_tcp_port_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=75534,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_by_invalid_icmp_port_json(self):
rule = security_group_rule_template(
ip_protocol="icmp",
from_port=1,
to_port=256,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_cidr(self):
rule = security_group_rule_template(cidr='10.0.0.0/24',
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
rule = security_group_rule_template(group_id=1)
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_no_body(self):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, None)
def test_create_with_no_security_group_rule_in_body(self):
rules = {'test': 'test'}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, rules)
def test_create_with_invalid_parent_group_id(self):
rule = security_group_rule_template(parent_group_id='invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_non_existing_parent_group_id(self):
rule = security_group_rule_template(group_id=None,
parent_group_id=self.invalid_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_non_existing_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.sg2['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_invalid_protocol(self):
rule = security_group_rule_template(ip_protocol='invalid-protocol',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_no_protocol(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['ip_protocol']
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_invalid_from_port(self):
rule = security_group_rule_template(from_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_invalid_to_port(self):
rule = security_group_rule_template(to_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_non_numerical_from_port(self):
rule = security_group_rule_template(from_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_non_numerical_to_port(self):
rule = security_group_rule_template(to_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_no_from_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['from_port']
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_no_to_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['to_port']
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_invalid_cidr(self):
rule = security_group_rule_template(cidr='10.2.2222.0/24',
parent_group_id=self.sg2['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_no_cidr_group(self):
    # With neither cidr nor group supplied, the rule defaults to the
    # allow-all IPv4 range 0.0.0.0/0.
    rule = security_group_rule_template(parent_group_id=self.sg2['id'])
    res_dict = self.controller.create(self.req,
                                      {'security_group_rule': rule})
    security_group_rule = res_dict['security_group_rule']
    self.assertNotEqual(security_group_rule['id'], 0)
    self.assertEqual(security_group_rule['parent_group_id'],
                     self.parent_security_group['id'])
    self.assertEqual(security_group_rule['ip_range']['cidr'],
                     "0.0.0.0/0")

def test_create_with_invalid_group_id(self):
    # Malformed source-group reference -> 400.
    rule = security_group_rule_template(group_id='invalid',
                                        parent_group_id=self.sg2['id'])
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      self.req, {'security_group_rule': rule})

def test_create_with_empty_group_id(self):
    rule = security_group_rule_template(group_id='',
                                        parent_group_id=self.sg2['id'])
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      self.req, {'security_group_rule': rule})

def test_create_with_nonexist_group_id(self):
    # Well-formed but unknown source-group id -> 404.
    rule = security_group_rule_template(group_id=self.invalid_id,
                                        parent_group_id=self.sg2['id'])
    self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                      self.req, {'security_group_rule': rule})

def test_create_with_same_group_parent_id_and_group_id(self):
    # A group may reference itself as the source of allowed traffic.
    rule = security_group_rule_template(group_id=self.sg1['id'],
                                        parent_group_id=self.sg1['id'])
    res_dict = self.controller.create(self.req,
                                      {'security_group_rule': rule})
    security_group_rule = res_dict['security_group_rule']
    self.assertNotEqual(security_group_rule['id'], 0)
    self.assertEqual(security_group_rule['parent_group_id'],
                     self.sg1['id'])
    self.assertEqual(security_group_rule['group']['name'],
                     self.sg1['name'])
def _test_create_with_no_ports_and_no_group(self, proto):
    # Without a source group, omitting the port range is an error.
    rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      self.req, {'security_group_rule': rule})

def _test_create_with_no_ports(self, proto):
    # With a source group, the port range defaults to 1-65535
    # (-1/-1 for icmp, meaning all types/codes).
    rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
            'group_id': self.sg1['id']}
    res_dict = self.controller.create(self.req,
                                      {'security_group_rule': rule})
    security_group_rule = res_dict['security_group_rule']
    expected_rule = {
        'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
        'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
        self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
    }
    if proto == 'icmp':
        expected_rule['to_port'] = -1
        expected_rule['from_port'] = -1
    self.assertEqual(expected_rule, security_group_rule)

def test_create_with_no_ports_icmp(self):
    self._test_create_with_no_ports_and_no_group('icmp')
    self._test_create_with_no_ports('icmp')

def test_create_with_no_ports_tcp(self):
    self._test_create_with_no_ports_and_no_group('tcp')
    self._test_create_with_no_ports('tcp')

def test_create_with_no_ports_udp(self):
    self._test_create_with_no_ports_and_no_group('udp')
    self._test_create_with_no_ports('udp')
def _test_create_with_ports(self, proto, from_port, to_port):
    # Explicit port range is echoed back unchanged in the created rule.
    rule = {
        'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
        'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
    }
    res_dict = self.controller.create(self.req,
                                      {'security_group_rule': rule})
    security_group_rule = res_dict['security_group_rule']
    expected_rule = {
        'from_port': from_port,
        'group': {'tenant_id': '123', 'name': 'test'},
        'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
        self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
    }
    self.assertEqual(proto, security_group_rule['ip_protocol'])
    self.assertEqual(from_port, security_group_rule['from_port'])
    self.assertEqual(to_port, security_group_rule['to_port'])
    self.assertEqual(expected_rule, security_group_rule)

def test_create_with_ports_icmp(self):
    # For icmp, from/to carry ICMP type and code rather than ports.
    self._test_create_with_ports('icmp', 0, 1)
    self._test_create_with_ports('icmp', 0, 0)
    self._test_create_with_ports('icmp', 1, 0)

def test_create_with_ports_tcp(self):
    # Boundary values of the valid TCP port range.
    self._test_create_with_ports('tcp', 1, 1)
    self._test_create_with_ports('tcp', 1, 65535)
    self._test_create_with_ports('tcp', 65535, 65535)

def test_create_with_ports_udp(self):
    self._test_create_with_ports('udp', 1, 1)
    self._test_create_with_ports('udp', 1, 65535)
    self._test_create_with_ports('udp', 65535, 65535)
def test_delete(self):
    # Stub out the DB layer so delete() finds and destroys the rule
    # without touching a real database.
    rule = security_group_rule_template(id=self.sg2['id'],
                                        parent_group_id=self.sg2['id'])

    def security_group_rule_get(context, id):
        return security_group_rule_db(rule)

    def security_group_rule_destroy(context, id):
        pass

    # NOTE(review): the two stub targets use different module prefixes
    # ('cloud.db....' vs 'jacket.cloud....'); confirm both match the
    # paths the controller actually imports.
    self.stub_out('cloud.db.security_group_rule_get',
                  security_group_rule_get)
    self.stub_out('jacket.cloud.security_group_rule_destroy',
                  security_group_rule_destroy)
    self.controller.delete(self.req, self.sg2['id'])

def test_delete_invalid_rule_id(self):
    # Non-numeric rule id -> 400.
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                      self.req, 'invalid')

def test_delete_non_existing_rule_id(self):
    # Valid-looking but unknown rule id -> 404.
    self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                      self.req, self.invalid_id)
def test_create_rule_quota_limit(self):
    # Fill the per-group rule quota with distinct single-port rules,
    # then verify that one more is rejected with HTTP 403.
    for num in range(100, 100 + CONF.quota_security_group_rules):
        rule = {
            'ip_protocol': 'tcp', 'from_port': num,
            'to_port': num, 'parent_group_id': self.sg2['id'],
            'group_id': self.sg1['id']
        }
        self.controller.create(self.req, {'security_group_rule': rule})
    rule = {
        'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
        'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
    }
    self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                      self.req, {'security_group_rule': rule})
def test_create_rule_cidr_allow_all(self):
    # Canonical IPv4 allow-all range is accepted and echoed back.
    rule = security_group_rule_template(cidr='0.0.0.0/0',
                                        parent_group_id=self.sg2['id'])
    res_dict = self.controller.create(self.req,
                                      {'security_group_rule': rule})
    security_group_rule = res_dict['security_group_rule']
    self.assertNotEqual(security_group_rule['id'], 0)
    self.assertEqual(security_group_rule['parent_group_id'],
                     self.parent_security_group['id'])
    self.assertEqual(security_group_rule['ip_range']['cidr'],
                     "0.0.0.0/0")

def test_create_rule_cidr_ipv6_allow_all(self):
    # IPv6 allow-all range.
    rule = security_group_rule_template(cidr='::/0',
                                        parent_group_id=self.sg2['id'])
    res_dict = self.controller.create(self.req,
                                      {'security_group_rule': rule})
    security_group_rule = res_dict['security_group_rule']
    self.assertNotEqual(security_group_rule['id'], 0)
    self.assertEqual(security_group_rule['parent_group_id'],
                     self.parent_security_group['id'])
    self.assertEqual(security_group_rule['ip_range']['cidr'],
                     "::/0")

def test_create_rule_cidr_allow_some(self):
    # A specific /8 subnet is accepted.
    rule = security_group_rule_template(cidr='15.0.0.0/8',
                                        parent_group_id=self.sg2['id'])
    res_dict = self.controller.create(self.req,
                                      {'security_group_rule': rule})
    security_group_rule = res_dict['security_group_rule']
    self.assertNotEqual(security_group_rule['id'], 0)
    self.assertEqual(security_group_rule['parent_group_id'],
                     self.parent_security_group['id'])
    self.assertEqual(security_group_rule['ip_range']['cidr'],
                     "15.0.0.0/8")

def test_create_rule_cidr_bad_netmask(self):
    # 15.0.0.0/0 (host bits set with a zero-length mask) is rejected,
    # unlike the canonical 0.0.0.0/0 above.
    rule = security_group_rule_template(cidr='15.0.0.0/0')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      self.req, {'security_group_rule': rule})
class TestSecurityGroupRulesV2(TestSecurityGroupRulesV21):
    # Re-run the entire v2.1 rule test suite against the legacy v2
    # controller; only the controller class differs.
    secgrp_ctl_cls = secgroups_v2.SecurityGroupRulesController
# Fixed instance UUIDs shared by the fake compute stubs below.
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get_all(*args, **kwargs):
    """Stub for compute API get_all(): two instances with two groups each.

    Group names follow the 'fake-<instance>-<group>' pattern asserted by
    the output tests below.
    """
    base = {'id': 1, 'description': 'foo', 'user_id': 'bar',
            'project_id': 'baz', 'deleted': False, 'deleted_at': None,
            'updated_at': None, 'created_at': None}
    inst_list = [
        fakes.stub_instance_obj(
            None, 1, uuid=UUID1,
            security_groups=[dict(base, **{'name': 'fake-0-0'}),
                             dict(base, **{'name': 'fake-0-1'})]),
        fakes.stub_instance_obj(
            None, 2, uuid=UUID2,
            security_groups=[dict(base, **{'name': 'fake-1-0'}),
                             dict(base, **{'name': 'fake-1-1'})])
    ]
    return cloud.InstanceList(cloud=inst_list)


def fake_compute_get(*args, **kwargs):
    """Stub for compute API get(): one instance (UUID3) with two groups."""
    secgroups = cloud.SecurityGroupList()
    secgroups.objects = [
        cloud.SecurityGroup(name='fake-2-0'),
        cloud.SecurityGroup(name='fake-2-1'),
    ]
    inst = fakes.stub_instance_obj(None, 1, uuid=UUID3)
    inst.security_groups = secgroups
    return inst


def fake_compute_create(*args, **kwargs):
    """Stub for compute API create(): ([instance], reservation_id)."""
    return ([fake_compute_get(*args, **kwargs)], '')
def fake_get_instances_security_groups_bindings(inst, context, servers):
    """Stub binding lookup: map each server's id to its fake group names.

    Unknown server ids map to None (dict.get default), mirroring the
    real API's behaviour for instances with no bindings.
    """
    groups = {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
              UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}],
              UUID3: [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]}
    return {server['id']: groups.get(server['id']) for server in servers}
class SecurityGroupsOutputTestV21(test.TestCase):
    """Verify security groups appear in server create/show/detail output."""

    base_url = '/v2/fake/servers'
    content_type = 'application/json'

    def setUp(self):
        super(SecurityGroupsOutputTestV21, self).setUp()
        fakes.stub_out_nw_api(self)
        # Replace the compute API with the fakes defined above so the
        # responses carry the predictable 'fake-N-M' group names.
        self.stubs.Set(cloud.api.API, 'get', fake_compute_get)
        self.stubs.Set(cloud.api.API, 'get_all', fake_compute_get_all)
        self.stubs.Set(cloud.api.API, 'create', fake_compute_create)
        self.flags(
            osapi_compute_extension=[
                'cloud.api.openstack.cloud.contrib.select_extensions'],
            osapi_compute_ext_list=['Security_groups'])
        self.app = self._setup_app()

    def _setup_app(self):
        # Overridden by the v2 subclass to build the legacy WSGI app.
        return fakes.wsgi_app_v21(init_only=('os-security-groups', 'servers'))

    def _make_request(self, url, body=None):
        # GET by default; POST with a JSON-encoded body when one is given.
        req = webob.Request.blank(url)
        if body:
            req.method = 'POST'
            req.body = encodeutils.safe_encode(self._encode_body(body))
        req.content_type = self.content_type
        req.headers['Accept'] = self.content_type
        res = req.get_response(self.app)
        return res

    def _encode_body(self, body):
        return jsonutils.dumps(body)

    def _get_server(self, body):
        return jsonutils.loads(body).get('server')

    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')

    def _get_groups(self, server):
        return server.get('security_groups')

    def test_create(self):
        # A created server reports the groups attached by the fake API.
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        res = self._make_request(self.base_url, {'server': server})
        self.assertEqual(res.status_int, 202)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)

    def test_show(self):
        url = self.base_url + '/' + UUID3
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)

    def test_detail(self):
        # Every server in the detail listing carries its own group names.
        url = self.base_url + '/detail'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        for i, server in enumerate(self._get_servers(res.body)):
            for j, group in enumerate(self._get_groups(server)):
                name = 'fake-%s-%s' % (i, j)
                self.assertEqual(group.get('name'), name)

    def test_no_instance_passthrough_404(self):
        # InstanceNotFound from the compute layer surfaces as HTTP 404.
        def fake_compute_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')

        self.stubs.Set(cloud.api.API, 'get', fake_compute_get)
        url = self.base_url + '/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 404)
class SecurityGroupsOutputTestV2(SecurityGroupsOutputTestV21):
    # Same assertions as V21, run against the legacy v2 WSGI app.
    def _setup_app(self):
        return fakes.wsgi_app(init_only=('servers',))
class SecurityGroupsOutputPolicyEnforcementV21(test.NoDBTestCase):
    """Policy checks for the security-groups output decorator.

    Policy is set to a rule this request context cannot satisfy; the
    output controller must soft-fail (skip adding groups) rather than
    raise, so the neutron driver is never consulted.
    """

    def setUp(self):
        super(SecurityGroupsOutputPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupsOutputController()
        self.req = fakes.HTTPRequest.blank('')
        self.rule_name = "os_compute_api:os-security-groups"
        self.rule = {self.rule_name: "project:non_fake"}
        self.policy.set_rules(self.rule)
        self.fake_res = wsgi.ResponseObject({
            'server': {'id': '0'},
            'servers': [{'id': '0'}, {'id': '2'}]})

    @mock.patch.object(secgroups_v21, "softauth")
    def test_show_policy_softauth_is_called(self, mock_softauth):
        mock_softauth.return_value = False
        self.controller.show(self.req, self.fake_res, FAKE_UUID1)
        self.assertTrue(mock_softauth.called)

    @mock.patch.object(jacket.compute.network.security_group.openstack_driver,
                       "is_neutron_security_groups")
    def test_show_policy_failed(self, is_neutron_security_groups):
        self.controller.show(self.req, self.fake_res, FAKE_UUID1)
        self.assertFalse(is_neutron_security_groups.called)

    @mock.patch.object(secgroups_v21, "softauth")
    def test_create_policy_softauth_is_called(self, mock_softauth):
        mock_softauth.return_value = False
        # BUG FIX: this test previously called show(); it must exercise
        # create() to match its name and the _failed counterpart below.
        self.controller.create(self.req, self.fake_res, {})
        self.assertTrue(mock_softauth.called)

    @mock.patch.object(jacket.compute.network.security_group.openstack_driver,
                       "is_neutron_security_groups")
    def test_create_policy_failed(self, is_neutron_security_groups):
        self.controller.create(self.req, self.fake_res, {})
        self.assertFalse(is_neutron_security_groups.called)

    @mock.patch.object(secgroups_v21, "softauth")
    def test_detail_policy_softauth_is_called(self, mock_softauth):
        mock_softauth.return_value = False
        self.controller.detail(self.req, self.fake_res)
        self.assertTrue(mock_softauth.called)

    @mock.patch.object(jacket.compute.network.security_group.openstack_driver,
                       "is_neutron_security_groups")
    def test_detail_policy_failed(self, is_neutron_security_groups):
        self.controller.detail(self.req, self.fake_res)
        self.assertFalse(is_neutron_security_groups.called)
class PolicyEnforcementV21(test.NoDBTestCase):
    """Base class asserting an API call is rejected by a failing policy."""

    def setUp(self):
        super(PolicyEnforcementV21, self).setUp()
        self.req = fakes.HTTPRequest.blank('')
        self.rule_name = "os_compute_api:os-security-groups"
        self.rule = {self.rule_name: "project:non_fake"}

    def _common_policy_check(self, func, *arg, **kwarg):
        # Install a rule this request cannot satisfy, then verify the
        # call raises PolicyNotAuthorized with the expected message.
        self.policy.set_rules(self.rule)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *arg, **kwarg)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % self.rule_name,
            exc.format_message())
class SecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
    """Policy failures for every SecurityGroupController action."""

    def setUp(self):
        super(SecurityGroupPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupController()

    def test_create_policy_failed(self):
        self._common_policy_check(self.controller.create, self.req, {})

    def test_show_policy_failed(self):
        self._common_policy_check(self.controller.show, self.req, FAKE_UUID1)

    def test_delete_policy_failed(self):
        self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)

    def test_index_policy_failed(self):
        self._common_policy_check(self.controller.index, self.req)

    def test_update_policy_failed(self):
        self._common_policy_check(
            self.controller.update, self.req, FAKE_UUID1, {})
class ServerSecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
    """Policy failure for the per-server security group listing."""

    def setUp(self):
        super(ServerSecurityGroupPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.ServerSecurityGroupController()

    def test_index_policy_failed(self):
        self._common_policy_check(self.controller.index, self.req, FAKE_UUID1)
class SecurityGroupRulesPolicyEnforcementV21(PolicyEnforcementV21):
    """Policy failures for rule create/delete."""

    def setUp(self):
        super(SecurityGroupRulesPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupRulesController()

    def test_create_policy_failed(self):
        self._common_policy_check(self.controller.create, self.req, {})

    def test_delete_policy_failed(self):
        self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
class SecurityGroupActionPolicyEnforcementV21(PolicyEnforcementV21):
    """Policy failures for the add/remove server actions."""

    def setUp(self):
        super(SecurityGroupActionPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupActionController()

    def test_add_security_group_policy_failed(self):
        self._common_policy_check(
            self.controller._addSecurityGroup, self.req, FAKE_UUID1, {})

    def test_remove_security_group_policy_failed(self):
        self._common_policy_check(
            self.controller._removeSecurityGroup, self.req, FAKE_UUID1, {})
| |
import json
import os
import pickle
import re
import subprocess
import sys
import time
import traceback
from imp import load_source
from importlib import import_module
from pkgutil import find_loader
from os.path import basename, exists
from subprocess import check_output
import numpy as np
from seisflows.tools import msg
class Struct(dict):
    """Dictionary whose entries are also readable/writable as attributes."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Point the attribute namespace at the mapping itself so that
        # obj.key and obj['key'] share one storage.
        self.__dict__ = self
def call(*args, **kwargs):
    """Run a command via subprocess.check_call, defaulting to shell=True.

    Raises subprocess.CalledProcessError on a nonzero exit status.

    NOTE: shell=True executes the command through the shell; never pass
    untrusted input through this helper.
    """
    # setdefault is the idiomatic form of "set only if absent".
    kwargs.setdefault('shell', True)
    subprocess.check_call(*args, **kwargs)
def diff(list1, list2):
    """ Difference between two lists

    Returns the symmetric difference: elements present in exactly one of
    the two lists (order unspecified, duplicates collapsed).
    """
    # union-minus-intersection is exactly the symmetric difference;
    # use the dedicated set operator instead of computing both parts.
    return list(set(list1) ^ set(list2))
def divides(i, j):
    """True if j divides i (False when j is 0)."""
    # BUG FIX: the original tested ``j is 0`` — an identity comparison
    # that relies on CPython's small-int caching and raises a
    # SyntaxWarning on modern Pythons.  Use value equality.
    if j == 0:
        return False
    return i % j == 0
def exists(names):
    """Wrapper for os.path.exists

    Accepts a single path or a list/tuple of paths; returns True only if
    every entry is a non-empty string naming an existing path.  Raises
    TypeError on non-string entries.

    NOTE(review): this function shadows the ``exists`` imported from
    os.path at module top, and ``basestring`` makes it Python-2-only.
    Because the trailing ``else`` belongs to the ``for`` loop, an empty
    list argument returns True — confirm callers never pass one.
    """
    for name in iterable(names):
        if not name:
            return False
        elif not isinstance(name, basestring):
            raise TypeError
        elif not os.path.exists(name):
            return False
    else:
        # Reached only when the loop finishes without an early return.
        return True
def findpath(name):
    """Return the absolute source path of the named module.

    Compiled files (.pyc) are mapped back to their .py source, and a
    package's trailing ``__init__.py`` is stripped so packages resolve
    to their directory.
    """
    module_file = import_module(name).__file__
    for pattern, replacement in (('.pyc$', '.py'), ('__init__.py$', '')):
        module_file = re.sub(pattern, replacement, module_file)
    return module_file
def iterable(arg):
    """Pass lists/tuples through unchanged; wrap anything else in a list."""
    return arg if isinstance(arg, (list, tuple)) else [arg]
def module_exists(name):
    """Return a truthy loader if *name* is importable, else None.

    NOTE: pkgutil.find_loader is deprecated on modern Pythons in favour
    of importlib.util.find_spec.
    """
    return find_loader(name)


def package_exists(name):
    # Identical to module_exists(); kept as a separate name for callers
    # that distinguish packages from modules.
    return find_loader(name)
def pkgpath(name):
    """Return the seisflows package path containing '<name>/seisflows'.

    Implicitly returns None when no entry of the package's __path__
    matches.
    """
    for path in import_module('seisflows').__path__:
        if name+'/seisflows' in path:
            return path
def timestamp():
    """Return the current local wall-clock time as an HH:MM:SS string."""
    now = time.localtime()
    return time.strftime('%H:%M:%S', now)
def loadobj(filename):
    """Read a pickled object back from *filename*."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def saveobj(filename, obj):
    """Pickle *obj* to *filename*, overwriting any existing file."""
    with open(filename, 'wb') as handle:
        pickle.dump(obj, handle)
def loadjson(filename):
    """Parse the JSON document stored in *filename*."""
    with open(filename, 'rb') as handle:
        return json.load(handle)
def savejson(filename, obj):
    """Save object using json

    Writes sorted keys with 4-space indentation.

    BUG FIX: the file is now opened in text mode — ``json.dump`` emits
    ``str``, which a binary-mode ('wb') handle rejects under Python 3.
    """
    with open(filename, 'w') as file:
        json.dump(obj, file, sort_keys=True, indent=4)
def loadpy(filename):
    """Execute *filename* as a module and return its public names.

    The file is imported with imp.load_source; names beginning with an
    underscore are stripped.  Returns a Struct mapping name -> value.
    Raises IOError when the file does not exist.

    NOTE(review): the ``print`` statement makes this module Python-2
    only, and ``exists`` here is the local wrapper defined above, not
    os.path.exists.
    """
    if not exists(filename):
        print msg.FileError % filename
        raise IOError

    # load module
    name = re.sub('.py$', '', basename(filename))
    module = load_source(name, filename)

    # strip private attributes
    output = Struct()
    for key, val in vars(module).items():
        if key[0] != '_':
            output[key] = val
    return output
def loadnpy(filename):
    """Read an array back from a numpy binary (.npy) file."""
    return np.load(filename)
def savenpy(filename, v):
    """Write *v* to a numpy binary file named exactly *filename*.

    np.save insists on appending a '.npy' suffix, so save to
    ``filename.npy`` and rename the result back to the requested name.
    """
    np.save(filename, v)
    os.rename(filename + '.npy', filename)
def loadyaml(filename):
    """Load a YAML mapping, translating the string 'None' to Python None.

    Returns the parsed dictionary.
    """
    import yaml

    with open(filename, 'rb') as file:
        # safe_load refuses arbitrary object construction; yaml.load
        # without an explicit Loader is deprecated and unsafe on
        # untrusted input.
        parsed = yaml.safe_load(file)

    # replace 'None' placeholders with real None (also avoids shadowing
    # the builtin ``dict`` as the original local variable did)
    if 'None' in parsed.values():
        for key, val in parsed.items():
            if val == 'None':
                parsed[key] = None

    return parsed
def getset(arg):
    """Coerce *arg* into a set.

    None/empty -> empty set; a single string -> one-element set; any
    other iterable -> set of its elements.

    NOTE: ``basestring`` restricts this function to Python 2.
    """
    if not arg:
        return set()
    elif isinstance(arg, basestring):
        # A bare string must not be exploded into characters.
        return set([arg])
    else:
        return set(arg)
def loadtxt(filename):
    """Read a single scalar value from a text file."""
    value = np.loadtxt(filename)
    return float(value)
def savetxt(filename, v):
    """Write the scalar *v* to a text file in '%11.6e' format."""
    np.savetxt(filename, [v], fmt='%11.6e')
def nproc():
    """Return the number of processors on this machine.

    Tries the ``nproc`` utility first and falls back to parsing
    /proc/cpuinfo.
    """
    try:
        return _nproc1()
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return _nproc2()
def _nproc1():
    """Query the processor count via the ``nproc`` command-line tool.

    Raises EnvironmentError when the tool is unavailable.

    NOTE(review): ``which`` is neither defined nor imported anywhere in
    this module — presumably supplied by a sibling module; confirm.
    """
    # get number of processors using nproc
    if not which('nproc'):
        raise EnvironmentError
    stdout = check_output('nproc --all', shell=True)
    nproc = int(stdout.strip())
    return nproc
def _nproc2():
    """Count processors by parsing /proc/cpuinfo.

    Raises EnvironmentError when /proc/cpuinfo is absent (non-Linux).
    """
    # get number of processors using /proc/cpuinfo
    if not exists('/proc/cpuinfo'):
        raise EnvironmentError
    stdout = check_output("cat /proc/cpuinfo | awk '/^processor/{print $3}'",
                          shell=True)
    # BUG FIX: split('\n') counted a trailing empty entry after the
    # final newline, overstating the count by one; splitlines() does not.
    nproc = len(stdout.splitlines())
    return nproc
| |
"""
Named ellipsoid classes that can be created or parsed.
"""
from . import parameters
def find(ellipsname, crstype, strict=False):
    """
    Search for a ellipsoid name located in this module.

    Arguments:

    - **ellipsname**: The ellipsoid name to search for.
    - **crstype**: Which CRS naming convention to search (different
        CRS formats have different names for the same ellipsoid).
    - **strict** (optional): If False, ignores minor name mismatches
        such as underscore or character casing, otherwise must be exact
        match (defaults to False).

    Returns the matching ellipsoid class, or None when nothing matches.
    """
    if not strict:
        ellipsname = ellipsname.lower().replace(" ", "_")
    for itemname, item in globals().items():
        if itemname.startswith("_") or itemname == 'Ellipsoid':
            continue
        # Only Ellipsoid subclasses carry a ``name`` object with per-CRS
        # aliases; skip everything else in the module namespace.  The
        # original bare ``except: pass`` hid all errors — catch only the
        # expected missing-attribute case.
        try:
            candidate = getattr(item.name, crstype)
        except AttributeError:
            continue
        if not strict:
            candidate = candidate.lower().replace(" ", "_")
        if ellipsname == candidate:
            return item
    return None
##+ellps Ellipsoid name (see `proj -le`)
class Ellipsoid:
    """The ellipsoid that defines the shape of the earth.

    Class attributes hold format keywords and default parameter values;
    subclasses (or per-instance kwargs) supply the defining constants.
    """

    # Keywords used when serializing to each CRS format.
    proj4 = "+ellps"
    ogc_wkt = "SPHEROID"
    esri_wkt = "SPHEROID"

    # Defaults, overridden by subclasses or __init__ kwargs.
    name = None
    semimaj_ax = None
    semimin_ax = None
    flat = None
    inv_flat = None

    def __init__(self, **kwargs):
        """
        The ellipsoid that defines the shape of the earth.
        To sufficiently define an ellipsoid, either set semimaj_ax + semimin_ax, semimaj_ax + flat,
        or semimaj_ax + inv_flat.

        Arguments:

        - **name**: A pycrs.ellipsoids.EllipsoidName instance with the name given by each supported format.
        - **semimaj_ax**: A pycrs.parameters.SemiMajorRadius representing the radius of the semimajor axis.
        - **semimin_ax**: A pycrs.parameters.SemiMinorRadius representing the radius of the semiminor axis.
        - **flat**: A pycrs.parameters.Flattening representing the flattening factor.
        - **inv_flat**: A pycrs.parameters.InverseFlattening representing the inverse flattening factor.
        """
        self.name = kwargs.get('name', self.name)
        self.semimaj_ax = kwargs.get('semimaj_ax', self.semimaj_ax)
        self.semimin_ax = kwargs.get('semimin_ax', self.semimin_ax)
        self.flat = kwargs.get('flat', self.flat)
        self.inv_flat = kwargs.get('inv_flat', self.inv_flat)

    def _get_flat(self):
        """Derive the flattening factor from whichever parameters are set."""
        if self.flat:
            # flattening given directly
            flat = self.flat.value
        elif self.semimaj_ax and self.semimin_ax:
            # calculate flattening from semimajor and minor radius
            a = float(self.semimaj_ax.value)
            b = float(self.semimin_ax.value)
            flat = (a - b) / float(a)
        elif self.inv_flat:
            # calculate flattening from the inverse flattening
            flat = 1 / float(self.inv_flat.value)
        else:
            raise Exception("Cannot get ellipsoid flattening, needs either semimaj_ax + semimin_ax, semimaj_ax + flat, or semimaj_ax + inv_flat")
        return flat

    def _get_wkt_invflat(self):
        """Inverse flattening for WKT output, with the sphere special case."""
        # WKT is special in that it falsely sets the inverse flattening to 0 for perfect spheres
        # mathematically, when flattening is 0, then the inverse undefined
        if self.inv_flat:
            inv_flat = self.inv_flat.value
        else:
            flat = self._get_flat()
            if flat == 0:
                inv_flat = 0  # special WKT handling
            else:
                inv_flat = 1 / float(flat)
        return inv_flat

    def to_proj4(self):
        """Serialize to a proj4 fragment ('+ellps=... +a=... ...')."""
        proj4 = []
        if self.name.proj4:
            # ellipsoid name
            proj4.append("+ellps=%s" % self.name.proj4)
        if self.semimaj_ax:
            proj4.append(self.semimaj_ax.to_proj4())
        # include just one of semiminor, flattening, or inverse flattening (all aspects of the same)
        # TODO: If has name matching a predefined ellipsoid, maybe consider comparing and only reporting
        # those values that differ.
        if self.semimin_ax:
            proj4.append(self.semimin_ax.to_proj4())
        elif self.inv_flat:
            proj4.append(self.inv_flat.to_proj4())
        elif self.flat:
            proj4.append(self.flat.to_proj4())
        if not proj4:
            raise Exception("Not enough information to export the ellipsoid to proj4")
        return " ".join(proj4)

    def to_ogc_wkt(self):
        """Serialize to an OGC WKT SPHEROID[...] fragment."""
        inv_flat = self._get_wkt_invflat()
        return 'SPHEROID["%s", %s, %s]' % (self.name.ogc_wkt, self.semimaj_ax.value, inv_flat)

    def to_esri_wkt(self):
        """Serialize to an ESRI WKT SPHEROID[...] fragment."""
        inv_flat = self._get_wkt_invflat()
        return 'SPHEROID["%s", %s, %s]' % (self.name.esri_wkt, self.semimaj_ax.value, inv_flat)

    def to_geotiff(self):
        # GeoTIFF export not implemented.
        pass
        #return "GeogEllipsoid"
class EllipsoidName:
    """Per-format spellings of one ellipsoid's name."""

    def __init__(self, proj4="", ogc_wkt="", esri_wkt=""):
        # Empty string means the format has no name for this ellipsoid.
        self.proj4, self.ogc_wkt, self.esri_wkt = proj4, ogc_wkt, esri_wkt
# Specific predefined ellipsoid classes
# Each subclass pins the defining constants of a well-known ellipsoid;
# radii are in metres.

class WGS84(Ellipsoid):
    name = EllipsoidName(
        proj4 = "WGS84",
        ogc_wkt = "WGS_1984",
        esri_wkt = "WGS_1984",
    )
    semimaj_ax = parameters.SemiMajorRadius(6378137.0)
    inv_flat = parameters.InverseFlattening(298.257223563)


class WGS72(Ellipsoid):
    name = EllipsoidName(
        proj4 = "WGS72",
        ogc_wkt = "WGS 72",
        esri_wkt = "WGS_1972",
    )
    semimaj_ax = parameters.SemiMajorRadius(6378135.0)
    inv_flat = parameters.InverseFlattening(298.26)


class International(Ellipsoid):
    name = EllipsoidName(
        proj4 = "intl",
        ogc_wkt = "International_1924",
        esri_wkt = "International_1924",
    )
    semimaj_ax = parameters.SemiMajorRadius(6378388.0)
    inv_flat = parameters.InverseFlattening(297.0)


class GRS80(Ellipsoid):
    name = EllipsoidName(
        proj4 = "GRS80",
        ogc_wkt = "GRS_1980",
        esri_wkt = "GRS_1980",
    )
    semimaj_ax = parameters.SemiMajorRadius(6378137.0)
    inv_flat = parameters.InverseFlattening(298.257222101)


class Clarke1866(Ellipsoid):
    name = EllipsoidName(
        proj4 = "clrk66",
        ogc_wkt = "Clarke_1866",
        esri_wkt = "Clarke_1866",
    )
    semimaj_ax = parameters.SemiMajorRadius(6378206.4)
    inv_flat = parameters.InverseFlattening(294.9786982)


class Clarke1880(Ellipsoid):
    name = EllipsoidName(
        proj4 = "clrk80",
        ogc_wkt = "Clarke 1880 (RGS)",
        esri_wkt = "Clarke_1880_RGS",
    )
    semimaj_ax = parameters.SemiMajorRadius(6378249.145)
    inv_flat = parameters.InverseFlattening(293.465)


class Airy1830(Ellipsoid):
    name = EllipsoidName(
        proj4 = "airy",
        ogc_wkt = "Airy 1830",
        esri_wkt = "Airy_1830",
    )
    semimaj_ax = parameters.SemiMajorRadius(6377563.396)
    inv_flat = parameters.InverseFlattening(299.3249646)


class SphereArcInfo(Ellipsoid):
    # A perfect sphere (flattening 0); WKT output special-cases this.
    name = EllipsoidName(
        proj4 = "", # no name
        ogc_wkt = "Sphere_ARC_INFO",
        esri_wkt = "Sphere_ARC_INFO",
    )
    semimaj_ax = parameters.SemiMajorRadius(6370997.0)
    flat = parameters.Flattening(0.0)


class Krassowsky1940(Ellipsoid):
    name = EllipsoidName(
        proj4 = "krass",
        ogc_wkt = "Krassowsky 1940",
        esri_wkt = "Krassowsky_1940",
    )
    semimaj_ax = parameters.SemiMajorRadius(6378245.0)
    inv_flat = parameters.InverseFlattening(298.3)


class Bessel1841(Ellipsoid):
    name = EllipsoidName(
        proj4 = "bessel",
        ogc_wkt = "Bessel 1841",
        esri_wkt = "Bessel_1841",
    )
    semimaj_ax = parameters.SemiMajorRadius(6377397.155)
    inv_flat = parameters.InverseFlattening(299.1528128)


class Unknown(Ellipsoid):
    # Placeholder for unrecognized ellipsoids.
    name = EllipsoidName(
        proj4 = "",
        ogc_wkt = "Unknown",
        esri_wkt = "Unknown",
    )
    # values have to be set manually in Ellipsoid class
    semimaj_ax = None
    inv_flat = None
| |
import sys, os
import scipy
from pylab import *
from matplotlib import *
from scipy.stats import *
from numpy import *
from scipy import *
import kepfit
import kepmsg
"""
This code is based on the PyKE routine kepsff
found at keplerscience.arc.nasa.gov
The kepsff code is based on Vanderberg and Johnson 2014.
If you use this you must cite V&J 2014.
"""
def martinsff(intime,indata,centr1,centr2,
    npoly_cxcy,sigma_cxcy,npoly_ardx,
    npoly_dsdt,sigma_dsdt,npoly_arfl,sigma_arfl,verbose,logfile,
    status):
    """Self-flat-field (SFF) systematics correction (Vandenburg & Johnson 2014).

    Decorrelates the flux time series against the arclength of the
    spacecraft's centroid motion: fits/rotates the centroids, rejects
    outliers and thruster firings, fits the arclength-flux correlation,
    and divides it out of the raw flux.

    Returns (out_detsap, cfac, new_thr): the corrected flux, the
    correction factors, and a boolean mask of thruster-firing cadences
    mapped back onto the full input cadence grid.

    NOTE(review): names such as ``zeros``, ``append``, ``nanmean``,
    ``numpy``, ``math`` and ``np`` are all supplied by the star imports
    at module top (pylab/numpy/scipy) — fragile; confirm they resolve.
    """
    # startup parameters
    status = 0
    # NOTE(review): the plotting constants below are never used in this
    # function body.
    labelsize = 16
    ticksize = 14
    xsize = 20
    ysize = 8
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2
    seterr(all="ignore")

    # fit centroid data with low-order polynomial
    cfit = zeros((len(centr2)))
    csig = zeros((len(centr2)))
    functype = 'poly' + str(npoly_cxcy)
    pinit = array([nanmean(centr2)])
    if npoly_cxcy > 0:
        for j in range(npoly_cxcy):
            pinit = append(pinit,0.0)
    try:
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
            kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose)
        for j in range(len(coeffs)):
            cfit += coeffs[j] * numpy.power(centr1,j)
        csig[:] = sigma
    except:
        # NOTE(review): t1 and t2 are not defined in this scope — this
        # message formatting raises NameError if the except path runs.
        message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % (t1,t2)
        status = kepmsg.err(logfile,message,verbose)
#        sys.exit('')
        os._exit(1)

    # reject outliers; cad_good records which input cadences survive
    time_good = array([],'float64')
    centr1_good = array([],'float32')
    centr2_good = array([],'float32')
    flux_good = array([],'float32')
    cad_good = array([],dtype=bool)
    for i in range(len(cfit)):
        if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]:
            cad_good = append(cad_good, True)
            time_good = append(time_good,intime[i])
            centr1_good = append(centr1_good,centr1[i])
            centr2_good = append(centr2_good,centr2[i])
            flux_good = append(flux_good,indata[i])
        else:
            #import ipdb
            #ipdb.set_trace()
            cad_good = append(cad_good, False)
            print(intime[i])

    # covariance matrix for centroid time series
    centr = concatenate([[centr1_good] - mean(centr1_good), [centr2_good] - mean(centr2_good)])
    covar = cov(centr)

    # eigenvector eigenvalues of covariance matrix
    # NOTE(review): ``eval`` shadows the Python builtin here.
    [eval, evec] = numpy.linalg.eigh(covar)
    ex = arange(-10.0,10.0,0.1)
    epar = evec[1,1] / evec[0,1] * ex
    enor = evec[1,0] / evec[0,0] * ex
    ex = ex + mean(centr1)
    epar = epar + mean(centr2_good)
    enor = enor + mean(centr2_good)

    # rotate centroid data into the principal-axis frame
    centr_rot = dot(evec.T,centr)

    # fit polynomial to rotated centroids
    rfit = zeros((len(centr2)))
    rsig = zeros((len(centr2)))
    functype = 'poly' + str(npoly_ardx)
    # NOTE(review): the nanmean-based pinit is immediately overwritten
    # by [1.0] on the next line.
    pinit = array([nanmean(centr_rot[0,:])])
    pinit = array([1.0])
    if npoly_ardx > 0:
        for j in range(npoly_ardx):
            pinit = append(pinit,0.0)
    try:
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
            kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1,
                           logfile,verbose)
    except:
        # NOTE(review): on failure, ``coeffs`` stays undefined and the
        # loop below raises NameError.
        message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
        status = kepmsg.err(logfile,message,verbose)
    rx = linspace(nanmin(centr_rot[1,:]),nanmax(centr_rot[1,:]),100)
    ry = zeros((len(rx)))
    for i in range(len(coeffs)):
        ry = ry + coeffs[i] * numpy.power(rx,i)

    # calculate arclength of centroids by trapezoidal integration
    s = zeros((len(rx)))
    for i in range(1,len(s)):
        work3 = ((ry[i] - ry[i-1]) / (rx[i] - rx[i-1]))**2
        s[i] = s[i-1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i-1])

    # fit arclength as a function of strongest eigenvector
    sfit = zeros((len(centr2)))
    ssig = zeros((len(centr2)))
    functype = 'poly' + str(npoly_ardx)
    pinit = array([nanmean(s)])
    if npoly_ardx > 0:
        for j in range(npoly_ardx):
            pinit = append(pinit,0.0)
    try:
        acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
            kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose)
    except:
        message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
        status = kepmsg.err(logfile,message,verbose)

    # correlate arclength with detrended flux
    t = copy(time_good)
    y = copy(flux_good)
    z = centr_rot[1,:]
    x = zeros((len(z)))
    for i in range(len(acoeffs)):
        x = x + acoeffs[i] * numpy.power(z,i)

    # calculate time derivative of arclength s
    dx = zeros((len(x)))
    for i in range(1,len(x)):
        dx[i] = (x[i] - x[i-1]) / (t[i] - t[i-1])
    dx[0] = dx[1]

    # fit polynomial to derivative and flag outliers (thruster firings)
    dfit = zeros((len(dx)))
    dsig = zeros((len(dx)))
    functype = 'poly' + str(npoly_dsdt)
    pinit = array([nanmean(dx)])
    if npoly_dsdt > 0:
        for j in range(npoly_dsdt):
            pinit = append(pinit,0.0)
    try:
        dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \
            kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose)
    except:
        message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
        status = kepmsg.err(logfile,message,verbose)
    for i in range(len(dcoeffs)):
        dfit = dfit + dcoeffs[i] * numpy.power(t,i)
    centr1_pnt = array([],'float32')
    centr2_pnt = array([],'float32')
    time_pnt = array([],'float64')
    flux_pnt = array([],'float32')
    dx_pnt = array([],'float32')
    s_pnt = array([],'float32')
    time_thr = array([],'float64')
    flux_thr = array([],'float32')
    dx_thr = array([],'float32')
    thr_cadence = zeros(len(t),dtype=bool)
    # split cadences into pointing-stable (pnt) and thruster (thr) sets
    for i in range(len(t)):
        if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[i] > dfit[i] - sigma_dsdt * dsigma:
            time_pnt = append(time_pnt,time_good[i])
            flux_pnt = append(flux_pnt,flux_good[i])
            dx_pnt = append(dx_pnt,dx[i])
            s_pnt = append(s_pnt,x[i])
            centr1_pnt = append(centr1_pnt,centr1_good[i])
            centr2_pnt = append(centr2_pnt,centr2_good[i])
        else:
            time_thr = append(time_thr,time_good[i])
            flux_thr = append(flux_thr,flux_good[i])
            dx_thr = append(dx_thr,dx[i])
            thr_cadence[i] = True

    # fit arclength-flux correlation
    cfit = zeros((len(time_pnt)))
    csig = zeros((len(time_pnt)))
    functype = 'poly' + str(npoly_arfl)
    pinit = array([nanmean(flux_pnt)])
    if npoly_arfl > 0:
        for j in range(npoly_arfl):
            pinit = append(pinit,0.0)
    try:
        ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \
            kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose)
    except:
        message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
        status = kepmsg.err(logfile,message,verbose)

    # correction factors for unfiltered data (the full input series, not
    # just the outlier-cleaned subset)
    centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)])
    centr_rot = dot(evec.T,centr)
    yy = copy(indata)
    zz = centr_rot[1,:]
    xx = zeros((len(zz)))
    cfac = zeros((len(zz)))
    for i in range(len(acoeffs)):
        xx = xx + acoeffs[i] * numpy.power(zz,i)
    for i in range(len(ccoeffs)):
        cfac = cfac + ccoeffs[i] * numpy.power(xx,i)

    # apply correction to flux time-series
    out_detsap = indata / cfac

    #add back in the missing thr_cadence data
    # map the thruster mask (indexed on good cadences) back onto the
    # full cadence grid by skipping over rejected cadences
    new_thr = np.zeros_like(cad_good)
    j = 0
    if np.all(cad_good == True):
        pass
    else:
        for i,c in enumerate(cad_good):
            if c == False:
                j+=1
            else:
                new_thr[i] = thr_cadence[i-j]
    return out_detsap, cfac, new_thr
| |
import csv
import random
import logging
from io import BytesIO
from datetime import datetime
from celery import task
from time import strptime
from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.core import management
from django.contrib.auth.models import User
from django.db.models import Q
from django.shortcuts import get_object_or_404
from molo.core.utils import (
create_new_article_relations, copy_translation_pages)
from molo.core.models import (
Site,
ArticlePage,
Main,
SectionIndexPage,
SectionPage,
Languages,
SiteSettings,
BannerIndexPage,
FooterIndexPage,
TagIndexPage,
)
from molo.core.api.importers import (
RecordKeeper,
LanguageImporter,
ImageImporter,
ContentImporter,
Logger,
)
from molo.core.api.constants import ACTION
from django.utils import timezone
from six import iteritems
from wagtail.core.models import Page
# Template paths used when emailing the outcome of the import / validate /
# copy tasks defined below.
IMPORT_EMAIL_TEMPLATE = "content_import/import_email.html"
VALIDATE_EMAIL_TEMPLATE = "content_import/validate_email.html"
COPY_EMAIL_TEMPLATE = "core/copy_email.html"
COPY_FAILED_EMAIL_TEMPLATE = "core/copy_failed_email.html"
@task(ignore_result=True)
def rotate_content(day=None):
    """Collect the per-site parameters needed for content rotation and call
    both ``rotate_latest`` and ``rotate_featured_in_homepage`` for every Main.

    :param day: weekday index (0=Monday .. 6=Sunday); defaults to today's.
    """
    # getting the content rotation settings from site settings
    for main in Main.objects.all():
        site = main.sites_rooted_here.all().first()
        main_lang = Languages.for_site(site).languages.filter(
            is_main_language=True).first()
        index = SectionIndexPage.objects.live().child_of(main).first()
        site_settings = SiteSettings.for_site(site)
        if day is None:
            # resolved once on the first iteration, then reused for all mains
            day = datetime.today().weekday()
        # calls the two rotate methods with the necessary params
        if main and index:
            rotate_latest(main_lang, index, main, site_settings, day)
            rotate_featured_in_homepage(main_lang, day, main)
@task(ignore_result=True)
def publish_scheduled_pages():
    """Run Wagtail's management command that publishes pages whose
    scheduled go-live time has passed."""
    management.call_command(
        'publish_scheduled_pages', verbosity=0, interactive=False)
@task(ignore_result=True)
def clearsessions():
    """Clear expired Django sessions roughly once per hour.

    This task is scheduled every minute; a 1-in-60 random draw spreads
    the actual cleanup work randomly across the hour.
    """
    minute_slot = random.randint(0, 59)
    if minute_slot == 0:
        management.call_command(
            'clearsessions', verbosity=0, interactive=False)
@task(ignore_result=True)
def demote_articles():
    """Un-feature live articles whose featuring end date has passed.

    Three bulk UPDATEs clear the ``featured_in_latest`` / ``_section`` /
    ``_homepage`` flag and the corresponding start/end dates.
    NOTE(review): this uses naive ``datetime.now()`` while the rotate
    functions use ``timezone.now()`` — confirm this is safe under USE_TZ.
    """
    ArticlePage.objects.live().filter(
        featured_in_latest_end_date__lte=datetime.now()).update(
        featured_in_latest=False,
        featured_in_latest_start_date=None,
        featured_in_latest_end_date=None)
    ArticlePage.objects.live().filter(
        featured_in_section_end_date__lte=datetime.now()).update(
        featured_in_section=False,
        featured_in_section_start_date=None,
        featured_in_section_end_date=None)
    ArticlePage.objects.live().filter(
        featured_in_homepage_end_date__lte=datetime.now()).update(
        featured_in_homepage=False,
        featured_in_homepage_start_date=None,
        featured_in_homepage_end_date=None)
@task(ignore_result=True)
def promote_articles():
    """Feature live articles whose featuring start date has passed.

    Counterpart of ``demote_articles``: three bulk UPDATEs set the
    ``featured_in_latest`` / ``_section`` / ``_homepage`` flags.
    NOTE(review): naive ``datetime.now()`` — see note on ``demote_articles``.
    """
    ArticlePage.objects.live().filter(
        featured_in_latest_start_date__lte=datetime.now()).update(
        featured_in_latest=True)
    ArticlePage.objects.live().filter(
        featured_in_section_start_date__lte=datetime.now()).update(
        featured_in_section=True)
    ArticlePage.objects.live().filter(
        featured_in_homepage_start_date__lte=datetime.now()).update(
        featured_in_homepage=True)
def get_days_section(section=None):
    """Return the section's per-weekday rotation flags as a list,
    ordered Monday first (index 0) through Sunday (index 6)."""
    weekdays = ('monday', 'tuesday', 'wednesday', 'thursday',
                'friday', 'saturday', 'sunday')
    return [getattr(section, day + '_rotation') for day in weekdays]
def get_days_site_settings(site_settings):
    """Return the site settings' per-weekday rotation flags as a list,
    ordered Monday first (index 0) through Sunday (index 6)."""
    return [
        getattr(site_settings, day + '_rotation')
        for day in ('monday', 'tuesday', 'wednesday', 'thursday',
                    'friday', 'saturday', 'sunday')]
def rotate_latest(main_lang, index, main, site_settings, day):
    """Rotate the articles marked as ``featured_in_latest``.

    Checks that today falls inside the site's content-rotation date range,
    that ``day`` (weekday index) is enabled for rotation, and that the
    current hour matches one of the configured rotation times; when all
    hold, a random un-featured article is promoted and the oldest featured
    article is demoted.
    """
    def demote_last_featured_article():
        # set the last featured_in_latest article to false
        # (only when at least two are featured, so one always remains)
        if main.latest_articles().live().count() >= 2:
            article = main.latest_articles().live().last()
            article.featured_in_latest = False
            article.featured_in_latest_start_date = None
            article.featured_in_latest_end_date = None
            article.save_revision().publish()
    days = get_days_site_settings(site_settings)
    # checks if the current date is within the content rotation range
    if site_settings.content_rotation_start_date and \
            site_settings.content_rotation_end_date:
        if site_settings.content_rotation_start_date < timezone.now() \
                < site_settings.content_rotation_end_date:
            # checks if the current weekday is set to rotate
            if days[day]:
                for time in site_settings.time:
                    # each entry is a time-of-day; only act in that hour
                    time = strptime(str(time), '%H:%M:%S')
                    if time.tm_hour == datetime.now().hour:
                        # get a random article
                        random_article = ArticlePage.objects.live().filter(
                            featured_in_latest=False,
                            language__id=main_lang.id
                        ).descendant_of(index).order_by('?').exact_type(
                            ArticlePage).first()
                        # set random article to feature in latest
                        if random_article:
                            random_article.featured_in_latest_start_date = \
                                datetime.now()
                            random_article.save_revision().publish()
                            # promote_articles() flips the flag based on
                            # the start date we just assigned
                            promote_articles()
                            demote_last_featured_article()
def rotate_featured_in_homepage(main_lang, day, main):
    """Rotate homepage-featured articles per section under ``main``.

    For every section whose rotation settings cover today's date, the
    given weekday and the current hour, promote a random un-featured
    article and demote the oldest featured one.
    """
    def demote_last_featured_article_in_homepage(section):
        # drop the oldest featured article, keeping at least one featured
        articles = ArticlePage.objects.live().filter(
            featured_in_homepage=True,
            language__id=main_lang.id
        ).descendant_of(section).order_by(
            '-featured_in_homepage_start_date')
        if articles.count() >= 2:
            article = articles.last()
            article.featured_in_homepage = False
            article.featured_in_homepage_start_date = None
            article.featured_in_homepage_end_date = None
            article.save_revision().publish()
    for section in SectionPage.objects.descendant_of(main):
        days = get_days_section(section)
        # checks if current date is within the rotation date range
        if section.content_rotation_start_date and \
                section.content_rotation_end_date:
            if section.content_rotation_start_date < timezone.now() \
                    < section.content_rotation_end_date:
                # checks if the current weekday is set to rotate
                if days[day]:
                    for time in section.time:
                        time = strptime(str(time), '%H:%M:%S')
                        if time.tm_hour == datetime.now().hour:
                            random_article = ArticlePage.objects.live().filter(
                                featured_in_homepage=False,
                                language__id=main_lang.id
                            ).descendant_of(section).order_by('?').exact_type(
                                ArticlePage).first()
                            # promotes an article and bumps last one off list
                            if random_article:
                                random_article. \
                                    featured_in_homepage_start_date = \
                                    datetime.now()
                                random_article.save_revision().publish()
                                promote_articles()
                                demote_last_featured_article_in_homepage(
                                    section)
def send_import_email(to_email, context):
    """Render the import-result template with ``context`` and email it
    to ``to_email``."""
    message = EmailMessage(
        settings.CONTENT_IMPORT_SUBJECT,
        render_to_string(IMPORT_EMAIL_TEMPLATE, context),
        settings.FROM_EMAIL,
        [to_email])
    message.send()
def send_validate_email(to_email, context):
    """Render the validation-result template with ``context`` and email
    it to ``to_email``."""
    message = EmailMessage(
        settings.CONTENT_IMPORT_SUBJECT,
        render_to_string(VALIDATE_EMAIL_TEMPLATE, context),
        settings.FROM_EMAIL,
        [to_email])
    message.send()
def send_copy_email(to_email, context, csv=None):
    """Email the content-copy report, attaching ``csv`` (a file-like
    buffer) when provided.

    Note: the ``csv`` parameter shadows the ``csv`` module inside this
    function; the name is kept for caller compatibility.
    """
    subject = getattr(settings, 'CONTENT_COPY_SUBJECT', 'Molo Content Copy')
    body = render_to_string(COPY_EMAIL_TEMPLATE, context)
    message = EmailMessage(subject, body, settings.FROM_EMAIL, [to_email])
    if csv:
        message.attach('file.csv', csv.getvalue(), 'text/csv')
    message.send()
def send_copy_failed_email(to_email, context):
    """Email a notice that a content copy failed."""
    subject = getattr(
        settings, 'CONTENT_COPY_FAILED_SUBJECT', 'Molo Content Copy Failed')
    body = render_to_string(COPY_FAILED_EMAIL_TEMPLATE, context)
    EmailMessage(subject, body, settings.FROM_EMAIL, [to_email]).send()
@task(ignore_result=True)
def molo_consolidated_minute_task():
    """
    Purpose: To reduce the number of db connections, we run all
    per-minute maintenance tasks sequentially in one celery call.
    """
    demote_articles()
    promote_articles()
    publish_scheduled_pages()
    clearsessions()
@task(ignore_result=True)
def copy_to_all_task(page_id, user_id, site_pk):
    """Copy ``page_id`` (recursively) into every other Main that contains
    a page matching the source page's parent, then email a report listing
    any skipped destinations."""
    # getting data needed
    user = User.objects.get(pk=user_id)
    site = Site.objects.get(pk=site_pk)
    page = get_object_or_404(Page, id=page_id).specific
    parent = page.get_parent()
    excluded_main = Main.objects.ancestor_of(page).first()
    errors = []
    # loop through all the mains except for the main the page exists in
    for main in Main.objects.all().exclude(pk=excluded_main.pk):
        new_page = None
        # search for the parent page in the destination site
        destination_parent = Page.objects.descendant_of(main).filter(
            Q(slug=parent.slug) | Q(title=parent.title))
        if destination_parent.exists():
            destination_parent = destination_parent.first()
            # if it exists, check to make sure the page doesn't already exist
            destination_page = Page.objects.descendant_of(
                destination_parent).filter(
                Q(slug=page.slug) | Q(title=page.title))
            if not destination_page.exists():
                # NOTE(review): recursive/keep_live are the strings 'true',
                # not booleans — truthy, so it works, but confirm intent.
                new_page = page.copy(
                    recursive='true',
                    to=destination_parent,
                    update_attrs={
                        'title': page.title,
                        'slug': page.slug,
                    },
                    keep_live='true',
                    user=user,
                )
                copy_translation_pages(page, new_page)
                create_new_article_relations(page, new_page)
                revision = new_page.save_revision()
                # If the original page is scheduled
                if not page.live and not page.expired and \
                        page.approved_schedule:
                    # If the new page is in draft
                    if not new_page.live and not new_page.expired and \
                            not new_page.approved_schedule:
                        if new_page.go_live_at is not None:
                            revision.publish()
            else:
                errors.append(str(
                    page.title + ' already exists in ' + main.title))
        else:
            errors.append(str(
                parent.title + ' does not exist in ' + main.title))
    send_copy_email(
        user.email,
        {
            'name': ((user.get_full_name() or user.username)
                     if user else None),
            'source': site,
            'logs': errors
        },)
@task(ignore_result=True)
def copy_sections_index(
        section_pk, user_pk, to_pk, copy_revisions, recursive, keep_live):
    """Copy a SectionIndexPage tree under another page, rebuild article
    relations between the source and destination Mains, and email the
    requesting user on success or failure."""
    section_index = SectionIndexPage.objects.get(pk=section_pk)
    user = User.objects.get(pk=user_pk) if user_pk else None
    to = Page.objects.get(pk=to_pk).specific
    try:
        section_index.copy(
            user=user,
            to=to,
            copy_revisions=copy_revisions,
            recursive=recursive,
            keep_live=keep_live,
            via_celery=True)
        old_main = section_index.get_site().root_page
        new_main = to.get_site().root_page
        create_new_article_relations(old_main, new_main)
        # NOTE(review): user may be None here, in which case user.email
        # raises AttributeError — confirm user_pk is always provided.
        send_copy_email(user.email, {
            'name': (user.get_full_name() or user.username) if user else None,
            'source': section_index.get_parent().title,
            'to': to.title
        })
    except Exception as e:
        logging.error(e, exc_info=True)
        send_copy_failed_email(user.email, {
            'name': (user.get_full_name() or user.username) if user else None,
            'source': section_index.get_parent().title,
            'to': to.title
        })
@task(ignore_result=True)
def import_site(root_url, site_pk, user_pk):
    """Import languages, images and content trees from the remote molo
    site at ``root_url`` into the local site ``site_pk``, then email the
    requesting user a report with a foreign-id -> local-id CSV attached."""
    user = User.objects.get(pk=user_pk) if user_pk else None
    # shared recorder mapping foreign page ids to locally created ids
    record_keeper = RecordKeeper()
    logger = Logger()
    site = Site.objects.get(pk=site_pk)
    language_importer = LanguageImporter(
        site.pk, root_url,
        record_keeper=record_keeper,
        logger=logger)
    image_importer = ImageImporter(
        site.pk, root_url,
        record_keeper=record_keeper,
        logger=logger)
    content_importer = ContentImporter(
        site.pk, root_url,
        record_keeper=record_keeper,
        logger=logger)
    try:
        # get languages
        language_importer.copy_site_languages()
        image_importer.import_images()
        # copy_content SectionIndexPage
        section_index_page = SectionIndexPage.objects.descendant_of(
            site.root_page).first()
        foreign_section_index_page_id = content_importer.get_foreign_page_id_from_type(  # noqa
            "core.SectionIndexPage")
        content_importer.copy_children(
            foreign_id=foreign_section_index_page_id,
            existing_node=section_index_page)
        # copy_content Banner Pages
        banner_index_page = BannerIndexPage.objects.descendant_of(
            site.root_page).first()
        foreign_banner_index_page_id = content_importer.get_foreign_page_id_from_type(  # noqa
            "core.BannerIndexPage")
        content_importer.copy_children(
            foreign_id=foreign_banner_index_page_id,
            existing_node=banner_index_page)
        # copy_content Footer Pages
        footer_index_page = FooterIndexPage.objects.descendant_of(
            site.root_page).first()
        foreign_footer_index_page_id = content_importer.get_foreign_page_id_from_type(  # noqa
            "core.FooterIndexPage")
        content_importer.copy_children(
            foreign_id=foreign_footer_index_page_id,
            existing_node=footer_index_page)
        # copy_content TagIndexPage
        tag_index_page = TagIndexPage.objects.descendant_of(
            site.root_page).first()
        foreign_tag_index_page_id = content_importer.get_foreign_page_id_from_type(  # noqa
            "core.TagIndexPage")
        content_importer.copy_children(
            foreign_id=foreign_tag_index_page_id,
            existing_node=tag_index_page)
        # relationships are wired up only after all pages exist locally
        logger.log(ACTION, "Creating Recommended Articles")
        content_importer.create_recommended_articles()
        logger.log(ACTION, "Creating Related Sections")
        content_importer.create_related_sections()
        logger.log(ACTION, "Creating Nav Tag Relationships")
        content_importer.create_nav_tag_relationships()
        logger.log(ACTION, "Creating Section Tag Relationships")
        content_importer.create_section_tag_relationship()
        logger.log(ACTION, "Creating Banner Page Links")
        content_importer.create_banner_page_links()
        logger.log(ACTION, "Recreating Article Body")
        content_importer.recreate_article_body()
        # create CSV
        # NOTE(review): csv.writer over a BytesIO only works on Python 2
        # (csv writes str); on Python 3 this needs a StringIO — confirm.
        foreign_local_map = record_keeper.foreign_local_map["page_map"]
        csvfile = BytesIO()
        writer = csv.writer(csvfile)
        rows = [["foreign_id", "local_id"]]
        for foreign_id, local_id in iteritems(foreign_local_map):
            rows.append([foreign_id, local_id])
        writer.writerows(rows)
        # send email
        send_copy_email(
            user.email,
            {
                'name': ((user.get_full_name() or user.username)
                         if user else None),
                'source': root_url,
                'to': site.root_url,
                'logs': logger.get_email_logs()
            },
            csv=csvfile)
    except Exception as e:
        logging.error(e, exc_info=True)
        send_copy_failed_email(user.email, {
            'name': (user.get_full_name() or user.username) if user else None,
            'source': root_url,
            'to': site.root_url,
            'logs': logger.get_email_logs(),
        })
| |
#!/usr/bin/python3
# bwTL - BW's template library
# by Bill Weinman [http://bw.org/]
# Copyright 1995-2010 The BearHeart Group LLC
import sqlite3
__version__ = '1.0.3'
class bwDB:
    """Small CRUD convenience wrapper around a sqlite3 connection.

    db = bwDB( [ table = ''] [, filename = ''] )
        table is the table used by the CRUD helpers
        filename is the database file (assigning it opens the connection)
    Rows are returned as sqlite3.Row objects (mapping-style access).
    """

    def __init__(self, **kwargs):
        '''
        db = bwDB( [ table = ''] [, filename = ''] )
        constructor method
            table is for CRUD methods
            filename is for connecting to the database file
        '''
        # see filename setter below — assigning it opens the connection
        self.filename = kwargs.get('filename')
        self.table = kwargs.get('table', '')

    def sql_do(self, sql, params=()):
        '''
        db.sql_do( sql[, params] )
        method for non-select queries
            sql is string containing SQL
            params is list containing parameters
        returns nothing
        '''
        self._db.execute(sql, params)
        self._db.commit()

    def sql_query(self, sql, params=()):
        '''
        db.sql_query( sql[, params] )
        generator method for queries
            sql is string containing SQL
            params is list containing parameters
        returns a generator with one row per iteration
        each row is a Row factory
        '''
        c = self._db.cursor()
        c.execute(sql, params)
        for r in c:
            yield r

    def sql_query_row(self, sql, params=()):
        '''
        db.sql_query_row( sql[, params] )
        query for a single row
            sql is string containing SQL
            params is list containing parameters
        returns a single row as a Row factory
        '''
        c = self._db.cursor()
        c.execute(sql, params)
        return c.fetchone()

    def sql_query_value(self, sql, params=()):
        '''
        db.sql_query_value( sql[, params] )
        query for a single value (first column of the first row)
            sql is string containing SQL
            params is list containing parameters
        returns a single value
        '''
        c = self._db.cursor()
        c.execute(sql, params)
        return c.fetchone()[0]

    def getrec(self, id):
        '''
        db.getrec(id)
        get a single row, by id
        '''
        query = 'SELECT * FROM {} WHERE id = ?'.format(self.table)
        c = self._db.execute(query, (id,))
        return c.fetchone()

    def getrecs(self):
        '''
        db.getrecs()
        get all rows, returns a generator of Row factories
        '''
        query = 'SELECT * FROM {}'.format(self.table)
        c = self._db.execute(query)
        for r in c:
            yield r

    def insert(self, rec):
        '''
        db.insert(rec)
        insert a single record into the table
            rec is a dict with key/value pairs corresponding to table schema
        omit id column to let SQLite generate it
        returns the new row's id
        '''
        klist = sorted(rec.keys())
        values = [rec[k] for k in klist]  # a list of values ordered by key
        q = 'INSERT INTO {} ({}) VALUES ({})'.format(
            self.table,
            ', '.join(klist),
            ', '.join('?' for _ in values)
        )
        c = self._db.execute(q, values)
        self._db.commit()
        return c.lastrowid

    def update(self, id, rec):
        '''
        db.update(id, rec)
        update a row in the table
            id is the value of the id column for the row to be updated
            rec is a dict with key/value pairs corresponding to table schema
        an 'id' key in rec is ignored so the row id is never rewritten
        '''
        # Build the column list up front, excluding 'id'.  The previous
        # implementation deleted from klist/values while iterating them,
        # which only worked by accident for a single 'id' key.
        klist = sorted(k for k in rec.keys() if k != 'id')
        values = [rec[k] for k in klist]
        q = 'UPDATE {} SET {} WHERE id = ?'.format(
            self.table,
            ', '.join('{} = ?'.format(k) for k in klist)
        )
        self._db.execute(q, values + [id])
        self._db.commit()

    def delete(self, id):
        '''
        db.delete(id)
        delete a row from the table, by id
        '''
        query = 'DELETE FROM {} WHERE id = ?'.format(self.table)
        self._db.execute(query, [id])
        self._db.commit()

    def countrecs(self):
        '''
        db.countrecs()
        count the records in the table
        returns a single integer value
        '''
        query = 'SELECT COUNT(*) FROM {}'.format(self.table)
        c = self._db.cursor()
        c.execute(query)
        return c.fetchone()[0]

    ### filename property
    @property
    def filename(self):
        return self._dbFilename

    @filename.setter
    def filename(self, fn):
        # NOTE: assigning the filename opens the connection; passing None
        # (e.g. omitting filename in the constructor) makes sqlite3.connect
        # raise — confirm callers always supply a filename.
        self._dbFilename = fn
        self._db = sqlite3.connect(fn)
        self._db.row_factory = sqlite3.Row

    @filename.deleter
    def filename(self):
        self.close()

    def close(self):
        self._db.close()
        del self._dbFilename
def test():
    """Smoke-test bwDB against an in-memory database, exercising create,
    insert, read, update and delete while printing progress."""
    import os  # only needed by the commented-out file-based variant below
    fn = ':memory:'  # in-memory database
    t = 'foo'
    recs = [
        dict(string='one'),
        dict(string='two'),
        dict(string='three')
    ]
    ### for file-based database
    # try: os.stat(fn)
    # except: pass
    # else:
    #     print('Delete', fn)
    #     os.unlink(fn)
    print('version', __version__)
    print('Create database file {} ...'.format(fn), end='')
    db = bwDB(filename=fn, table=t)
    print('Done.')
    print('Create table ... ', end='')
    db.sql_do(' DROP TABLE IF EXISTS {} '.format(t))
    db.sql_do(' CREATE TABLE {} ( id INTEGER PRIMARY KEY, string TEXT ) '.format(t))
    print('Done.')
    print('Insert into table ... ', end='')
    for r in recs: db.insert(r)
    print('Done.')
    print('Read from table')
    for r in db.getrecs(): print(dict(r))
    print('Update table')
    db.update(2, dict(string='TWO'))
    print(dict(db.getrec(2)))
    print('Insert an extra row ... ', end='')
    newid = db.insert(dict(string='extra'))
    print('(id is {})'.format(newid))
    print(dict(db.getrec(newid)))
    print('Now delete it')
    db.delete(newid)
    for r in db.getrecs(): print(dict(r))
    db.close()


if __name__ == "__main__": test()
| |
import difflib
from collections import namedtuple
from six.moves import zip_longest
_BLANK = object()
class Conflict(object):
    """A merge conflict: parallel runs of elements from mine/other/base
    that could not be reconciled element-by-element.

    Each of ``mine``/``other``/``base`` is a list of collected elements,
    or None when that side contributed nothing (was _BLANK at creation).
    """

    def __init__(self, mine, other, base):
        # start a one-element run per side; _BLANK marks an absent side
        self.mine = [mine] if mine is not _BLANK else None
        self.other = [other] if other is not _BLANK else None
        self.base = [base] if base is not _BLANK else None

    @classmethod
    def from_prepared(cls, mine, other, base):
        """Build a Conflict from already-collected element lists."""
        m = mine[0] if mine else _BLANK
        o = other[0] if other else _BLANK
        b = base[0] if base else _BLANK
        conflict = cls(m, o, b)
        for m, o, b in zip_longest(mine[1:], other[1:], base[1:],
                                   fillvalue=_BLANK):
            conflict.update(m, o, b)
        return conflict

    @classmethod
    def resolve_sub_conflict(cls, mine, other):
        """Resolve a nested conflict that has no base elements."""
        c = cls.from_prepared(mine, other, [])
        return c.resolve_conflict()

    def update(self, m, o, b):
        """Append one more element to each non-blank side of the run."""
        if m is not _BLANK:
            self.mine.append(m)
        if o is not _BLANK:
            self.other.append(o)
        if b is not _BLANK:
            self.base.append(b)

    def resolve_conflict(self):
        """Attempt to resolve this conflict.

        Returns a list of merged elements; the list may still contain
        Conflict objects for spans that remain irreconcilable.
        """
        # trivial cases: only one side present, or both sides agree
        if self.mine is None and self.other is not None:
            return self.other
        if self.other is None and self.mine is not None:
            return self.mine
        if self.other == self.mine:
            return self.mine
        combined = set(self.mine or []) | set(self.other or [])
        # nothing from base survives on either side: keep as unresolved
        if (self.base is not None and
                not any(i in combined for i in self.base)):
            return [self]
        mine = self.mine if self.mine else []
        other = self.other if self.other else []
        i_mine, i_other = iter(mine), iter(other)
        result, new_mine, new_other = [], [], []
        # walk the textual diff of the two runs, consuming elements from
        # each side in step with the '+'/'-' markers difflib emits
        for diff in difflib.Differ().compare([str(i) for i in other],
                                             [str(i) for i in mine]):
            if ((diff.startswith('+') and (new_other or result)) or
                    (diff.startswith('-') and (new_mine or result)) or
                    (result and (new_other or new_mine))):
                if new_mine or new_other:
                    result.insert(0, Conflict.from_prepared(new_mine,
                                                            new_other,
                                                            []))
                # whatever remains on both sides becomes a sub-conflict
                result.extend(Conflict.resolve_sub_conflict(
                    [i for i in i_mine],
                    [i for i in i_other]))
                break
            elif diff.startswith('-'):
                new_other.append(next(i_other))
            elif diff.startswith('+'):
                new_mine.append(next(i_mine))
            else:
                next(i_other)
                result.append(next(i_mine))
        return result

    def _asdict(self):
        # same shape as FieldDiff._asdict() so conflicts serialize uniformly
        return {
            'my_op': 'CHANGED',
            'my_val': self.mine,
            'other_op': 'CHANGED',
            'other_val': self.other,
            'base_val': self.base
        }

    def __eq__(self, other):
        return (self.mine == other.mine and self.other == other.other and
                self.base == other.base)

    def __str__(self):
        return 'Conflict{}'.format(str((self.mine, self.other, self.base)))

    def __repr__(self):
        return str(self)
def merge_lists(base, mine, other):
    """3-way merge of two lists against a common base.

    Runs of positions that differ are first accumulated into Conflict
    objects and then resolved; the returned list may still contain
    Conflict instances for spans that could not be auto-resolved.
    """
    # fast paths: identical sides, or only one side changed
    if mine == other:
        return mine
    if other == base:
        return mine
    if mine == base:
        return other
    result = []
    last_conflict = False
    for i, (m, o, b) in enumerate(zip_longest(mine, other, base,
                                              fillvalue=_BLANK)):
        if m == o and _BLANK not in (m, o):
            result.append(m)
        else:  # Conflict
            # consecutive conflicting positions accumulate into one Conflict
            if last_conflict:
                c = result[-1]
                c.update(m, o, b)
            else:
                c = Conflict(m, o, b)
                result.append(c)
            last_conflict = True
            continue
        last_conflict = False
    # replace each Conflict with its resolution; a resolution may expand
    # to several elements, so 'offset' keeps later indexes valid
    offset = 0
    for i, r in enumerate(result[:]):
        if isinstance(r, Conflict):
            c = r.resolve_conflict()
            result = result[:i + offset] + c + result[i + offset + 1:]
            offset += len(c) - 1
    return result
class JsonDiff(object):
    """
    Compares two json objects and stores the differences.
    Only the outermost objects are considered, the comparison does not recurse
    into nested objects.
    """

    def __init__(self, old, new):
        old_keys, new_keys = set(old), set(new)
        shared = old_keys & new_keys
        self.added = list(new_keys - shared)
        self.removed = list(old_keys - shared)
        self.changed = []
        self.unchanged = []
        for key in shared:
            bucket = self.unchanged if new[key] == old[key] else self.changed
            bucket.append(key)

    def op_for_field(self, field_name):
        """Return the operation recorded for *field_name*, or None if the
        field is unknown to this diff."""
        for operation in ('ADDED', 'UNCHANGED', 'CHANGED', 'REMOVED'):
            if field_name in getattr(self, operation.lower()):
                return operation
        return None
# Per-field 3-way diff record: how "mine" and "other" each changed a field
# relative to the common base value.  (The tuple's type name 'DiffOp'
# differs from the variable name; kept for repr/pickle compatibility.)
FieldDiff = namedtuple(
    'DiffOp', ['my_op', 'my_val', 'other_op', 'other_val', 'base_val'])
def merge_jsons(base, mine, other):
    """
    Performs a 3-way merge of mine and other using base as the common ancestor.
    Some conflicts are automatically resolved, e.g. mine and other both delete
    the same field.
    Conflicts that can't be automatically resolved (e.g. mine and other assign
    different values to the same field) are serialized into the merged json in
    a way that can be used for a later manual resolve:
    field: { __CONFLICT:
        base_val: X, # the original value of the field
        my_val: Y, # the value assigned by mine json
        my_op: Z, # the operation performed by mine json
        other_val: U, # the value assigned by other json
        other_op: W, # the operation performed by other json
    }
    my_op and other_op can take any of this values: 'ADDED', 'REMOVED',
    'CHANGED', 'UNCHANGED'. If my_op == 'REMOVED' then my_val == None
    (the same applies to other_op and other_val respectively).
    The merge recurses into dictionaries and merges parallel lists
    element-wise; other values are treated atomically.
    Returns a tuple of the form (merged, had_conflict).
    """
    def build_merge_dict(base, mine, other):
        # Build a tree mirroring the inputs: nested dicts recurse, parallel
        # lists are merged element-wise, everything else becomes a FieldDiff.
        my_diff = JsonDiff(base, mine)
        other_diff = JsonDiff(base, other)
        all_fields = set(base.keys()).union(mine.keys()).union(other.keys())
        merge_dict = {}
        for k in all_fields:
            base_val, my_val, other_val = (
                base.get(k, {}), mine.get(k), other.get(k))
            # Fix: these three branches used to be two separate `if`
            # statements, so the dict-recursion result was immediately
            # overwritten by the `else` FieldDiff branch and the documented
            # recursion into nested objects never happened.
            if (isinstance(my_val, dict) and isinstance(other_val, dict) and
                    isinstance(base_val, dict)):
                merge_dict[k] = build_merge_dict(base_val, my_val, other_val)
            elif isinstance(my_val, list) and isinstance(other_val, list):
                merge_dict[k] = merge_lists(base_val, my_val, other_val)
            else:
                merge_dict[k] = FieldDiff(base_val=base.get(k),
                                          my_val=my_val,
                                          my_op=my_diff.op_for_field(k),
                                          other_val=other_val,
                                          other_op=other_diff.op_for_field(k))
        return merge_dict

    def eq_vals(diff):
        # True when both sides assigned the same value.
        return diff.other_val == diff.my_val

    def conflict(diff):
        # Serialized form of an unresolved conflict.
        return {'__CONFLICT': diff._asdict()}

    def resolve_json(merge_dict):
        out_json = {}
        had_conflict = False
        for key, diff in merge_dict.items():
            if isinstance(diff, dict):
                # Nested object: resolve recursively.  (Must be part of the
                # elif chain below, otherwise dict entries would fall through
                # to `diff.my_op` and raise AttributeError.)
                out_json[key], rconflict = resolve_json(diff)
                had_conflict = had_conflict or rconflict
            elif isinstance(diff, list):
                # Merged list: serialize any leftover Conflict entries.
                for i, item in enumerate(diff):
                    if isinstance(item, Conflict):
                        if (item.mine and isinstance(item.mine[0], dict) and
                                '__CONFLICT' in item.mine[0]):
                            diff[i] = item.mine[0]
                        else:
                            diff[i] = conflict(item)
                        had_conflict = True
                out_json[key] = diff
            elif diff.my_op in ('UNCHANGED', None):
                # mine didn't touch the field: other's operation wins.
                if diff.other_op != 'REMOVED':
                    out_json[key] = diff.other_val
            elif diff.my_op == 'ADDED':
                if diff.other_op != 'ADDED' or eq_vals(diff):
                    out_json[key] = diff.my_val
                else:
                    out_json[key] = conflict(diff)
                    had_conflict = True
            elif diff.my_op == 'REMOVED':
                # removal conflicts only with a concurrent change.
                if diff.other_op == 'CHANGED':
                    out_json[key] = conflict(diff)
                    had_conflict = True
            elif diff.my_op == 'CHANGED':
                if diff.other_op == 'UNCHANGED' or eq_vals(diff):
                    out_json[key] = diff.my_val
                else:
                    out_json[key] = conflict(diff)
                    had_conflict = True
        return out_json, had_conflict

    return resolve_json(build_merge_dict(base, mine, other))
| |
import struct
from gearman.constants import PRIORITY_NONE, PRIORITY_LOW, PRIORITY_HIGH
from gearman.errors import ProtocolError
from gearman import compat
# Protocol specific constants
# NOTE(review): these are str constants; on Python 3 socket buffers are
# bytes, so the magic-string comparisons assume a Python 2 environment.
NULL_CHAR = '\x00'
MAGIC_RES_STRING = '%sRES' % NULL_CHAR
MAGIC_REQ_STRING = '%sREQ' % NULL_CHAR
# Fixed binary header: 4-byte magic + 4-byte command type + 4-byte length.
COMMAND_HEADER_SIZE = 12
# Gearman commands 1-9 (command id 5 is not defined in this table)
GEARMAN_COMMAND_CAN_DO = 1
GEARMAN_COMMAND_CANT_DO = 2
GEARMAN_COMMAND_RESET_ABILITIES = 3
GEARMAN_COMMAND_PRE_SLEEP = 4
GEARMAN_COMMAND_NOOP = 6
GEARMAN_COMMAND_SUBMIT_JOB = 7
GEARMAN_COMMAND_JOB_CREATED = 8
GEARMAN_COMMAND_GRAB_JOB = 9
# Gearman commands 10-19
GEARMAN_COMMAND_NO_JOB = 10
GEARMAN_COMMAND_JOB_ASSIGN = 11
GEARMAN_COMMAND_WORK_STATUS = 12
GEARMAN_COMMAND_WORK_COMPLETE = 13
GEARMAN_COMMAND_WORK_FAIL = 14
GEARMAN_COMMAND_GET_STATUS = 15
GEARMAN_COMMAND_ECHO_REQ = 16
GEARMAN_COMMAND_ECHO_RES = 17
GEARMAN_COMMAND_SUBMIT_JOB_BG = 18
GEARMAN_COMMAND_ERROR = 19
# Gearman commands 20-29
GEARMAN_COMMAND_STATUS_RES = 20
GEARMAN_COMMAND_SUBMIT_JOB_HIGH = 21
GEARMAN_COMMAND_SET_CLIENT_ID = 22
GEARMAN_COMMAND_CAN_DO_TIMEOUT = 23
GEARMAN_COMMAND_ALL_YOURS = 24
GEARMAN_COMMAND_WORK_EXCEPTION = 25
GEARMAN_COMMAND_OPTION_REQ = 26
GEARMAN_COMMAND_OPTION_RES = 27
GEARMAN_COMMAND_WORK_DATA = 28
GEARMAN_COMMAND_WORK_WARNING = 29
# Gearman commands 30-39
GEARMAN_COMMAND_GRAB_JOB_UNIQ = 30
GEARMAN_COMMAND_JOB_ASSIGN_UNIQ = 31
GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG = 32
GEARMAN_COMMAND_SUBMIT_JOB_LOW = 33
GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG = 34
# Fake command code (not part of the wire protocol; see below)
GEARMAN_COMMAND_TEXT_COMMAND = 9999
# Ordered argument names for each binary command; used to pack/unpack the
# NULL-separated payload of a command packet.
GEARMAN_PARAMS_FOR_COMMAND = {
    # Gearman commands 1-9
    GEARMAN_COMMAND_CAN_DO: ['task'],
    GEARMAN_COMMAND_CANT_DO: ['task'],
    GEARMAN_COMMAND_RESET_ABILITIES: [],
    GEARMAN_COMMAND_PRE_SLEEP: [],
    GEARMAN_COMMAND_NOOP: [],
    GEARMAN_COMMAND_SUBMIT_JOB: ['task', 'unique', 'data'],
    GEARMAN_COMMAND_JOB_CREATED: ['job_handle'],
    GEARMAN_COMMAND_GRAB_JOB: [],
    # Gearman commands 10-19
    GEARMAN_COMMAND_NO_JOB: [],
    GEARMAN_COMMAND_JOB_ASSIGN: ['job_handle', 'task', 'data'],
    GEARMAN_COMMAND_WORK_STATUS: ['job_handle', 'numerator', 'denominator'],
    GEARMAN_COMMAND_WORK_COMPLETE: ['job_handle', 'data'],
    GEARMAN_COMMAND_WORK_FAIL: ['job_handle'],
    GEARMAN_COMMAND_GET_STATUS: ['job_handle'],
    GEARMAN_COMMAND_ECHO_REQ: ['data'],
    GEARMAN_COMMAND_ECHO_RES: ['data'],
    GEARMAN_COMMAND_SUBMIT_JOB_BG: ['task', 'unique', 'data'],
    GEARMAN_COMMAND_ERROR: ['error_code', 'error_text'],
    # Gearman commands 20-29
    GEARMAN_COMMAND_STATUS_RES: ['job_handle', 'known', 'running', 'numerator', 'denominator'],
    GEARMAN_COMMAND_SUBMIT_JOB_HIGH: ['task', 'unique', 'data'],
    GEARMAN_COMMAND_SET_CLIENT_ID: ['client_id'],
    GEARMAN_COMMAND_CAN_DO_TIMEOUT: ['task', 'timeout'],
    GEARMAN_COMMAND_ALL_YOURS: [],
    GEARMAN_COMMAND_WORK_EXCEPTION: ['job_handle', 'data'],
    GEARMAN_COMMAND_OPTION_REQ: ['option_name'],
    GEARMAN_COMMAND_OPTION_RES: ['option_name'],
    GEARMAN_COMMAND_WORK_DATA: ['job_handle', 'data'],
    GEARMAN_COMMAND_WORK_WARNING: ['job_handle', 'data'],
    # Gearman commands 30-39
    GEARMAN_COMMAND_GRAB_JOB_UNIQ: [],
    GEARMAN_COMMAND_JOB_ASSIGN_UNIQ: ['job_handle', 'task', 'unique', 'data'],
    GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG: ['task', 'unique', 'data'],
    GEARMAN_COMMAND_SUBMIT_JOB_LOW: ['task', 'unique', 'data'],
    GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG: ['task', 'unique', 'data'],
    # Fake gearman command
    GEARMAN_COMMAND_TEXT_COMMAND: ['raw_text']
}
# Reverse lookup: numeric command code -> symbolic constant name,
# used for logging/debugging via get_command_name().
GEARMAN_COMMAND_TO_NAME = {
    GEARMAN_COMMAND_CAN_DO: 'GEARMAN_COMMAND_CAN_DO',
    GEARMAN_COMMAND_CANT_DO: 'GEARMAN_COMMAND_CANT_DO',
    GEARMAN_COMMAND_RESET_ABILITIES: 'GEARMAN_COMMAND_RESET_ABILITIES',
    GEARMAN_COMMAND_PRE_SLEEP: 'GEARMAN_COMMAND_PRE_SLEEP',
    GEARMAN_COMMAND_NOOP: 'GEARMAN_COMMAND_NOOP',
    GEARMAN_COMMAND_SUBMIT_JOB: 'GEARMAN_COMMAND_SUBMIT_JOB',
    GEARMAN_COMMAND_JOB_CREATED: 'GEARMAN_COMMAND_JOB_CREATED',
    GEARMAN_COMMAND_GRAB_JOB: 'GEARMAN_COMMAND_GRAB_JOB',
    # Gearman commands 10-19
    GEARMAN_COMMAND_NO_JOB: 'GEARMAN_COMMAND_NO_JOB',
    GEARMAN_COMMAND_JOB_ASSIGN: 'GEARMAN_COMMAND_JOB_ASSIGN',
    GEARMAN_COMMAND_WORK_STATUS: 'GEARMAN_COMMAND_WORK_STATUS',
    GEARMAN_COMMAND_WORK_COMPLETE: 'GEARMAN_COMMAND_WORK_COMPLETE',
    GEARMAN_COMMAND_WORK_FAIL: 'GEARMAN_COMMAND_WORK_FAIL',
    GEARMAN_COMMAND_GET_STATUS: 'GEARMAN_COMMAND_GET_STATUS',
    GEARMAN_COMMAND_ECHO_REQ: 'GEARMAN_COMMAND_ECHO_REQ',
    GEARMAN_COMMAND_ECHO_RES: 'GEARMAN_COMMAND_ECHO_RES',
    GEARMAN_COMMAND_SUBMIT_JOB_BG: 'GEARMAN_COMMAND_SUBMIT_JOB_BG',
    GEARMAN_COMMAND_ERROR: 'GEARMAN_COMMAND_ERROR',
    # Gearman commands 20-29
    GEARMAN_COMMAND_STATUS_RES: 'GEARMAN_COMMAND_STATUS_RES',
    GEARMAN_COMMAND_SUBMIT_JOB_HIGH: 'GEARMAN_COMMAND_SUBMIT_JOB_HIGH',
    GEARMAN_COMMAND_SET_CLIENT_ID: 'GEARMAN_COMMAND_SET_CLIENT_ID',
    GEARMAN_COMMAND_CAN_DO_TIMEOUT: 'GEARMAN_COMMAND_CAN_DO_TIMEOUT',
    GEARMAN_COMMAND_ALL_YOURS: 'GEARMAN_COMMAND_ALL_YOURS',
    GEARMAN_COMMAND_WORK_EXCEPTION: 'GEARMAN_COMMAND_WORK_EXCEPTION',
    GEARMAN_COMMAND_OPTION_REQ: 'GEARMAN_COMMAND_OPTION_REQ',
    GEARMAN_COMMAND_OPTION_RES: 'GEARMAN_COMMAND_OPTION_RES',
    GEARMAN_COMMAND_WORK_DATA: 'GEARMAN_COMMAND_WORK_DATA',
    GEARMAN_COMMAND_WORK_WARNING: 'GEARMAN_COMMAND_WORK_WARNING',
    # Gearman commands 30-39
    GEARMAN_COMMAND_GRAB_JOB_UNIQ: 'GEARMAN_COMMAND_GRAB_JOB_UNIQ',
    GEARMAN_COMMAND_JOB_ASSIGN_UNIQ: 'GEARMAN_COMMAND_JOB_ASSIGN_UNIQ',
    GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG: 'GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG',
    GEARMAN_COMMAND_SUBMIT_JOB_LOW: 'GEARMAN_COMMAND_SUBMIT_JOB_LOW',
    GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG: 'GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG',
    GEARMAN_COMMAND_TEXT_COMMAND: 'GEARMAN_COMMAND_TEXT_COMMAND'
}
# Text-based administrative commands understood by the gearman server.
GEARMAN_SERVER_COMMAND_STATUS = 'status'
GEARMAN_SERVER_COMMAND_VERSION = 'version'
GEARMAN_SERVER_COMMAND_WORKERS = 'workers'
GEARMAN_SERVER_COMMAND_MAXQUEUE = 'maxqueue'
GEARMAN_SERVER_COMMAND_SHUTDOWN = 'shutdown'
def get_command_name(cmd_type):
    """Return the symbolic name for *cmd_type*, or the code itself when
    the command is unknown."""
    try:
        return GEARMAN_COMMAND_TO_NAME[cmd_type]
    except KeyError:
        return cmd_type
def submit_cmd_for_background_priority(background, priority):
    """Pick the SUBMIT_JOB command variant for a (background, priority) pair.

    Raises KeyError for an unrecognized combination, exactly like a plain
    dict lookup would.
    """
    commands = {
        (True, PRIORITY_NONE): GEARMAN_COMMAND_SUBMIT_JOB_BG,
        (True, PRIORITY_LOW): GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG,
        (True, PRIORITY_HIGH): GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG,
        (False, PRIORITY_NONE): GEARMAN_COMMAND_SUBMIT_JOB,
        (False, PRIORITY_LOW): GEARMAN_COMMAND_SUBMIT_JOB_LOW,
        (False, PRIORITY_HIGH): GEARMAN_COMMAND_SUBMIT_JOB_HIGH,
    }
    return commands[(background, priority)]
def parse_binary_command(in_buffer, is_response=True):
    """Parse data and return (command type, command arguments dict, command size)
    or (None, None, 0) if there's not enough data for a complete command.
    """
    in_buffer_size = len(in_buffer)
    magic = None
    cmd_type = None
    cmd_args = None
    cmd_len = 0
    expected_packet_size = None
    # If we don't have enough data to parse, error early
    if in_buffer_size < COMMAND_HEADER_SIZE:
        return cmd_type, cmd_args, cmd_len
    # By default, we'll assume we're dealing with a gearman command
    # Header layout per '!4sII': 4-byte magic, 4-byte command type,
    # 4-byte payload length, all big-endian.
    magic, cmd_type, cmd_len = struct.unpack('!4sII', in_buffer[:COMMAND_HEADER_SIZE])
    # Requests and responses carry different magic strings; reject a mismatch.
    received_bad_response = is_response and bool(magic != MAGIC_RES_STRING)
    received_bad_request = not is_response and bool(magic != MAGIC_REQ_STRING)
    if received_bad_response or received_bad_request:
        raise ProtocolError('Malformed Magic')
    expected_cmd_params = GEARMAN_PARAMS_FOR_COMMAND.get(cmd_type, None)
    # GEARMAN_COMMAND_TEXT_COMMAND is a faked command that we use to support server text-based commands
    if expected_cmd_params is None or cmd_type == GEARMAN_COMMAND_TEXT_COMMAND:
        raise ProtocolError('Received unknown binary command: %s' % cmd_type)
    # If everything indicates this is a valid command, we should check to see if we have enough stuff to read in our buffer
    expected_packet_size = COMMAND_HEADER_SIZE + cmd_len
    if in_buffer_size < expected_packet_size:
        return None, None, 0
    binary_payload = in_buffer[COMMAND_HEADER_SIZE:expected_packet_size]
    split_arguments = []
    if len(expected_cmd_params) > 0:
        # NOTE(review): in_buffer looks like an array.array (see tostring()
        # below), so a slice must be converted to a byte string before
        # splitting -- confirm the caller's buffer type.
        binary_payload = binary_payload.tostring()
        # Only the final argument may contain NULL bytes, so the number of
        # splits is capped at len(params) - 1.
        split_arguments = binary_payload.split(NULL_CHAR, len(expected_cmd_params) - 1)
    elif binary_payload:
        raise ProtocolError('Expected no binary payload: %s' % get_command_name(cmd_type))
    # This is a sanity check on the binary_payload.split() phase
    # We should never be able to get here with any VALID gearman data
    if len(split_arguments) != len(expected_cmd_params):
        raise ProtocolError('Received %d argument(s), expecting %d argument(s): %s' % (len(split_arguments), len(expected_cmd_params), get_command_name(cmd_type)))
    # Iterate through the split arguments and assign them labels based on their order
    cmd_args = dict((param_label, param_value) for param_label, param_value in zip(expected_cmd_params, split_arguments))
    return cmd_type, cmd_args, expected_packet_size
def pack_binary_command(cmd_type, cmd_args, is_response=False):
    """Packs the given command using the parameter ordering specified in GEARMAN_PARAMS_FOR_COMMAND.
    *NOTE* Expects that all arguments in cmd_args are already str's.

    Raises ProtocolError for unknown command types, argument-name
    mismatches, non-string argument values, or NULL bytes in any
    non-final argument.
    """
    expected_cmd_params = GEARMAN_PARAMS_FOR_COMMAND.get(cmd_type, None)
    # The text command is a local fiction with no binary wire representation.
    if expected_cmd_params is None or cmd_type == GEARMAN_COMMAND_TEXT_COMMAND:
        raise ProtocolError('Received unknown binary command: %s' % get_command_name(cmd_type))
    # The caller must supply exactly the expected argument names.
    expected_parameter_set = set(expected_cmd_params)
    received_parameter_set = set(cmd_args.keys())
    if expected_parameter_set != received_parameter_set:
        raise ProtocolError('Received arguments did not match expected arguments: %r != %r' % (expected_parameter_set, received_parameter_set))
    # Select the right expected magic
    if is_response:
        magic = MAGIC_RES_STRING
    else:
        magic = MAGIC_REQ_STRING
    # !NOTE! str should be replaced with bytes in Python 3.x
    # We will iterate in ORDER and str all our command arguments
    if compat.any(type(param_value) != str for param_value in cmd_args.itervalues()):
        raise ProtocolError('Received non-binary arguments: %r' % cmd_args)
    data_items = [cmd_args[param] for param in expected_cmd_params]
    # Now check that all but the last argument are free of \0 as per the protocol spec.
    if compat.any('\0' in argument for argument in data_items[:-1]):
        raise ProtocolError('Received arguments with NULL byte in non-final argument')
    # NULL bytes delimit arguments on the wire, hence the restriction above.
    binary_payload = NULL_CHAR.join(data_items)
    # Pack the header in the !4sII format then append the binary payload
    payload_size = len(binary_payload)
    packing_format = '!4sII%ds' % payload_size
    return struct.pack(packing_format, magic, cmd_type, payload_size, binary_payload)
def parse_text_command(in_buffer):
    """Parse a text command and return a single line at a time"""
    cmd_type = None
    cmd_args = None
    cmd_len = 0
    # Without a complete line there is nothing to parse yet.
    if '\n' not in in_buffer:
        return cmd_type, cmd_args, cmd_len
    # NOTE(review): in_buffer appears to be an array.array (hence tostring()).
    # The rebound local 'in_buffer' (the remainder after the split) is
    # deliberately unused -- the caller advances its own buffer by cmd_len.
    text_command, in_buffer = in_buffer.tostring().split('\n', 1)
    if NULL_CHAR in text_command:
        raise ProtocolError('Received unexpected character: %s' % text_command)
    # Fake gearman command "TEXT_COMMAND" used to process server admin client responses
    cmd_type = GEARMAN_COMMAND_TEXT_COMMAND
    cmd_args = dict(raw_text=text_command)
    # +1 accounts for the consumed '\n' terminator.
    cmd_len = len(text_command) + 1
    return cmd_type, cmd_args, cmd_len
def pack_text_command(cmd_type, cmd_args):
    """Serialize a server text command back into its raw line form.

    Only the fake GEARMAN_COMMAND_TEXT_COMMAND type is accepted; the
    line itself must be present under the 'raw_text' key.
    """
    if cmd_type != GEARMAN_COMMAND_TEXT_COMMAND:
        message = 'Unknown cmd_type: Received %s, expecting %s' % (
            get_command_name(cmd_type),
            get_command_name(GEARMAN_COMMAND_TEXT_COMMAND))
        raise ProtocolError(message)
    raw_line = cmd_args.get('raw_text')
    if raw_line is None:
        raise ProtocolError(
            'Did not receive arguments any valid arguments: %s' % cmd_args)
    return str(raw_line)
| |
"""
Component to interface with various media players.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/media_player/
"""
import asyncio
from datetime import timedelta
import functools as ft
import collections
import hashlib
import logging
import os
from random import SystemRandom
from aiohttp import web
from aiohttp.hdrs import CONTENT_TYPE, CACHE_CONTROL
import async_timeout
import voluptuous as vol
from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
STATE_OFF, STATE_IDLE, STATE_PLAYING, STATE_UNKNOWN, ATTR_ENTITY_ID,
SERVICE_TOGGLE, SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_VOLUME_UP,
SERVICE_MEDIA_PLAY, SERVICE_MEDIA_SEEK, SERVICE_MEDIA_STOP,
SERVICE_VOLUME_SET, SERVICE_MEDIA_PAUSE, SERVICE_SHUFFLE_SET,
SERVICE_VOLUME_DOWN, SERVICE_VOLUME_MUTE, SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PLAY_PAUSE, SERVICE_MEDIA_PREVIOUS_TRACK)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
from homeassistant.util.async import run_coroutine_threadsafe
# Module logger plus a cryptographically strong RNG used to mint the
# per-player image-proxy access tokens.
_LOGGER = logging.getLogger(__name__)
_RND = SystemRandom()
DOMAIN = 'media_player'
DEPENDENCIES = ['http']
SCAN_INTERVAL = timedelta(seconds=10)
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Image proxy URL template: {0}=entity_id, {1}=access token, {2}=cache key.
ENTITY_IMAGE_URL = '/api/media_player_proxy/{0}?token={1}&cache={2}'
# Keys used within ENTITY_IMAGE_CACHE and its per-URL entries.
CACHE_IMAGES = 'images'
CACHE_MAXSIZE = 'maxsize'
CACHE_LOCK = 'lock'
CACHE_URL = 'url'
CACHE_CONTENT = 'content'
# In-memory cache of fetched media images; the OrderedDict preserves
# insertion order so the oldest entry can be evicted first.
ENTITY_IMAGE_CACHE = {
    CACHE_IMAGES: collections.OrderedDict(),
    CACHE_MAXSIZE: 16
}
# Services defined by this component (in addition to the shared
# homeassistant.const SERVICE_* names imported above).
SERVICE_PLAY_MEDIA = 'play_media'
SERVICE_SELECT_SOURCE = 'select_source'
SERVICE_CLEAR_PLAYLIST = 'clear_playlist'
# Attribute names used in service payloads and entity state attributes.
ATTR_MEDIA_VOLUME_LEVEL = 'volume_level'
ATTR_MEDIA_VOLUME_MUTED = 'is_volume_muted'
ATTR_MEDIA_SEEK_POSITION = 'seek_position'
ATTR_MEDIA_CONTENT_ID = 'media_content_id'
ATTR_MEDIA_CONTENT_TYPE = 'media_content_type'
ATTR_MEDIA_DURATION = 'media_duration'
ATTR_MEDIA_POSITION = 'media_position'
ATTR_MEDIA_POSITION_UPDATED_AT = 'media_position_updated_at'
ATTR_MEDIA_TITLE = 'media_title'
ATTR_MEDIA_ARTIST = 'media_artist'
ATTR_MEDIA_ALBUM_NAME = 'media_album_name'
ATTR_MEDIA_ALBUM_ARTIST = 'media_album_artist'
ATTR_MEDIA_TRACK = 'media_track'
ATTR_MEDIA_SERIES_TITLE = 'media_series_title'
ATTR_MEDIA_SEASON = 'media_season'
ATTR_MEDIA_EPISODE = 'media_episode'
ATTR_MEDIA_CHANNEL = 'media_channel'
ATTR_MEDIA_PLAYLIST = 'media_playlist'
ATTR_APP_ID = 'app_id'
ATTR_APP_NAME = 'app_name'
ATTR_INPUT_SOURCE = 'source'
ATTR_INPUT_SOURCE_LIST = 'source_list'
ATTR_MEDIA_ENQUEUE = 'enqueue'
ATTR_MEDIA_SHUFFLE = 'shuffle'
# Media content type identifiers.
MEDIA_TYPE_MUSIC = 'music'
MEDIA_TYPE_TVSHOW = 'tvshow'
MEDIA_TYPE_VIDEO = 'movie'
MEDIA_TYPE_EPISODE = 'episode'
MEDIA_TYPE_CHANNEL = 'channel'
MEDIA_TYPE_PLAYLIST = 'playlist'
# Feature bitmask flags, OR-ed together in supported_features
# (bit value 64 is unassigned in this block).
SUPPORT_PAUSE = 1
SUPPORT_SEEK = 2
SUPPORT_VOLUME_SET = 4
SUPPORT_VOLUME_MUTE = 8
SUPPORT_PREVIOUS_TRACK = 16
SUPPORT_NEXT_TRACK = 32
SUPPORT_TURN_ON = 128
SUPPORT_TURN_OFF = 256
SUPPORT_PLAY_MEDIA = 512
SUPPORT_VOLUME_STEP = 1024
SUPPORT_SELECT_SOURCE = 2048
SUPPORT_STOP = 4096
SUPPORT_CLEAR_PLAYLIST = 8192
SUPPORT_PLAY = 16384
SUPPORT_SHUFFLE_SET = 32768
# Service call validation schemas.  MEDIA_PLAYER_SCHEMA is the base
# (entity targeting only); the others extend it with service-specific
# required/optional fields.
MEDIA_PLAYER_SCHEMA = vol.Schema({
    ATTR_ENTITY_ID: cv.entity_ids,
})
MEDIA_PLAYER_SET_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float,
})
MEDIA_PLAYER_MUTE_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean,
})
MEDIA_PLAYER_MEDIA_SEEK_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_SEEK_POSITION):
        vol.All(vol.Coerce(float), vol.Range(min=0)),
})
MEDIA_PLAYER_SELECT_SOURCE_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_INPUT_SOURCE): cv.string,
})
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
    vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
    vol.Optional(ATTR_MEDIA_ENQUEUE): cv.boolean,
})
MEDIA_PLAYER_SET_SHUFFLE_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_SHUFFLE): cv.boolean,
})
# Maps each service name to the MediaPlayerDevice coroutine method that
# implements it.  Entries without an explicit 'schema' fall back to the
# base MEDIA_PLAYER_SCHEMA at registration time (see async_setup).
SERVICE_TO_METHOD = {
    SERVICE_TURN_ON: {'method': 'async_turn_on'},
    SERVICE_TURN_OFF: {'method': 'async_turn_off'},
    SERVICE_TOGGLE: {'method': 'async_toggle'},
    SERVICE_VOLUME_UP: {'method': 'async_volume_up'},
    SERVICE_VOLUME_DOWN: {'method': 'async_volume_down'},
    SERVICE_MEDIA_PLAY_PAUSE: {'method': 'async_media_play_pause'},
    SERVICE_MEDIA_PLAY: {'method': 'async_media_play'},
    SERVICE_MEDIA_PAUSE: {'method': 'async_media_pause'},
    SERVICE_MEDIA_STOP: {'method': 'async_media_stop'},
    SERVICE_MEDIA_NEXT_TRACK: {'method': 'async_media_next_track'},
    SERVICE_MEDIA_PREVIOUS_TRACK: {'method': 'async_media_previous_track'},
    SERVICE_CLEAR_PLAYLIST: {'method': 'async_clear_playlist'},
    SERVICE_VOLUME_SET: {
        'method': 'async_set_volume_level',
        'schema': MEDIA_PLAYER_SET_VOLUME_SCHEMA},
    SERVICE_VOLUME_MUTE: {
        'method': 'async_mute_volume',
        'schema': MEDIA_PLAYER_MUTE_VOLUME_SCHEMA},
    SERVICE_MEDIA_SEEK: {
        'method': 'async_media_seek',
        'schema': MEDIA_PLAYER_MEDIA_SEEK_SCHEMA},
    SERVICE_SELECT_SOURCE: {
        'method': 'async_select_source',
        'schema': MEDIA_PLAYER_SELECT_SOURCE_SCHEMA},
    SERVICE_PLAY_MEDIA: {
        'method': 'async_play_media',
        'schema': MEDIA_PLAYER_PLAY_MEDIA_SCHEMA},
    SERVICE_SHUFFLE_SET: {
        'method': 'async_set_shuffle',
        'schema': MEDIA_PLAYER_SET_SHUFFLE_SCHEMA},
}
# Entity attributes mirrored 1:1 from the same-named MediaPlayerDevice
# properties into state_attributes (None values are filtered out there).
ATTR_TO_PROPERTY = [
    ATTR_MEDIA_VOLUME_LEVEL,
    ATTR_MEDIA_VOLUME_MUTED,
    ATTR_MEDIA_CONTENT_ID,
    ATTR_MEDIA_CONTENT_TYPE,
    ATTR_MEDIA_DURATION,
    ATTR_MEDIA_POSITION,
    ATTR_MEDIA_POSITION_UPDATED_AT,
    ATTR_MEDIA_TITLE,
    ATTR_MEDIA_ARTIST,
    ATTR_MEDIA_ALBUM_NAME,
    ATTR_MEDIA_ALBUM_ARTIST,
    ATTR_MEDIA_TRACK,
    ATTR_MEDIA_SERIES_TITLE,
    ATTR_MEDIA_SEASON,
    ATTR_MEDIA_EPISODE,
    ATTR_MEDIA_CHANNEL,
    ATTR_MEDIA_PLAYLIST,
    ATTR_APP_ID,
    ATTR_APP_NAME,
    ATTR_INPUT_SOURCE,
    ATTR_INPUT_SOURCE_LIST,
    ATTR_MEDIA_SHUFFLE,
]
@bind_hass
def is_on(hass, entity_id=None):
    """Return True when the targeted media player is not off.

    With no entity_id given, report whether any media player is not off.
    """
    if entity_id:
        candidates = [entity_id]
    else:
        candidates = hass.states.entity_ids(DOMAIN)
    return any(not hass.states.is_state(eid, STATE_OFF)
               for eid in candidates)
@bind_hass
def turn_on(hass, entity_id=None):
    """Turn on the specified media player, or every media player."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_TURN_ON, service_data)
@bind_hass
def turn_off(hass, entity_id=None):
    """Turn off the specified media player, or every media player."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_TURN_OFF, service_data)
@bind_hass
def toggle(hass, entity_id=None):
    """Toggle the specified media player, or every media player."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_TOGGLE, service_data)
@bind_hass
def volume_up(hass, entity_id=None):
    """Ask the targeted media player(s) to raise the volume one step."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_VOLUME_UP, service_data)
@bind_hass
def volume_down(hass, entity_id=None):
    """Ask the targeted media player(s) to lower the volume one step."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, service_data)
@bind_hass
def mute_volume(hass, mute, entity_id=None):
    """Ask the targeted media player(s) to mute or unmute the volume."""
    if entity_id:
        service_data = {ATTR_MEDIA_VOLUME_MUTED: mute,
                        ATTR_ENTITY_ID: entity_id}
    else:
        service_data = {ATTR_MEDIA_VOLUME_MUTED: mute}
    hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE, service_data)
@bind_hass
def set_volume_level(hass, volume, entity_id=None):
    """Ask the targeted media player(s) to set the volume level (0..1)."""
    if entity_id:
        service_data = {ATTR_MEDIA_VOLUME_LEVEL: volume,
                        ATTR_ENTITY_ID: entity_id}
    else:
        service_data = {ATTR_MEDIA_VOLUME_LEVEL: volume}
    hass.services.call(DOMAIN, SERVICE_VOLUME_SET, service_data)
@bind_hass
def media_play_pause(hass, entity_id=None):
    """Ask the targeted media player(s) to toggle play/pause."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE, service_data)
@bind_hass
def media_play(hass, entity_id=None):
    """Ask the targeted media player(s) to start playing."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY, service_data)
@bind_hass
def media_pause(hass, entity_id=None):
    """Ask the targeted media player(s) to pause playback."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_PAUSE, service_data)
@bind_hass
def media_stop(hass, entity_id=None):
    """Ask the targeted media player(s) to stop playback."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_STOP, service_data)
@bind_hass
def media_next_track(hass, entity_id=None):
    """Ask the targeted media player(s) to skip to the next track."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK, service_data)
@bind_hass
def media_previous_track(hass, entity_id=None):
    """Ask the targeted media player(s) to go back to the previous track."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, service_data)
@bind_hass
def media_seek(hass, position, entity_id=None):
    """Ask the targeted media player(s) to seek within the current media."""
    service_data = {ATTR_MEDIA_SEEK_POSITION: position}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_MEDIA_SEEK, service_data)
@bind_hass
def play_media(hass, media_type, media_id, entity_id=None, enqueue=None):
    """Ask the targeted media player(s) to play the given media item."""
    service_data = {}
    service_data[ATTR_MEDIA_CONTENT_TYPE] = media_type
    service_data[ATTR_MEDIA_CONTENT_ID] = media_id
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    if enqueue:
        service_data[ATTR_MEDIA_ENQUEUE] = enqueue
    hass.services.call(DOMAIN, SERVICE_PLAY_MEDIA, service_data)
@bind_hass
def select_source(hass, source, entity_id=None):
    """Ask the targeted media player(s) to switch to an input source."""
    if entity_id:
        service_data = {ATTR_INPUT_SOURCE: source,
                        ATTR_ENTITY_ID: entity_id}
    else:
        service_data = {ATTR_INPUT_SOURCE: source}
    hass.services.call(DOMAIN, SERVICE_SELECT_SOURCE, service_data)
@bind_hass
def clear_playlist(hass, entity_id=None):
    """Ask the targeted media player(s) to clear their playlist."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_CLEAR_PLAYLIST, service_data)
@bind_hass
def set_shuffle(hass, shuffle, entity_id=None):
    """Ask the targeted media player(s) to enable or disable shuffle."""
    if entity_id:
        service_data = {ATTR_MEDIA_SHUFFLE: shuffle,
                        ATTR_ENTITY_ID: entity_id}
    else:
        service_data = {ATTR_MEDIA_SHUFFLE: shuffle}
    hass.services.call(DOMAIN, SERVICE_SHUFFLE_SET, service_data)
@asyncio.coroutine
def async_setup(hass, config):
    """Track states and offer events for media_players.

    Sets up the entity component, registers the image proxy view and
    wires every service in SERVICE_TO_METHOD to a shared handler.
    """
    component = EntityComponent(
        logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
    # Serve media images through the token-authenticated proxy view.
    hass.http.register_view(MediaPlayerImageView(component.entities))
    yield from component.async_setup(config)
    # Blocking YAML read is pushed off the event loop via async_add_job.
    descriptions = yield from hass.async_add_job(
        load_yaml_config_file, os.path.join(
            os.path.dirname(__file__), 'services.yaml'))
    @asyncio.coroutine
    def async_service_handler(service):
        """Map services to methods on MediaPlayerDevice."""
        method = SERVICE_TO_METHOD.get(service.service)
        if not method:
            return
        # Translate the validated service data into keyword arguments for
        # the target method; services not listed below take no parameters.
        params = {}
        if service.service == SERVICE_VOLUME_SET:
            params['volume'] = service.data.get(ATTR_MEDIA_VOLUME_LEVEL)
        elif service.service == SERVICE_VOLUME_MUTE:
            params['mute'] = service.data.get(ATTR_MEDIA_VOLUME_MUTED)
        elif service.service == SERVICE_MEDIA_SEEK:
            params['position'] = service.data.get(ATTR_MEDIA_SEEK_POSITION)
        elif service.service == SERVICE_SELECT_SOURCE:
            params['source'] = service.data.get(ATTR_INPUT_SOURCE)
        elif service.service == SERVICE_PLAY_MEDIA:
            params['media_type'] = \
                service.data.get(ATTR_MEDIA_CONTENT_TYPE)
            params['media_id'] = service.data.get(ATTR_MEDIA_CONTENT_ID)
            params[ATTR_MEDIA_ENQUEUE] = \
                service.data.get(ATTR_MEDIA_ENQUEUE)
        elif service.service == SERVICE_SHUFFLE_SET:
            params[ATTR_MEDIA_SHUFFLE] = \
                service.data.get(ATTR_MEDIA_SHUFFLE)
        target_players = component.async_extract_from_service(service)
        update_tasks = []
        for player in target_players:
            yield from getattr(player, method['method'])(**params)
            # Only entities that are polled need an explicit refresh here.
            if not player.should_poll:
                continue
            update_tasks.append(player.async_update_ha_state(True))
        if update_tasks:
            yield from asyncio.wait(update_tasks, loop=hass.loop)
    # Register every media_player service with its validation schema,
    # defaulting to the base entity-targeting schema.
    for service in SERVICE_TO_METHOD:
        schema = SERVICE_TO_METHOD[service].get(
            'schema', MEDIA_PLAYER_SCHEMA)
        hass.services.async_register(
            DOMAIN, service, async_service_handler,
            descriptions.get(service), schema=schema)
    return True
class MediaPlayerDevice(Entity):
    """ABC for media player devices.

    Platforms override the properties and the synchronous command
    methods; the async_* wrappers dispatch those to an executor job.
    """
    # Lazily-created token that authorizes access to the image proxy view.
    _access_token = None
    # pylint: disable=no-self-use
    # Implement these for your media player
    @property
    def state(self):
        """State of the player."""
        return STATE_UNKNOWN
    @property
    def access_token(self):
        """Access token for this media player."""
        if self._access_token is None:
            # 256 random bits from SystemRandom, hashed into a hex string.
            self._access_token = hashlib.sha256(
                _RND.getrandbits(256).to_bytes(32, 'little')).hexdigest()
        return self._access_token
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return None
    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return None
    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        return None
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return None
    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return None
    @property
    def media_position(self):
        """Position of current playing media in seconds."""
        return None
    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid.
        Returns value from homeassistant.util.dt.utcnow().
        """
        return None
    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return None
    @property
    def media_image_hash(self):
        """Hash value for media image."""
        url = self.media_image_url
        if url is not None:
            # Short, stable cache key derived from the image URL.
            return hashlib.sha256(url.encode('utf-8')).hexdigest()[:16]
        return None
    @asyncio.coroutine
    def async_get_media_image(self):
        """Fetch media image of current playing image."""
        url = self.media_image_url
        if url is None:
            return None, None
        return (yield from _async_fetch_image(self.hass, url))
    @property
    def media_title(self):
        """Title of current playing media."""
        return None
    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        return None
    @property
    def media_album_name(self):
        """Album name of current playing media, music track only."""
        return None
    @property
    def media_album_artist(self):
        """Album artist of current playing media, music track only."""
        return None
    @property
    def media_track(self):
        """Track number of current playing media, music track only."""
        return None
    @property
    def media_series_title(self):
        """Title of series of current playing media, TV show only."""
        return None
    @property
    def media_season(self):
        """Season of current playing media, TV show only."""
        return None
    @property
    def media_episode(self):
        """Episode of current playing media, TV show only."""
        return None
    @property
    def media_channel(self):
        """Channel currently playing."""
        return None
    @property
    def media_playlist(self):
        """Title of Playlist currently playing."""
        return None
    @property
    def app_id(self):
        """ID of the current running app."""
        return None
    @property
    def app_name(self):
        """Name of the current running app."""
        return None
    @property
    def source(self):
        """Name of the current input source."""
        return None
    @property
    def source_list(self):
        """List of available input sources."""
        return None
    @property
    def shuffle(self):
        """Boolean if shuffle is enabled."""
        return None
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return 0
    def turn_on(self):
        """Turn the media player on."""
        raise NotImplementedError()
    def async_turn_on(self):
        """Turn the media player on.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.turn_on)
    def turn_off(self):
        """Turn the media player off."""
        raise NotImplementedError()
    def async_turn_off(self):
        """Turn the media player off.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.turn_off)
    def mute_volume(self, mute):
        """Mute the volume."""
        raise NotImplementedError()
    def async_mute_volume(self, mute):
        """Mute the volume.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.mute_volume, mute)
    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        raise NotImplementedError()
    def async_set_volume_level(self, volume):
        """Set volume level, range 0..1.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.set_volume_level, volume)
    def media_play(self):
        """Send play command."""
        raise NotImplementedError()
    def async_media_play(self):
        """Send play command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.media_play)
    def media_pause(self):
        """Send pause command."""
        raise NotImplementedError()
    def async_media_pause(self):
        """Send pause command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.media_pause)
    def media_stop(self):
        """Send stop command."""
        raise NotImplementedError()
    def async_media_stop(self):
        """Send stop command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.media_stop)
    def media_previous_track(self):
        """Send previous track command."""
        raise NotImplementedError()
    def async_media_previous_track(self):
        """Send previous track command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.media_previous_track)
    def media_next_track(self):
        """Send next track command."""
        raise NotImplementedError()
    def async_media_next_track(self):
        """Send next track command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.media_next_track)
    def media_seek(self, position):
        """Send seek command."""
        raise NotImplementedError()
    def async_media_seek(self, position):
        """Send seek command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.media_seek, position)
    def play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media."""
        raise NotImplementedError()
    def async_play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media.
        This method must be run in the event loop and returns a coroutine.
        """
        # ft.partial is needed to forward the keyword arguments.
        return self.hass.async_add_job(
            ft.partial(self.play_media, media_type, media_id, **kwargs))
    def select_source(self, source):
        """Select input source."""
        raise NotImplementedError()
    def async_select_source(self, source):
        """Select input source.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.select_source, source)
    def clear_playlist(self):
        """Clear players playlist."""
        raise NotImplementedError()
    def async_clear_playlist(self):
        """Clear players playlist.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.clear_playlist)
    def set_shuffle(self, shuffle):
        """Enable/disable shuffle mode."""
        raise NotImplementedError()
    def async_set_shuffle(self, shuffle):
        """Enable/disable shuffle mode.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.set_shuffle, shuffle)
    # No need to overwrite these.
    @property
    def support_play(self):
        """Boolean if play is supported."""
        return bool(self.supported_features & SUPPORT_PLAY)
    @property
    def support_pause(self):
        """Boolean if pause is supported."""
        return bool(self.supported_features & SUPPORT_PAUSE)
    @property
    def support_stop(self):
        """Boolean if stop is supported."""
        return bool(self.supported_features & SUPPORT_STOP)
    @property
    def support_seek(self):
        """Boolean if seek is supported."""
        return bool(self.supported_features & SUPPORT_SEEK)
    @property
    def support_volume_set(self):
        """Boolean if setting volume is supported."""
        return bool(self.supported_features & SUPPORT_VOLUME_SET)
    @property
    def support_volume_mute(self):
        """Boolean if muting volume is supported."""
        return bool(self.supported_features & SUPPORT_VOLUME_MUTE)
    @property
    def support_previous_track(self):
        """Boolean if previous track command supported."""
        return bool(self.supported_features & SUPPORT_PREVIOUS_TRACK)
    @property
    def support_next_track(self):
        """Boolean if next track command supported."""
        return bool(self.supported_features & SUPPORT_NEXT_TRACK)
    @property
    def support_play_media(self):
        """Boolean if play media command supported."""
        return bool(self.supported_features & SUPPORT_PLAY_MEDIA)
    @property
    def support_select_source(self):
        """Boolean if select source command supported."""
        return bool(self.supported_features & SUPPORT_SELECT_SOURCE)
    @property
    def support_clear_playlist(self):
        """Boolean if clear playlist command supported."""
        return bool(self.supported_features & SUPPORT_CLEAR_PLAYLIST)
    @property
    def support_shuffle_set(self):
        """Boolean if shuffle is supported."""
        return bool(self.supported_features & SUPPORT_SHUFFLE_SET)
    def async_toggle(self):
        """Toggle the power on the media player.
        This method must be run in the event loop and returns a coroutine.
        """
        # A platform-provided synchronous toggle() takes precedence.
        if hasattr(self, 'toggle'):
            # pylint: disable=no-member
            return self.hass.async_add_job(self.toggle)
        if self.state in [STATE_OFF, STATE_IDLE]:
            return self.async_turn_on()
        return self.async_turn_off()
    @asyncio.coroutine
    def async_volume_up(self):
        """Turn volume up for media player.
        This method is a coroutine.
        """
        if hasattr(self, 'volume_up'):
            # pylint: disable=no-member
            yield from self.hass.async_add_job(self.volume_up)
            return
        # NOTE(review): assumes volume_level is not None here (i.e. the
        # platform reports a level); otherwise this comparison raises.
        if self.volume_level < 1:
            yield from self.async_set_volume_level(
                min(1, self.volume_level + .1))
    @asyncio.coroutine
    def async_volume_down(self):
        """Turn volume down for media player.
        This method is a coroutine.
        """
        if hasattr(self, 'volume_down'):
            # pylint: disable=no-member
            yield from self.hass.async_add_job(self.volume_down)
            return
        # NOTE(review): same volume_level-not-None assumption as volume up.
        if self.volume_level > 0:
            yield from self.async_set_volume_level(
                max(0, self.volume_level - .1))
    def async_media_play_pause(self):
        """Play or pause the media player.
        This method must be run in the event loop and returns a coroutine.
        """
        # A platform-provided synchronous media_play_pause() takes precedence.
        if hasattr(self, 'media_play_pause'):
            # pylint: disable=no-member
            return self.hass.async_add_job(self.media_play_pause)
        if self.state == STATE_PLAYING:
            return self.async_media_pause()
        return self.async_media_play()
    @property
    def entity_picture(self):
        """Return image of the media playing."""
        if self.state == STATE_OFF:
            return None
        image_hash = self.media_image_hash
        if image_hash is None:
            return None
        # Proxy URL embedding the per-player access token and a cache key.
        return ENTITY_IMAGE_URL.format(
            self.entity_id, self.access_token, image_hash)
    @property
    def state_attributes(self):
        """Return the state attributes."""
        if self.state == STATE_OFF:
            return None
        # Only expose attributes whose backing property returns a value.
        state_attr = {
            attr: getattr(self, attr) for attr
            in ATTR_TO_PROPERTY if getattr(self, attr) is not None
        }
        return state_attr
    def preload_media_image_url(self, url):
        """Preload and cache a media image for future use.

        Blocking helper: schedules the async fetch on the event loop and
        waits for it to finish (must not be called from the loop itself).
        """
        run_coroutine_threadsafe(
            _async_fetch_image(self.hass, url), self.hass.loop
        ).result()
@asyncio.coroutine
def _async_fetch_image(hass, url):
    """Fetch image.
    Images are cached in memory (the images are typically 10-100kB in size).

    Returns a (content, content_type) tuple; both are None when the
    request times out or the server does not answer 200.
    """
    cache_images = ENTITY_IMAGE_CACHE[CACHE_IMAGES]
    cache_maxsize = ENTITY_IMAGE_CACHE[CACHE_MAXSIZE]
    # Create the per-URL cache slot (with its own lock) first, so that
    # concurrent requests for the same URL trigger only one fetch.
    if url not in cache_images:
        cache_images[url] = {CACHE_LOCK: asyncio.Lock(loop=hass.loop)}
    with (yield from cache_images[url][CACHE_LOCK]):
        # A second waiter sees the content stored by the first one.
        if CACHE_CONTENT in cache_images[url]:
            return cache_images[url][CACHE_CONTENT]
        content, content_type = (None, None)
        websession = async_get_clientsession(hass)
        try:
            with async_timeout.timeout(10, loop=hass.loop):
                response = yield from websession.get(url)
                if response.status == 200:
                    content = yield from response.read()
                    content_type = response.headers.get(CONTENT_TYPE)
                    if content_type:
                        # Strip any "; charset=..." suffix from the header.
                        content_type = content_type.split(';')[0]
                    cache_images[url][CACHE_CONTENT] = content, content_type
        except asyncio.TimeoutError:
            # Best-effort fetch: a timeout simply yields (None, None).
            pass
        # Evict the oldest entries (OrderedDict insertion order) when the
        # cache grows past its configured maximum size.
        while len(cache_images) > cache_maxsize:
            cache_images.popitem(last=False)
        return content, content_type
class MediaPlayerImageView(HomeAssistantView):
    """Media player view to serve an image."""
    # Authentication is handled manually in get() so that the per-player
    # access token can be accepted instead of regular API auth.
    requires_auth = False
    url = '/api/media_player_proxy/{entity_id}'
    name = 'api:media_player:image'
    def __init__(self, entities):
        """Initialize a media player view.

        entities: the component's entity_id -> MediaPlayerDevice mapping.
        """
        self.entities = entities
    @asyncio.coroutine
    def get(self, request, entity_id):
        """Start a get request."""
        player = self.entities.get(entity_id)
        if player is None:
            # Reveal the entity's absence (404) only to authenticated
            # callers; everyone else just gets 401.
            status = 404 if request[KEY_AUTHENTICATED] else 401
            return web.Response(status=status)
        # Accept either regular API auth or the player's image token.
        authenticated = (request[KEY_AUTHENTICATED] or
                         request.query.get('token') == player.access_token)
        if not authenticated:
            return web.Response(status=401)
        data, content_type = yield from player.async_get_media_image()
        if data is None:
            return web.Response(status=500)
        # The cache key in the URL changes with the image, so clients may
        # cache aggressively.
        headers = {CACHE_CONTROL: 'max-age=3600'}
        return web.Response(
            body=data, content_type=content_type, headers=headers)
| |
"""
Dogleg algorithm with rectangular trust regions for least-squares minimization.
The description of the algorithm can be found in [Voglis]_. The algorithm does
trust-region iterations, but the shape of trust regions is rectangular as
opposed to conventional elliptical. The intersection of a trust region and
an initial feasible region is again some rectangle. Thus, on each iteration a
bound-constrained quadratic optimization problem is solved.
A quadratic problem is solved by well-known dogleg approach, where the
function is minimized along piecewise-linear "dogleg" path [NumOpt]_,
Chapter 4. If Jacobian is not rank-deficient then the function is decreasing
along this path, and optimization amounts to simply following along this
path as long as a point stays within the bounds. A constrained Cauchy step
(along the anti-gradient) is considered for safety in rank-deficient cases;
in these situations the convergence might be slow.
If during iterations some variable hit the initial bound and the component
of anti-gradient points outside the feasible region, then a next dogleg step
won't make any progress. At this state such variables satisfy first-order
optimality conditions and they are excluded before computing a next dogleg
step.
Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense
Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for
dense and sparse matrices, or Jacobian being LinearOperator). The second
option allows solving very large problems (up to a couple of million
residuals on a regular PC), provided the Jacobian matrix is sufficiently
sparse. But note that dogbox is not very good for solving problems with
large number of constraints, because of variables exclusion-inclusion on each
iteration (a required number of function evaluations might be high or accuracy
of a solution will be poor), thus its large-scale usage is probably limited
to unconstrained problems.
References
----------
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
Approach for Unconstrained and Bound Constrained Nonlinear
Optimization", WSEAS International Conference on Applied
Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import lstsq, norm
from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
from scipy.optimize import OptimizeResult
from .common import (
step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
build_quadratic_1d, minimize_quadratic_1d, compute_grad,
compute_jac_scale, check_termination, scale_for_robust_loss_function,
print_header_nonlinear, print_iteration_nonlinear)
def lsmr_operator(Jop, d, active_set):
    """Compute LinearOperator to use in LSMR by dogbox algorithm.

    `active_set` mask is used to exclude active variables from computations
    of matrix-vector products.

    Parameters
    ----------
    Jop : LinearOperator
        Operator computing products with the Jacobian.
    d : ndarray
        Diagonal scaling applied to the variables.
    active_set : ndarray of bool
        Mask of variables currently held at a bound.
    """
    m, n = Jop.shape

    def matvec(x):
        # Zero components for active variables so the product behaves as if
        # the corresponding columns of J were removed.
        x_free = x.ravel().copy()
        x_free[active_set] = 0
        # Bug fix: the original passed the unmasked `x` here, silently
        # ignoring the active-set mask in forward products.
        return Jop.matvec(x_free * d)

    def rmatvec(x):
        r = d * Jop.rmatvec(x)
        r[active_set] = 0
        return r

    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
def find_intersection(x, tr_bounds, lb, ub):
    """Find intersection of trust-region bounds and initial bounds.

    Returns
    -------
    lb_total, ub_total : ndarray with shape of x
        Lower and upper bounds of the intersection region.
    orig_l, orig_u : ndarray of bool with shape of x
        True means that an original bound is taken as a corresponding bound
        in the intersection region.
    tr_l, tr_u : ndarray of bool with shape of x
        True means that a trust-region bound is taken as a corresponding bound
        in the intersection region.
    """
    # Shift the original bounds so that x is the origin, then clip the
    # shifted box against the symmetric trust-region box [-tr, +tr].
    shifted_l = lb - x
    shifted_u = ub - x
    lb_total = np.maximum(shifted_l, -tr_bounds)
    ub_total = np.minimum(shifted_u, tr_bounds)
    # Record which side "won" the clipping on each component.
    orig_l = np.equal(lb_total, shifted_l)
    orig_u = np.equal(ub_total, shifted_u)
    tr_l = np.equal(lb_total, -tr_bounds)
    tr_u = np.equal(ub_total, tr_bounds)
    return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
    """Find dogleg step in a rectangular region.

    Returns
    -------
    step : ndarray, shape (n,)
        Computed dogleg step.
    bound_hits : ndarray of int, shape (n,)
        Per-variable indicator of which initial bound the step lands on:
        0 - none, -1 - lower bound, 1 - upper bound.
    tr_hit : bool
        Whether the step hit the boundary of the trust-region.
    """
    lo, hi, at_orig_l, at_orig_u, at_tr_l, at_tr_u = find_intersection(
        x, tr_bounds, lb, ub)
    bound_hits = np.zeros_like(x, dtype=int)

    # Full Newton step inside the intersection region: take it as-is.
    if in_bounds(newton_step, lo, hi):
        return newton_step, bound_hits, False

    # Maximum feasible travel along the anti-gradient from the origin.
    g_limit, _ = step_size_to_bound(np.zeros_like(x), -g, lo, hi)

    # The classical dogleg algorithm would check if Cauchy step fits into
    # the bounds, and just return its constrained version if not. But in a
    # rectangular trust region it makes sense to try to improve the
    # constrained Cauchy step too, so both cases are handled uniformly.
    t_opt = minimize_quadratic_1d(a, b, 0, g_limit)[0]
    cauchy = -t_opt * g

    # Walk from the (possibly constrained) Cauchy point toward the Newton
    # point until a bound of the intersection region is reached.
    direction = newton_step - cauchy
    alpha, hit_signs = step_size_to_bound(cauchy, direction, lo, hi)

    bound_hits[(hit_signs < 0) & at_orig_l] = -1
    bound_hits[(hit_signs > 0) & at_orig_u] = 1
    tr_hit = np.any(((hit_signs < 0) & at_tr_l) | ((hit_signs > 0) & at_tr_u))

    return cauchy + alpha * direction, bound_hits, tr_hit
def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
           loss_function, tr_solver, tr_options, verbose):
    """Minimize the sum of squares of `fun` subject to box bounds using
    rectangular trust regions ("dogbox" algorithm, see module docstring).

    `f0` / `J0` are the residual vector and Jacobian at `x0`; `fun` and
    `jac` are callables producing them at a new point.  Returns an
    `OptimizeResult` with status 0 (max_nfev reached), 1 (gtol), or the
    value reported by `check_termination` (ftol/xtol).
    """
    f = f0
    f_true = f.copy()
    nfev = 1
    J = J0
    njev = 1
    if loss_function is not None:
        # Robust loss: cost and (J, f) are rescaled by rho = loss(f).
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)
    g = compute_grad(J, f)
    # x_scale == 'jac' means the scaling is recomputed from the Jacobian
    # columns on every accepted step.
    jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale
    # Initial trust-region radius in scaled variables.
    Delta = norm(x0 * scale_inv, ord=np.inf)
    if Delta == 0:
        Delta = 1.0
    # on_bound: -1 at lower bound, 1 at upper bound, 0 strictly inside.
    on_bound = np.zeros_like(x0, dtype=int)
    on_bound[np.equal(x0, lb)] = -1
    on_bound[np.equal(x0, ub)] = 1
    x = x0
    step = np.empty_like(x0)
    if max_nfev is None:
        max_nfev = x0.size * 100
    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None
    if verbose == 2:
        print_header_nonlinear()
    while True:
        # Variables at a bound whose anti-gradient points outward are
        # "active" and excluded from the dogleg step (first-order optimal).
        active_set = on_bound * g < 0
        free_set = ~active_set
        g_free = g[free_set]
        g_full = g.copy()
        g[active_set] = 0
        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1
        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)
        if termination_status is not None or nfev == max_nfev:
            break
        x_free = x[free_set]
        lb_free = lb[free_set]
        ub_free = ub[free_set]
        scale_free = scale[free_set]
        # Compute (Gauss-)Newton and build quadratic model for Cauchy step.
        if tr_solver == 'exact':
            J_free = J[:, free_set]
            newton_step = lstsq(J_free, -f, rcond=-1)[0]
            # Coefficients for the quadratic model along the anti-gradient.
            a, b = build_quadratic_1d(J_free, g_free, -g_free)
        elif tr_solver == 'lsmr':
            Jop = aslinearoperator(J)
            # We compute lsmr step in scaled variables and then
            # transform back to normal variables, if lsmr would give exact lsq
            # solution, this would be equivalent to not doing any
            # transformations, but from experience it's better this way.
            # We pass active_set to make computations as if we selected
            # the free subset of J columns, but without actually doing any
            # slicing, which is expensive for sparse matrices and impossible
            # for LinearOperator.
            lsmr_op = lsmr_operator(Jop, scale, active_set)
            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
            newton_step *= scale_free
            # Components of g for active variables were zeroed, so this call
            # is correct and equivalent to using J_free and g_free.
            a, b = build_quadratic_1d(Jop, g, -g)
        # Inner loop: shrink the trust region until the step actually
        # reduces the cost (or the evaluation budget is exhausted).
        actual_reduction = -1.0
        while actual_reduction <= 0 and nfev < max_nfev:
            tr_bounds = Delta * scale_free
            step_free, on_bound_free, tr_hit = dogleg_step(
                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)
            step.fill(0.0)
            step[free_set] = step_free
            if tr_solver == 'exact':
                predicted_reduction = -evaluate_quadratic(J_free, g_free,
                                                          step_free)
            elif tr_solver == 'lsmr':
                predicted_reduction = -evaluate_quadratic(Jop, g, step)
            x_new = x + step
            f_new = fun(x_new)
            nfev += 1
            step_h_norm = norm(step * scale_inv, ord=np.inf)
            if not np.all(np.isfinite(f_new)):
                # Non-finite residuals: shrink the region and retry.
                Delta = 0.25 * step_h_norm
                continue
            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new
            Delta, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, tr_hit
            )
            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
            if termination_status is not None:
                break
        if actual_reduction > 0:
            # Accept the step and refresh the model at the new point.
            on_bound[free_set] = on_bound_free
            x = x_new
            # Set variables exactly at the boundary.
            mask = on_bound == -1
            x[mask] = lb[mask]
            mask = on_bound == 1
            x[mask] = ub[mask]
            f = f_new
            f_true = f.copy()
            cost = cost_new
            J = jac(x, f)
            njev += 1
            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)
            g = compute_grad(J, f)
            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0
        iteration += 1
    if termination_status is None:
        termination_status = 0
    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
        active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
| |
import re, sys
class Symbol:
    """Label of a tree node: a tag (nonterminal/POS) optionally paired with
    either a terminal token or a substitution-variable index, never both.
    """

    def __init__(self, nonterm, term=None, var=None):
        # A symbol may carry a token OR a variable index, not both.
        assert not (term is not None and var is not None)
        self.tag = nonterm
        self.token = term
        self.variable = var

    def is_variable(self):
        """Return True if this symbol is a substitution variable."""
        return self.variable is not None

    def __eq__(self, other):
        return self.tag == other.tag and self.token == other.token and self.variable == other.variable

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.tag, self.token, self.variable))

    def __repr__(self):
        return str(self)

    def __cmp__(self, other):
        # Fix: the builtin cmp() was removed in Python 3; compare the key
        # tuples directly.  (Ordering mixed None/str fields raises
        # TypeError under Python 3, as any ordering of them would.)
        a = (self.tag, self.token, self.variable)
        b = (other.tag, other.token, other.variable)
        if a == b:
            return 0
        return -1 if a < b else 1

    def __str__(self):
        # Render as "TAG", "TAG token" or "TAG #var".  (A dead
        # `if False:  # DEPENDENCY` alternative rendering was removed.)
        parts = []
        if self.tag:
            parts.append(str(self.tag))
        if self.token:
            parts.append(str(self.token))
        elif self.variable is not None:
            parts.append('#%d' % self.variable)
        return ' '.join(parts)
class TreeNode:
    """A node of a phrase-structure or dependency tree.

    `data` is typically a Symbol.  `order` is the position of the node's own
    label among its children when linearized: <= 0 means "before all
    children" (so traversal() is a preorder walk); a positive value k means
    the label is emitted after the k-th child (inorder, for dep. trees).
    """

    def __init__(self, data, children=None, order=-1):
        self.data = data
        self.children = children if children else []
        self.order = order
        self.parent = None
        # NOTE(review): children passed to the constructor do not get their
        # .parent pointer set; only insert() maintains the back-pointer.

    def insert(self, child):
        """Append child and set its parent back-pointer."""
        self.children.append(child)
        child.parent = self

    def leaves(self):
        """Data of all leaf nodes, in linear order."""
        return [node.data for node in self.xtraversal() if not node.children]

    def leaf_nodes(self):
        """All leaf nodes, in linear order."""
        return [node for node in self.xtraversal() if not node.children]

    def max_depth(self):
        """Depth of the deepest path; a childless node with a token counts
        as depth 2 (the token acting as an implicit leaf)."""
        d = 1
        for child in self.children:
            d = max(d, 1 + child.max_depth())
        if not self.children and self.data.token:
            d = 2
        return d

    def max_width(self):
        """Number of leaves under this node (at least 1)."""
        w = sum(child.max_width() for child in self.children)
        return max(1, w)

    def num_internal_nodes(self):
        """Count nodes that either have children or carry a token."""
        if self.children:
            return 1 + sum(c.num_internal_nodes() for c in self.children)
        elif self.data.token:
            return 1
        else:
            return 0

    def postorder_traversal(self, visit):
        """
        Postorder traversal; no guarantee that terminals will be read in the
        correct order for dep. trees.
        """
        for child in self.children:
            # Fix: previously recursed via traversal(), which is a
            # pre/inorder walk, so the result was not a postorder at all.
            child.postorder_traversal(visit)
        visit(self)

    def traversal(self, visit):
        """
        Preorder for phrase structure trees, and inorder for dependency trees.
        In both cases the terminals will be read off in the correct order.
        """
        visited_self = False
        if self.order <= 0:
            visited_self = True
            visit(self)
        for i, child in enumerate(self.children):
            child.traversal(visit)
            if i + 1 == self.order:
                visited_self = True
                visit(self)
        assert visited_self

    def xtraversal(self):
        """Generator version of traversal()."""
        visited_self = False
        if self.order <= 0:
            visited_self = True
            yield self
        for i, child in enumerate(self.children):
            for d in child.xtraversal():
                yield d
            if i + 1 == self.order:
                visited_self = True
                yield self
        assert visited_self

    def xpostorder_traversal(self):
        """Generator yielding nodes in postorder.  (The class previously
        defined this method twice with identical behavior; deduplicated.)"""
        for child in self.children:
            for d in child.xpostorder_traversal():
                yield d
        yield self

    def edges(self):
        """List of (head_data, child_data) edges, preorder."""
        es = []
        self.traverse_edges(lambda h, c: es.append((h, c)))
        return es

    def traverse_edges(self, visit):
        for child in self.children:
            visit(self.data, child.data)
            child.traverse_edges(visit)

    def subtrees(self, include_self=False):
        """All descendant nodes (iterative DFS; right children first)."""
        st = []
        stack = [self] if include_self else self.children[:]
        while stack:
            node = stack.pop()
            st.append(node)
            stack.extend(node.children)
        return st

    def find_parent(self, node):
        """Return (parent, child_index) of node within this subtree."""
        try:
            index = self.children.index(node)
            return self, index
        except ValueError:
            for child in self.children:
                if isinstance(child, TreeNode):
                    r = child.find_parent(node)
                    if r:
                        return r
            return None

    def is_ancestor_of(self, node):
        """True if node equals self or some descendant (by __eq__)."""
        if self == node:
            return True
        # Fix: previously recursed as child.is_ancestor_of(child), which is
        # trivially True for any node with children.
        return any(child.is_ancestor_of(node) for child in self.children)

    def find(self, node):
        """Return the first subtree (or leaf child) equal to node, or None."""
        if self == node:
            return self
        for child in self.children:
            if isinstance(child, TreeNode):
                r = child.find(node)
                if r:
                    return r
            elif child == node:
                # Fix: previously returned the unbound name `r` here,
                # raising NameError for matching non-TreeNode children.
                return child
        return None

    def equals_ignorecase(self, other):
        """Structural equality, comparing string leaves case-insensitively."""
        if not isinstance(other, TreeNode):
            return False
        if self.data != other.data:
            return False
        if len(self.children) != len(other.children):
            return False
        for mc, oc in zip(self.children, other.children):
            if isinstance(mc, TreeNode):
                if not mc.equals_ignorecase(oc):
                    return False
            elif mc.lower() != oc.lower():
                return False
        return True

    def node_number(self, numbering, next=0):
        """Assign linearization indices: numbering[id(node)] = position."""
        if self.order <= 0:
            numbering[id(self)] = next
            next += 1
        for i, child in enumerate(self.children):
            next = child.node_number(numbering, next)
            if i + 1 == self.order:
                numbering[id(self)] = next
                next += 1
        return next

    def display_conll(self, out):
        """Write the tree below the (single) root child in CoNLL format."""
        numbering = {}
        self.node_number(numbering)
        self.children[0].traversal(lambda x: \
            out.write('%d\t%s\t%s\t%s\t%s\t_\t%d\tLAB\n' \
                      % (numbering[id(x)], x.data.token, x.data.token,
                         x.data.tag, x.data.tag, numbering[id(x.parent)])))
        out.write('\n')

    def size(self):
        """Total number of nodes in this subtree."""
        return 1 + sum(child.size() for child in self.children)

    def __eq__(self, other):
        return (isinstance(other, TreeNode) and self.data == other.data
                and self.children == other.children)

    @staticmethod
    def _cmp(a, b):
        """Three-way compare without the removed Python 2 cmp() builtin;
        delegates to a.__cmp__ when defined (Symbol, TreeNode)."""
        m = getattr(a, '__cmp__', None)
        if m is not None:
            return m(b)
        if a == b:
            return 0
        return -1 if a < b else 1

    def __cmp__(self, other):
        if not isinstance(other, TreeNode):
            return 1
        n = self._cmp(self.data, other.data)
        if n != 0:
            return n
        n = len(self.children) - len(other.children)
        if n != 0:
            return n
        for sc, oc in zip(self.children, other.children):
            n = self._cmp(sc, oc)
            if n != 0:
                return n
        return 0

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.data, tuple(self.children)))

    def __repr__(self):
        return str(self)

    def __str__(self):
        # Bracketed rendering; the node's own label is placed according
        # to self.order (see class docstring).
        s = '('
        space = False
        if self.order <= 0:
            s += str(self.data)
            space = True
        for i, child in enumerate(self.children):
            if space:
                s += ' '
            s += str(child)
            space = True
            if i + 1 == self.order:
                s += ' ' + str(self.data)
        return s + ')'
def read_PSTs(fname):
    """Read one phrase-structure tree per line from fname.

    Returns a list of parse_PST() results (entries may be None for
    blank/'null' lines).  Fix: uses a with-statement so the file is closed
    even if parsing raises.
    """
    with open(fname) as infile:
        return [parse_PST(line.strip()) for line in infile]
def parse_PST_multiline(infile, hash_is_var=True):
    """Read one bracketed tree that may span several lines.

    Lines are accumulated until the parentheses balance; returns None at
    end of file, otherwise the parse_PST() result for the joined text.
    """
    pieces = []
    depth = 0
    for raw in iter(infile.readline, ''):
        pieces.append(raw.rstrip())
        depth += pieces[-1].count('(') - pieces[-1].count(')')
        if depth == 0:
            # Leading space mirrors the original accumulation format.
            return parse_PST(' ' + ' '.join(pieces), hash_is_var)
    return None
def parse_PST(line, hash_is_var=True):
    """Parse a single bracketed phrase-structure tree, e.g. "(S (NP a) ...)".

    Returns the tree below the implicit TOP root, or None for an empty or
    'null' line.  When hash_is_var, tokens of the form "#N" become
    substitution variables.
    """
    line = line.rstrip()
    if not line or line.lower() == 'null':
        return None
    # allow either (a/DT) or (DT a)
    #parts_re = re.compile(r'(\(*)([^/)]*)(?:/([^)]*))?(\)*)$')
    # only allow (DT a)
    parts_re = re.compile(r'(\(*)([^)]*)(\)*)$')
    root = TreeNode(Symbol('TOP'))
    stack = [root]
    for part in line.rstrip().split():
        # groups: run of '(', the tag-or-token text, run of ')'.
        m = parts_re.match(part)
        #opening, tok_or_tag, tag, closing = m.groups()
        opening, tok_or_tag, closing = m.groups()
        tag = None
        #print 'token', part, 'bits', m.groups()
        # One new node per opening bracket; deepest becomes stack top.
        for i in opening:
            node = TreeNode(Symbol(None))
            stack[-1].insert(node)
            stack.append(node)
        if tag:
            # NOTE(review): dead branch — `tag` is always None while the
            # slash-format regex above stays disabled.
            stack[-1].data.tag = tag
            if hash_is_var and tok_or_tag.startswith('#'):
                stack[-1].data.variable = int(tok_or_tag[1:])
            else:
                stack[-1].data.token = tok_or_tag
        else:
            # First text after '(' is the tag; subsequent text is the token
            # (or a "#N" variable).
            if stack[-1].data.tag == None:
                stack[-1].data.tag = tok_or_tag
            else:
                if hash_is_var and tok_or_tag.startswith('#'):
                    try:
                        stack[-1].data.variable = int(tok_or_tag[1:])
                    except ValueError: # it's really a token!
                        #print >>sys.stderr, 'Warning: # used for token:', tok_or_tag
                        stack[-1].data.token = tok_or_tag
                else:
                    stack[-1].data.token = tok_or_tag
        # Pop one level per closing bracket.
        for i in closing:
            stack.pop()
    #assert str(root.children[0]) == line
    return root.children[0]
def read_DTs(fname):
    """Read dependency trees (CoNLL-style blocks) from fname until EOF.

    Fix: uses a with-statement so the file is closed even if parse_DT
    raises mid-stream.
    """
    trees = []
    with open(fname) as infile:
        while True:
            t = parse_DT(infile)
            if t is None:
                break
            trees.append(t)
    return trees
def read_bracketed_DTs(fname):
    """Read one bracketed dependency tree per line from fname.

    Fix: uses a with-statement so the file is closed even if parsing raises.
    """
    with open(fname) as infile:
        return [parse_bracketed_DT(line) for line in infile]
def parse_DT(infile):
    """Parse one CoNLL-style dependency block (terminated by a blank line).

    Each line is "index token _ tag _ _ parent ..."; column 1 is the token
    (or "#N" variable), column 3 the tag, column 6 the head index.
    Returns a TreeNode rooted at an artificial ROOT, or None at EOF.
    """
    tokens = [Symbol('ROOT')]  # index 0 is the artificial root
    children = {}              # head index -> set of child indices
    for line in infile:
        parts = line.rstrip().split()
        #print parts
        if not parts: break
        index = len(tokens)
        token = parts[1]
        tag = parts[3]
        parent = int(parts[6])
        if token.startswith('#'):
            tokens.append(Symbol(tag, var=int(token[1:])))
        else:
            tokens.append(Symbol(tag, token))
        children.setdefault(parent, set()).add(index)
    if len(tokens) == 1: return None
    root = TreeNode(Symbol('ROOT'), [], 0)
    # Breadth-first attachment of (parent_node, child_index) pairs.
    schedule = []
    for child in sorted(children[0]):
        schedule.append((root, child))
    while schedule:
        parent, index = schedule[0]
        del schedule[0]
        node = TreeNode(tokens[index])
        node.order = 0
        parent.insert(node)
        for child in sorted(children.get(index, [])):
            schedule.append((node, child))
        # NOTE(review): this check sits outside the loop above and reads the
        # leaked loop variable `child`; it looks like it was meant to be
        # inside the loop (counting left dependents for `order`) — confirm
        # against the producing pipeline before changing.
        if child < index:
            node.order += 1
    return root
# Splits a token into: run of '(', body text, optional "/tag" suffix, run of ')'.
_bracket_split_re = re.compile(r'([(]*)([^)/]*)(?:/([^)]*))?([)]*)')
def parse_bracketed_DT(line, insert_root=True):
    """Parse one bracketed dependency tree from a line, e.g. "((a/DT) ...)".

    Returns None for empty/'NULL' lines.  With insert_root, an artificial
    ROOT node wraps the tree unless the top node is already tagged ROOT.
    """
    line = line.rstrip()
    if not line or line == 'NULL': return None
    #print line
    root = TreeNode(Symbol('ROOT'))
    stack = [root]
    for part in line.rstrip().split():
        m = _bracket_split_re.match(part)
        # One new node per opening bracket.
        for c in m.group(1):
            node = TreeNode(Symbol(None))
            stack[-1].insert(node)
            stack.append(node)
        if m.group(3) != None:
            # "body/tag" form: body is a token or "#N" variable.
            if m.group(2).startswith('#'):
                stack[-1].data.variable = int(m.group(2)[1:])
            else:
                stack[-1].data.token = m.group(2)
            stack[-1].data.tag = m.group(3)
        else:
            # Bare body: it is the node's tag.
            stack[-1].data.tag = m.group(2)
        # The node's own label is linearized after the children seen so far.
        stack[-1].order = len(stack[-1].children)
        # FIXME: also check for vars
        for c in m.group(4):
            stack.pop()
    assert len(stack) == 1
    if not insert_root or root.children[0].data.tag == 'ROOT':
        return root.children[0]
    else:
        return root
# Like _bracket_split_re but without the optional "/tag" group.
_bracket_split_notag_re = re.compile(r'([(]*)([^)/]*)([)]*)')
def parse_bracketed_untagged_DT(line):
    """Parse a bracketed dependency tree whose nodes carry only tokens.

    The first token inside a bracket labels the node itself; later tokens
    at the same level become leaf children.  Returns None for empty/'NULL'.
    """
    line = line.rstrip()
    if not line or line == 'NULL': return None
    root = TreeNode(Symbol('TOP'))
    stack = [root]
    for part in line.rstrip().split():
        m = _bracket_split_notag_re.match(part)
        # One new node per opening bracket.
        for c in m.group(1):
            node = TreeNode(Symbol(None))
            stack[-1].insert(node)
            stack.append(node)
        if stack[-1].data.token == None:
            # First token at this level: label the node itself.
            stack[-1].data.token = m.group(2)
            stack[-1].order = len(stack[-1].children)
        else:
            # Subsequent tokens become leaf children.
            child = TreeNode(Symbol(nonterm=None, term=m.group(2)))
            stack[-1].insert(child)
        for c in m.group(3):
            stack.pop()
    return root.children[0]
| |
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
import copy
import logging
import os
from contextlib import contextmanager
import requests
from mocker.endpoints.mesos import AGENT1_ID
from util import LineBufferFilter
log = logging.getLogger(__name__)
def ping_mesos_agent(ar,
                     auth_header,
                     endpoint_id='http://127.0.0.2:15001',
                     expect_status=200,
                     agent_id=AGENT1_ID,
                     timeout=60,
                     ):
    """Test if agent is reachable or not

    Helper function meant to simplify checking mesos agent reachability/mesos
    agent related testing.

    Arguments:
        ar: Admin Router object, an instance of runner.(ee|open).Nginx
        auth_header (dict): headers dict that contains JWT. The auth data it
            contains is invalid.
        expect_status (int): HTTP status to expect
        endpoint_id (str): if expect_status==200 - id of the endpoint that
            should respond to the request
        agent_id (str): id of the agent to ping
        timeout (int): request timeout in seconds
    """
    agent_path = '/agent/{}/blah/blah'.format(agent_id)
    resp = requests.get(ar.make_url_from_path(agent_path),
                        allow_redirects=False,
                        headers=auth_header,
                        timeout=timeout)
    assert resp.status_code == expect_status
    if expect_status == 200:
        assert resp.json()['endpoint_id'] == endpoint_id
def generic_no_slash_redirect_test(ar, path, code=301):
    """Test if request for location without trailing slash is redirected

    Helper function meant to simplify writing multiple tests testing the
    same thing for different endpoints.

    Arguments:
        ar: Admin Router object, an instance of runner.(ee|open).Nginx
        path (str): path for which request should be made
        code (int): expected http redirect code
    """
    url = ar.make_url_from_path(path)
    resp = requests.get(url, allow_redirects=False)
    assert resp.status_code == code
    assert resp.headers['Location'] == '{}/'.format(url)
def generic_response_headers_verify_test(
        ar, auth_header, path, assert_headers=None, assert_headers_absent=None):
    """Test if response sent by AR has correct headers

    Helper function meant to simplify writing multiple tests testing the
    same thing for different endpoints.

    Arguments:
        ar: Admin Router object, an instance of runner.(ee|open).Nginx
        auth_header (dict): headers dict that contains JWT. The auth data it
            contains is valid and the request should be accepted.
        path (str): path for which request should be made
        assert_headers (dict): additional headers to test where key is the
            asserted header name and value is expected value
        assert_headers_absent (dict): headers that *MUST NOT* be present in
            the response
    """
    resp = requests.get(ar.make_url_from_path(path),
                        allow_redirects=False,
                        headers=auth_header)
    assert resp.status_code == 200

    received = resp.headers.items()
    for name, value in (assert_headers or {}).items():
        verify_header(received, name, value)
    for name in (assert_headers_absent or []):
        header_is_absent(received, name)
def generic_upstream_headers_verify_test(
        ar, auth_header, path, assert_headers=None, assert_headers_absent=None):
    """Test if headers sent upstream are correct

    Helper function meant to simplify writing multiple tests testing the
    same thing for different endpoints.

    Arguments:
        ar: Admin Router object, an instance of runner.(ee|open).Nginx
        auth_header (dict): headers dict that contains JWT. The auth data it
            contains is valid and the request should be accepted.
        path (str): path for which request should be made
        assert_headers (dict): additional headers to test where key is the
            asserted header name and value is expected value
        assert_headers_absent (dict): headers that *MUST NOT* be present in
            the upstream request
    """
    resp = requests.get(ar.make_url_from_path(path),
                        allow_redirects=False,
                        headers=auth_header)
    assert resp.status_code == 200

    upstream_headers = resp.json()['headers']
    # Headers AR must always add to the upstream request.
    mandatory = {
        'X-Forwarded-For': '127.0.0.1',
        'X-Forwarded-Proto': 'http',
        'X-Real-IP': '127.0.0.1',
    }
    for name, value in mandatory.items():
        verify_header(upstream_headers, name, value)
    for name, value in (assert_headers or {}).items():
        verify_header(upstream_headers, name, value)
    for name in (assert_headers_absent or []):
        header_is_absent(upstream_headers, name)
def generic_correct_upstream_dest_test(ar, auth_header, path, endpoint_id):
    """Test if upstream request has been sent to correct upstream

    Helper function meant to simplify writing multiple tests testing the
    same thing for different endpoints.

    Arguments:
        ar: Admin Router object, an instance of runner.(ee|open).Nginx
        auth_header (dict): headers dict that contains JWT. The auth data it
            contains is valid and the request should be accepted.
        path (str): path for which request should be made
        endpoint_id (str): id of the endpoint where the upstream request
            should have been sent
    """
    resp = requests.get(ar.make_url_from_path(path),
                        allow_redirects=False,
                        headers=auth_header)
    assert resp.status_code == 200
    assert resp.json()['endpoint_id'] == endpoint_id
def generic_correct_upstream_request_test(
        ar, auth_header, given_path, expected_path, http_ver='HTTP/1.0'):
    """Test if path component of the request sent upstream is correct.

    Helper function meant to simplify writing multiple tests testing the
    same thing for different endpoints.

    Arguments:
        ar: Admin Router object, an instance of runner.(ee|open).Nginx
        auth_header (dict): headers dict that contains JWT. The auth data it
            contains is valid and the request should be accepted.
        given_path (str): path for which request should be made
        expected_path (str): path that is expected to be sent to upstream
        http_ver (str): http version string that the upstream request should
            be made with
    """
    req_headers = copy.deepcopy(auth_header)
    if http_ver == 'HTTP/1.1':
        # For HTTP/1.1 connections we also need to verify that the
        # Connection header is cleared before proxying.
        req_headers['Connection'] = 'close'
    elif http_ver == 'websockets':
        req_headers.update({'Connection': 'close',
                            'Upgrade': 'Websockets'})

    resp = requests.get(ar.make_url_from_path(given_path),
                        allow_redirects=False,
                        headers=req_headers)
    assert resp.status_code == 200

    req_data = resp.json()
    assert req_data['method'] == 'GET'
    assert req_data['path'] == expected_path

    if http_ver == 'HTTP/1.1':
        header_is_absent(req_data['headers'], 'Connection')
        assert req_data['request_version'] == 'HTTP/1.1'
    elif http_ver == 'websockets':
        verify_header(req_data['headers'], 'Connection', 'upgrade')
        verify_header(req_data['headers'], 'Upgrade', 'Websockets')
        assert req_data['request_version'] == 'HTTP/1.1'
    else:
        assert req_data['request_version'] == http_ver
def generic_location_header_during_redirect_is_adjusted_test(
        ar,
        mocker,
        auth_header,
        endpoint_id,
        basepath,
        location_set,
        location_expected,
        ):
    """Test that the Location header of an upstream redirect is rewritten.

    The mocked upstream is told to always redirect with `location_set`;
    the test asserts AR responds 307 with `location_expected` instead.

    Arguments:
        ar: Admin Router object, an instance of runner.(ee|open).Nginx
        mocker: mock-endpoint controller used to program the upstream
        auth_header (dict): headers dict that contains a valid JWT
        endpoint_id (str): id of the upstream endpoint to program
        basepath (str): path for which the request should be made
        location_set (str): Location header the upstream will send
        location_expected (str): Location header AR is expected to return
    """
    mocker.send_command(endpoint_id=endpoint_id,
                        func_name='always_redirect',
                        aux_data=location_set)
    url = ar.make_url_from_path(basepath)
    r = requests.get(url, allow_redirects=False, headers=auth_header)
    assert r.status_code == 307
    assert r.headers['Location'] == location_expected
def header_is_absent(headers, header_name):
    """Assert that the given header is absent from a headers list.

    Arguments:
        headers (list): list of (name, value) tuples containing all the
            headers present in the reflected request data
        header_name (string): name of the header that must not be present

    Raises:
        AssertionError: a header named `header_name` was found in the
            supplied header list.
    """
    offending = [h for h in headers if h[0] == header_name]
    assert not offending
def verify_header(headers, header_name, header_value):
    """Asserts that particular header exists and has correct value.

    Helper function for checking if header with given name has been defined
    with correct value in given headers list. The headers list is in format
    defined by requests module.

    Presence of more than one header with given name or incorrect value
    raises an assert statement.

    Args:
        headers (obj: [('h1', 'v1'), ('h2', 'v2'), ...]): a list of header
            name-val tuples
        header_name (str): header name to seek
        header_value (str): expected value of the header

    Raises:
        AssertionError: header has not been found, there is more than one
            header with given name or header has incorrect value
    """
    matching = [h for h in headers if h[0] == header_name]
    # Replaced the original redundant nested length checks with a single
    # message selection followed by one assert.
    if len(matching) == 0:
        msg = "Header `{}` has not been found".format(header_name)
    elif len(matching) > 1:
        msg = "More than one `{}` header has been found".format(header_name)
    else:
        msg = None
    assert len(matching) == 1, msg
    assert matching[0][1] == header_value
def assert_endpoint_response(
        ar,
        path,
        code,
        assert_stderr=None,
        headers=None,
        cookies=None,
        assertions=None
        ):
    """Asserts response code and log messages in Admin Router stderr for
    request against specified path.

    Arguments:
        ar (Nginx): Running instance of the AR
        path (str): path for which request should be made
        code (int): Expected response code
        assert_stderr (dict): LineBufferFilter compatible definition of
            messages to assert
        headers (dict): Optionally provide request headers
        cookies (dict): Optionally provide request cookies
        assertions (List[lambda r]) Optionally provide additional assertions
            for the response
    """
    def make_request_and_check():
        resp = requests.get(
            ar.make_url_from_path(path),
            headers=headers,
            cookies=cookies,
        )
        assert resp.status_code == code
        for func in (assertions or []):
            assert func(resp)

    if assert_stderr is None:
        make_request_and_check()
    else:
        lbf = LineBufferFilter(assert_stderr, line_buffer=ar.stderr_line_buffer)
        with lbf:
            make_request_and_check()
        assert lbf.extra_matches == {}
@contextmanager
def overriden_file_content(file_path, new_content=None):
    """Context manager meant to simplify static files testing

    While inside the context, file can be modified and/or modified content
    may be injected by the context manager itself. Right after context is
    exited, the original file contents are restored.

    Fix: restoration now happens in a `finally` clause, so the original
    contents come back even when the `with` body raises (previously the
    exception propagated into the generator and skipped the restore).

    Arguments:
        file_path: path the the file that should be "guarded"
        new_content: new content for the file. If None - file contents are
            not changed, "string" objects are translated to binary blob
            first, assuming utf-8 encoding.
    """
    if new_content is not None and not isinstance(new_content, bytes):
        new_content = new_content.encode('utf-8')

    with open(file_path, 'rb+') as fh:
        old_content = fh.read()
        if new_content is not None:
            fh.seek(0)
            fh.write(new_content)
            fh.truncate()

    try:
        yield
    finally:
        with open(file_path, 'wb') as fh:
            fh.write(old_content)
def repo_is_ee():
    """Determine the flavour of the repository

    Return:
        True if repository is EE
    """
    here = os.path.dirname(__file__)

    def tests_dir_exists(flavour):
        # The repo flavour is signalled by which tests/<flavour> dir exists.
        candidate = os.path.join(here, "..", "..", "tests", flavour)
        return os.path.isdir(os.path.abspath(candidate))

    has_ee = tests_dir_exists("ee")
    has_open = tests_dir_exists("open")
    # Exactly one of the two directories must be present.
    assert has_ee != has_open, "Unable to determine the variant of the repo"
    return has_ee
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import traceback
import time
import util
import sys
from prometheus_client import Counter, Summary, Gauge
from ratelimit import limits, RateLimitException
# We keep track of the following metrics
class Stats(object):
    """Tracks execution metrics for one Pulsar function instance and exports
    them via Prometheus.

    All collectors are declared at class level, so they are registered once
    per process; each ``Stats`` instance binds them to its own label values
    in ``__init__``. The ``*_1min`` collectors are windowed copies that a
    60-second timer resets (see ``reset``).
    """

    # Labels identifying a single function instance.
    metrics_label_names = ['tenant', 'namespace', 'name', 'instance_id', 'cluster', 'fqfn']
    # Exception gauges additionally carry the error text and a ms timestamp.
    exception_metrics_label_names = metrics_label_names + ['error', 'ts']
    PULSAR_FUNCTION_METRICS_PREFIX = "pulsar_function_"
    USER_METRIC_PREFIX = "user_metric_";
    TOTAL_SUCCESSFULLY_PROCESSED = 'processed_successfully_total'
    TOTAL_SYSTEM_EXCEPTIONS = 'system_exceptions_total'
    TOTAL_USER_EXCEPTIONS = 'user_exceptions_total'
    PROCESS_LATENCY_MS = 'process_latency_ms'
    LAST_INVOCATION = 'last_invocation'
    TOTAL_RECEIVED = 'received_total'
    # Names of the 1-minute windowed variants.
    TOTAL_SUCCESSFULLY_PROCESSED_1min = 'processed_successfully_total_1min'
    TOTAL_SYSTEM_EXCEPTIONS_1min = 'system_exceptions_total_1min'
    TOTAL_USER_EXCEPTIONS_1min = 'user_exceptions_total_1min'
    PROCESS_LATENCY_MS_1min = 'process_latency_ms_1min'
    TOTAL_RECEIVED_1min = 'received_total_1min'

    # Declare Prometheus collectors (process-wide, shared by all instances).
    stat_total_processed_successfully = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SUCCESSFULLY_PROCESSED,
                                                'Total number of messages processed successfully.', metrics_label_names)
    stat_total_sys_exceptions = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SYSTEM_EXCEPTIONS, 'Total number of system exceptions.',
                                        metrics_label_names)
    stat_total_user_exceptions = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_USER_EXCEPTIONS, 'Total number of user exceptions.',
                                         metrics_label_names)
    stat_process_latency_ms = Summary(PULSAR_FUNCTION_METRICS_PREFIX + PROCESS_LATENCY_MS, 'Process latency in milliseconds.', metrics_label_names)
    stat_last_invocation = Gauge(PULSAR_FUNCTION_METRICS_PREFIX + LAST_INVOCATION, 'The timestamp of the last invocation of the function.', metrics_label_names)
    stat_total_received = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_RECEIVED, 'Total number of messages received from source.', metrics_label_names)

    # 1min windowed metrics
    stat_total_processed_successfully_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SUCCESSFULLY_PROCESSED_1min,
                                                     'Total number of messages processed successfully in the last 1 minute.', metrics_label_names)
    stat_total_sys_exceptions_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_SYSTEM_EXCEPTIONS_1min,
                                             'Total number of system exceptions in the last 1 minute.',
                                             metrics_label_names)
    stat_total_user_exceptions_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_USER_EXCEPTIONS_1min,
                                              'Total number of user exceptions in the last 1 minute.',
                                              metrics_label_names)
    stat_process_latency_ms_1min = Summary(PULSAR_FUNCTION_METRICS_PREFIX + PROCESS_LATENCY_MS_1min,
                                           'Process latency in milliseconds in the last 1 minute.', metrics_label_names)
    stat_total_received_1min = Counter(PULSAR_FUNCTION_METRICS_PREFIX + TOTAL_RECEIVED_1min,
                                       'Total number of messages received from source in the last 1 minute.', metrics_label_names)

    # exceptions
    user_exceptions = Gauge(PULSAR_FUNCTION_METRICS_PREFIX + 'user_exception', 'Exception from user code.', exception_metrics_label_names)
    system_exceptions = Gauge(PULSAR_FUNCTION_METRICS_PREFIX + 'system_exception', 'Exception from system code.', exception_metrics_label_names)

    # Rolling buffers of the last 10 (traceback, ts) pairs.
    # NOTE(review): these lists are class-level, hence shared across all
    # Stats instances -- confirm one instance per process is intended.
    latest_user_exception = []
    latest_sys_exception = []

    def __init__(self, metrics_labels):
        """Bind the shared collectors to this instance's label values.

        Args:
            metrics_labels: sequence of label values matching
                ``metrics_label_names`` (tenant, namespace, name, ...).
        """
        self.metrics_labels = metrics_labels;
        self.process_start_time = None

        # as optimization: bind the labelled children once and reuse them.
        self._stat_total_processed_successfully = self.stat_total_processed_successfully.labels(*self.metrics_labels)
        self._stat_total_sys_exceptions = self.stat_total_sys_exceptions.labels(*self.metrics_labels)
        self._stat_total_user_exceptions = self.stat_total_user_exceptions.labels(*self.metrics_labels)
        self._stat_process_latency_ms = self.stat_process_latency_ms.labels(*self.metrics_labels)
        self._stat_last_invocation = self.stat_last_invocation.labels(*self.metrics_labels)
        self._stat_total_received = self.stat_total_received.labels(*self.metrics_labels)
        self._stat_total_processed_successfully_1min = self.stat_total_processed_successfully_1min.labels(*self.metrics_labels)
        self._stat_total_sys_exceptions_1min = self.stat_total_sys_exceptions_1min.labels(*self.metrics_labels)
        self._stat_total_user_exceptions_1min = self.stat_total_user_exceptions_1min.labels(*self.metrics_labels)
        self._stat_process_latency_ms_1min = self.stat_process_latency_ms_1min.labels(*self.metrics_labels)
        self._stat_total_received_1min = self.stat_total_received_1min.labels(*self.metrics_labels)

        # start time for windowed metrics: reset() zeroes the *_1min
        # collectors every 60 seconds.
        util.FixedTimer(60, self.reset, name="windowed-metrics-timer").start()

    # The getters below read prometheus_client's private value holders
    # (_value/_count/_sum). NOTE(review): these are not public API and may
    # break across prometheus_client versions.
    def get_total_received(self):
        return self._stat_total_received._value.get();

    def get_total_processed_successfully(self):
        return self._stat_total_processed_successfully._value.get();

    def get_total_sys_exceptions(self):
        return self._stat_total_sys_exceptions._value.get();

    def get_total_user_exceptions(self):
        return self._stat_total_user_exceptions._value.get();

    def get_avg_process_latency(self):
        """Average latency in ms over all invocations (0.0 when none)."""
        process_latency_ms_count = self._stat_process_latency_ms._count.get()
        process_latency_ms_sum = self._stat_process_latency_ms._sum.get()
        return 0.0 \
            if process_latency_ms_count <= 0.0 \
            else process_latency_ms_sum / process_latency_ms_count

    def get_total_processed_successfully_1min(self):
        return self._stat_total_processed_successfully_1min._value.get()

    def get_total_sys_exceptions_1min(self):
        return self._stat_total_sys_exceptions_1min._value.get()

    def get_total_user_exceptions_1min(self):
        return self._stat_total_user_exceptions_1min._value.get()

    def get_total_received_1min(self):
        return self._stat_total_received_1min._value.get()

    def get_avg_process_latency_1min(self):
        """Average latency in ms over the current 1-minute window."""
        process_latency_ms_count = self._stat_process_latency_ms_1min._count.get()
        process_latency_ms_sum = self._stat_process_latency_ms_1min._sum.get()
        return 0.0 \
            if process_latency_ms_count <= 0.0 \
            else process_latency_ms_sum / process_latency_ms_count

    def get_last_invocation(self):
        return self._stat_last_invocation._value.get()

    def incr_total_processed_successfully(self):
        self._stat_total_processed_successfully.inc()
        self._stat_total_processed_successfully_1min.inc()

    def incr_total_sys_exceptions(self, exception):
        self._stat_total_sys_exceptions.inc()
        self._stat_total_sys_exceptions_1min.inc()
        self.add_sys_exception(exception)

    def incr_total_user_exceptions(self, exception):
        self._stat_total_user_exceptions.inc()
        self._stat_total_user_exceptions_1min.inc()
        self.add_user_exception(exception)

    def incr_total_received(self):
        self._stat_total_received.inc()
        self._stat_total_received_1min.inc()

    def process_time_start(self):
        """Mark the beginning of one invocation, for latency measurement."""
        self.process_start_time = time.time();

    def process_time_end(self):
        """Record the elapsed latency (ms) since process_time_start()."""
        if self.process_start_time:
            duration = (time.time() - self.process_start_time) * 1000.0
            self._stat_process_latency_ms.observe(duration)
            self._stat_process_latency_ms_1min.observe(duration)

    def set_last_invocation(self, time):
        # NB: the parameter shadows the `time` module (seconds -> ms here).
        self._stat_last_invocation.set(time * 1000.0)

    def add_user_exception(self, exception):
        """Remember the last 10 user-code tracebacks and report to Prometheus."""
        error = traceback.format_exc()
        # long() is the Python 2 spelling; both yield an integer ms timestamp.
        ts = int(time.time() * 1000) if sys.version_info.major >= 3 else long(time.time() * 1000)
        self.latest_user_exception.append((error, ts))
        if len(self.latest_user_exception) > 10:
            self.latest_user_exception.pop(0)

        # report exception via prometheus
        try:
            self.report_user_exception_prometheus(exception, ts)
        except RateLimitException:
            # Reports beyond the 5-per-minute limit are silently dropped.
            pass

    @limits(calls=5, period=60)
    def report_user_exception_prometheus(self, exception, ts):
        exception_metric_labels = self.metrics_labels + [str(exception), str(ts)]
        self.user_exceptions.labels(*exception_metric_labels).set(1.0)

    def add_sys_exception(self, exception):
        """Remember the last 10 system tracebacks and report to Prometheus."""
        error = traceback.format_exc()
        ts = int(time.time() * 1000) if sys.version_info.major >= 3 else long(time.time() * 1000)
        self.latest_sys_exception.append((error, ts))
        if len(self.latest_sys_exception) > 10:
            self.latest_sys_exception.pop(0)

        # report exception via prometheus
        try:
            self.report_system_exception_prometheus(exception, ts)
        except RateLimitException:
            # Reports beyond the 5-per-minute limit are silently dropped.
            pass

    @limits(calls=5, period=60)
    def report_system_exception_prometheus(self, exception, ts):
        exception_metric_labels = self.metrics_labels + [str(exception), str(ts)]
        self.system_exceptions.labels(*exception_metric_labels).set(1.0)

    def reset(self):
        """Zero the 1-minute windowed collectors (invoked by the 60s timer)."""
        self._stat_total_processed_successfully_1min._value.set(0.0)
        self._stat_total_user_exceptions_1min._value.set(0.0)
        self._stat_total_sys_exceptions_1min._value.set(0.0)
        self._stat_process_latency_ms_1min._sum.set(0.0)
        self._stat_process_latency_ms_1min._count.set(0.0)
        self._stat_total_received_1min._value.set(0.0)
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.types import InvalidCredsError
from libcloud.compute.drivers.gridspot import GridspotNodeDriver
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.secrets import GRIDSPOT_PARAMS
class GridspotTest(unittest.TestCase, TestCaseMixin):
    """Tests for the Gridspot compute driver, backed by a mock HTTP layer."""

    def setUp(self):
        GridspotNodeDriver.connectionCls.conn_class = GridspotMockHttp
        GridspotMockHttp.type = None
        self.driver = GridspotNodeDriver(*GRIDSPOT_PARAMS)

    def _check_node(self, node, node_id, state, extra_expected):
        # Shared assertions for a single node returned by list_nodes().
        self.assertEqual(node.id, node_id)
        self.assertEqual(node.state, state)
        self.assertIn("69.4.239.74", node.public_ips)
        self.assertNotIn("ended_state_time", node.extra)
        for key, expected in extra_expected.items():
            self.assertEqual(node.extra[key], expected)

    def test_invalid_creds(self):
        """A bad API key must surface as InvalidCredsError."""
        GridspotMockHttp.type = "BAD_AUTH"
        self.assertRaises(InvalidCredsError, self.driver.list_nodes)

    def test_list_nodes(self):
        """Both mock instances come back with their expected attributes."""
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 2)
        self._check_node(
            nodes[0],
            "inst_CP2WrQi2WIS4iheyAVkQYw",
            NodeState.RUNNING,
            {
                "port": 62394,
                "vm_ram": 1429436743,
                "start_state_time": 1342108905,
                "vm_num_logical_cores": 8,
                "vm_num_physical_cores": 4,
                "winning_bid_id": "bid_X5xhotGYiGUk7_RmIqVafA",
                "running_state_time": 1342108989,
            },
        )
        self._check_node(
            nodes[1],
            "inst_CP2WrQi2WIS4iheyAVkQYw2",
            NodeState.PENDING,
            {
                "port": 62395,
                "vm_ram": 1429436744,
                "start_state_time": 1342108906,
                "vm_num_logical_cores": 7,
                "vm_num_physical_cores": 5,
                "winning_bid_id": "bid_X5xhotGYiGUk7_RmIqVafA1",
                "running_state_time": 1342108990,
            },
        )

    def test_destroy_node(self):
        """Destroying an existing node reports success."""
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.destroy_node(node))

    # The operations below are not implemented by Gridspot; the inherited
    # TestCaseMixin checks are overridden with no-ops so they do not fail.

    def test_create_node(self):
        """Gridspot does not implement this functionality."""

    def test_destroy_node_failure(self):
        """Gridspot only fails destroy_node on bad parameters (it 404s)."""

    def test_reboot_node(self):
        """Gridspot does not implement this functionality."""

    def test_reboot_node_failure(self):
        """Gridspot does not implement this functionality."""

    def test_resize_node(self):
        """Gridspot does not implement this functionality."""

    def test_reboot_node_response(self):
        """Gridspot does not implement this functionality."""

    def test_list_images_response(self):
        """Gridspot does not implement this functionality."""

    def test_create_node_response(self):
        """Gridspot does not implement this functionality."""

    def test_destroy_node_response(self):
        """Gridspot does not implement this functionality."""

    def test_list_sizes_response(self):
        """Gridspot does not implement this functionality."""

    def test_resize_node_failure(self):
        """Gridspot does not implement this functionality."""

    def test_list_images(self):
        """Gridspot does not implement this functionality."""

    def test_list_sizes(self):
        """Gridspot does not implement this functionality."""

    def test_list_locations(self):
        """Gridspot does not implement this functionality."""

    def test_list_locations_response(self):
        """Gridspot does not implement this functionality."""
class GridspotMockHttp(MockHttp):
    """Canned Gridspot API responses used by GridspotTest."""

    _RUNNING_INSTANCE = {
        "instance_id": "inst_CP2WrQi2WIS4iheyAVkQYw",
        "vm_num_logical_cores": 8,
        "vm_num_physical_cores": 4,
        "winning_bid_id": "bid_X5xhotGYiGUk7_RmIqVafA",
        "vm_ram": 1429436743,
        "start_state_time": 1342108905,
        "vm_ssh_wan_ip_endpoint": "69.4.239.74:62394",
        "current_state": "Running",
        "ended_state_time": "null",
        "running_state_time": 1342108989,
    }
    _STARTING_INSTANCE = {
        "instance_id": "inst_CP2WrQi2WIS4iheyAVkQYw2",
        "vm_num_logical_cores": 7,
        "vm_num_physical_cores": 5,
        "winning_bid_id": "bid_X5xhotGYiGUk7_RmIqVafA1",
        "vm_ram": 1429436744,
        "start_state_time": 1342108906,
        "vm_ssh_wan_ip_endpoint": "69.4.239.74:62395",
        "current_state": "Starting",
        "ended_state_time": "null",
        "running_state_time": 1342108990,
    }

    def _compute_api_v1_list_instances_BAD_AUTH(self, method, url, body, headers):
        # Gridspot signals bad credentials with a plain 404.
        return (httplib.NOT_FOUND, "", {}, httplib.responses[httplib.NOT_FOUND])

    def _compute_api_v1_list_instances(self, method, url, body, headers):
        payload = {
            "instances": [self._RUNNING_INSTANCE, self._STARTING_INSTANCE],
            "exception_name": "",
        }
        return (httplib.OK, json.dumps(payload), {}, httplib.responses[httplib.OK])

    def _compute_api_v1_stop_instance(self, method, url, body, headers):
        return (httplib.OK, json.dumps({"exception_name": ""}), {},
                httplib.responses[httplib.OK])
if __name__ == "__main__":
    # Run the test suite when executed directly; exit with its status code.
    sys.exit(unittest.main())
| |
#!/usr/bin/env python
import datetime
import errno
import io
import itertools
import os
import re
import docutils.examples
import jinja2
import lxml.html
import pytz
import vobject
#import yaml
def parse_date(s):
    """Returns a datetime.date, from a date in the tabular schedule

    >>> parse_date('Friday 18th September 2015')
    datetime.date(2015, 9, 18)

    Raises:
        ValueError: if `s` does not look like
            '<Dayname> <day>[suffix] <Monthname> <year>'.
    """
    m = re.match(r"""
        (?P<dayname> [A-Za-z]+)
        \s
        (?P<day> [0-9]+)
        [a-z]{0,2}           # Discard any suffix, e.g. th, st
        \s
        (?P<monthname> [A-Za-z]+)
        \s
        (?P<year> [0-9]+)
        """,
        s, re.VERBOSE,
    )
    if m is None:
        # Fail with a clear message instead of an AttributeError on None.
        raise ValueError('Unrecognised schedule date: %r' % (s,))
    s = '{day} {monthname} {year}'.format(**m.groupdict())
    return datetime.datetime.strptime(s, '%d %B %Y').date()
def parse_time(s):
    """Return the datetime.time for an event time string like '14:30'."""
    parsed = datetime.datetime.strptime(s, '%H:%M')
    return parsed.time()
def parse_days(tree):
    """Yields (day, table) for each day covered by the tabular schedule
    """
    # Day sections are the docutils-generated divs whose id starts with a
    # weekday name, e.g. "friday-18th-september-2015".
    for section in tree.xpath('''
        //div[@class="section"
              and (starts-with(@id, "friday")
                   or starts-with(@id, "saturday")
                   or starts-with(@id, "sunday")
                   or starts-with(@id, "monday")
                  )
             ]'''):
        # The section's first child holds its heading text,
        # e.g. "Friday 18th September 2015".
        day = parse_date(section[0].text)
        for table in section.xpath('.//table'):
            yield day, table
def collapse_whitespace(s):
    """Return s with surrounding whitespace removed and internal runs of
    whitespace squeezed to single spaces.
    """
    squeezed = re.sub(r'\s+', ' ', s)
    return squeezed.strip()
def repeat_none(value, times):
    """Yield None `times` times, ignoring `value`.

    Drop-in replacement for itertools.repeat as a `fillfunc`: callers that
    want filler slots marked as None rather than as copies of the cell.
    """
    # range() works on both Python 2 and 3; the original used the
    # Python-2-only xrange(), which breaks under Python 3.
    for _ in range(times):
        yield None
def colspan_cells(cells, fillfunc=itertools.repeat):
    """Yield td/th elements, expanded so a cell with colspan=n occupies n
    slots (the extra slots are produced by `fillfunc`).
    """
    for cell in cells:
        yield cell
        extra_slots = int(cell.get('colspan', 1)) - 1
        for filler in fillfunc(cell, extra_slots):
            yield filler
def rowspan_cells(cells, rowspans, fillfunc=itertools.repeat):
    """Yields td or th elements, repeating them as necessary for colspan=n
    & rowspan=n.

    `rowspans` maps a column index to (rows_remaining, cell) and is mutated
    in place so state carries over from one row to the next.
    """
    cells = iter(cells)
    col = 0
    while True:
        try:
            # A cell spanning down from an earlier row occupies this column.
            ttl, cell = rowspans.pop(col)
            for item in fillfunc(cell, 1):
                yield item
        except KeyError:
            try:
                cell = next(cells)
            except StopIteration:
                # End of the row's real cells. Under PEP 479 (Python 3.7+)
                # letting StopIteration escape a generator raises
                # RuntimeError, so return explicitly instead.
                return
            ttl = int(cell.get('rowspan', 1))
            yield cell
        ttl -= 1
        if ttl > 0:
            # Remember the cell for the next `ttl` rows.
            rowspans[col] = (ttl, cell)
        colspan = int(cell.get('colspan', 1))
        for item in fillfunc(cell, colspan-1):
            yield item
        col += colspan
def parse_rooms(table):
    """Yields the rooms used in a single schedule table
    """
    # First header row holds the room names, the (optional) second row a
    # sub-label; [1:] drops the leading time column from both.
    row1 = colspan_cells(table.xpath('./thead/tr[1]/th')[1:])
    row2 = colspan_cells(table.xpath('./thead/tr[2]/th')[1:])
    # NOTE(review): itertools.izip_longest exists only on Python 2 (it is
    # zip_longest on Python 3) -- this module appears to target Python 2.
    for th1, th2 in itertools.izip_longest(row1, row2):
        text1 = collapse_whitespace(th1.text)
        text2 = collapse_whitespace(th2.text) if th2 is not None else ''
        if text2:
            # e.g. "Room D (Teachers)"
            yield '%s (%s)' % (text1, text2)
        else:
            yield text1
def parse_event_href(href):
    """Returns a dict of information from the URL of recognized events

    >>> parse_event_href('/demos/fooing-the-bar-with-only-2-quuxs')['type']
    'demo'
    >>> parse_event_href('/panels/quuxers-question-time/')['slug']
    'quuxers-question-time'
    >>> parse_event_href('/ceremonies/changing-of-the-royal-quux')
    {}
    """
    # (The slug doctest above previously lacked its opening quote, which
    # made the doctest fail to compare correctly.)
    m = re.match(r'''
        /(?P<type>demo|panel|sprint|talk|workshop)s   # NB: Hacky unpluralizing
        /(?P<slug>[a-z0-9-]+)
        /?
        ''',
        href,
        re.VERBOSE)
    if m:
        return m.groupdict()
    return {}
def parse_abstract(href):
    """Returns info about an event, parsed from the Markdown abstract

    Extends parse_event_href's dict with a 'speaker' key when one can be
    found in the event's Markdown file; returns {} for unrecognised hrefs.
    """
    info = parse_event_href(href)
    if not info:
        return info
    path = 'content/{type}s/{slug}.md'.format(**info)
    with io.open(path, encoding='utf-8') as f:
        # The speaker heading is near the top; 1 KiB is enough.
        markdown = f.read(1024)
    # Speaker is a "### Name" heading ...
    m = re.search(r'^### +?(?P<speaker>[\w][^\n]+?)\n',
                  markdown, re.UNICODE | re.MULTILINE)
    if m:
        info.update(m.groupdict())
    # ... but the linked form "### [Name](...)" is matched second so it
    # takes precedence (it extracts just the link text).
    m = re.search(r'^### +?\[(?P<speaker>[\w][^\]]+?)\]',
                  markdown, re.UNICODE | re.MULTILINE)
    if m:
        info.update(m.groupdict())
    return info
def parse_event_title(title, default_room):
    """Parse an event title that may contain extra room information

    >>> parse_event_title('Foo', 'Broom closet')
    ('Foo', 'Broom closet')
    >>> parse_event_title('Foo (in the Atrium)', 'Broom closet')
    ('Foo', 'Atrium')
    >>> parse_event_title('Foo (in The Atrium)', 'Broom closet')
    ('Foo', 'The Atrium')
    """
    title = collapse_whitespace(title)
    room_note = re.match(r'(?P<title>.+) \((?:in the|in) (?P<room>[^)]+)\)',
                         title, re.UNICODE)
    if room_note:
        return room_note.group('title'), room_note.group('room')
    # Registration/breakfast slots always happen in the Cafeteria.
    if re.match(r'(?:registration[ /&]+)?breakfast', title, re.IGNORECASE):
        return title, 'Cafeteria'
    return title, default_room
def parse_event(td, default_room=None):
    """Returns the details of an event, parsed from a table cell

    Returns an (href, title, room, speaker, type_) tuple; href, speaker and
    type_ are None when the cell has no single recognised event link.
    """
    anchors = list(td.xpath('a'))
    # Only trust the link when the cell contains exactly one anchor.
    href = anchors[0].get('href') if len(anchors) == 1 else None
    abstract_info = parse_abstract(href) if href is not None else {}
    speaker = abstract_info.get('speaker')
    type_ = abstract_info.get('type')
    title, room = parse_event_title(td.text_content(), default_room)
    return href, title, room, speaker, type_
def stringify_children(node):
    """Serialise node's inner content (text, children and their tails) to a
    single HTML string.

    Based on http://stackoverflow.com/a/28173933/293340
    """
    parts = ([node.text]
             + list(itertools.chain(*([lxml.html.tostring(c, with_tail=False),
                                       c.tail] for c in node.getchildren())
                                    ))
             + [node.tail])
    # filter removes possible Nones in texts and tails
    return ''.join(part for part in parts if part is not None)
def events(table):
    """Yields event dicts parsed from a single schedule table
    """
    rooms = [room for room in parse_rooms(table)]
    # Skip spacer rows whose first (time) cell is just a non-breaking space.
    trs = [tr for tr in table.xpath('./tbody/tr')
           if tr.find('./td').text != u'\N{NO-BREAK SPACE}']
    times = [parse_time(tr.find('./td').text) for tr in trs]
    rowspans = {}  # carries multi-row cells from one row to the next
    for i, (start_time, tr) in enumerate(zip(times, trs)):
        tds = tr.xpath('./td')[1:]  # drop the time column
        for j, td in enumerate(rowspan_cells(tds, rowspans, fillfunc=repeat_none)):
            if td is None:
                continue  # filler slot for a colspan/rowspan continuation
            rowspan = int(td.get('rowspan', 1))
            try:
                # The event finishes when its spanned rows do.
                finish_time = times[i+rowspan]
            except IndexError:
                finish_time = None  # last slot of the day: no explicit finish
            href, title, room, speaker, type_ = parse_event(td, rooms[j])
            rawhtml = stringify_children(td)
            event = {
                'start': start_time,
                'finish': finish_time,
                'href': href,
                'location': room,
                'title': title,
                'speaker': speaker,
                'type': type_,
                'rawhtml': rawhtml,
            }
            yield event
def parse_tabular_schedule(tree):
    """Yields event dicts, parsed from the tabular schedule
    """
    for day, table in parse_days(tree):
        for event in events(table):
            event['day'] = day
            # Promote the bare times to full datetimes on the table's day.
            event['start'] = datetime.datetime.combine(day, event['start'])
            if event['finish'] is not None:
                event['finish'] = datetime.datetime.combine(day, event['finish'])
                event['duration'] = event['finish'] - event['start']
            # This is a hack because 'Lightning PyKids' appears twice in the same row.
            if event['title'] == 'Lightning PyKids' and 'Bistro' in event['location']:
                continue
            yield event
def days_hours_minutes_seconds(td):
    """Decompose a `datetime.timedelta` into (days, hours, minutes, seconds).

    >>> days_hours_minutes_seconds(datetime.timedelta(seconds=5400))
    (0, 1, 30, 0)
    """
    total = td.seconds
    hours = total // 3600
    minutes = (total % 3600) // 60
    seconds = total % 60
    return (td.days, hours, minutes, seconds)
def format_duration(duration, sep=' ', units='dhms', default='0s'):
    """Render a `datetime.timedelta` as a minimal string, dropping zero parts.

    >>> format_duration(datetime.timedelta(seconds=5400))
    '1h 30m'
    """
    quantities = days_hours_minutes_seconds(duration)
    parts = []
    for qty, unit in zip(quantities, units):
        if qty:
            parts.append('%i%s' % (qty, unit))
    # An all-zero duration renders as `default`.
    return sep.join(parts) or default
def ordinal_suffix(i, default='th'):
    """Returns the appropriate English ordinal suffix for an integer i
    (1 -> 'st', 2 -> 'nd', 11 -> 'th', 21 -> 'st', ...).
    """
    special = {1: 'st', 2: 'nd', 3: 'rd',
               11: 'th', 12: 'th', 13: 'th'}
    # Check the number itself, then its last two digits (handles 11-13),
    # then its last digit.
    for candidate in (i, i % 100, i % 10):
        if candidate in special:
            return special[candidate]
    return default
def format_day_id(day):
    """Returns a string matching the id made from schedule.rst by docutils

    >>> format_day_id(datetime.datetime(2015, 9, 18))
    'friday-18th-september-2015'
    """
    # NOTE(review): %d zero-pads single-digit days ("04th"); confirm the
    # docutils ids agree for days < 10. %A/%B are locale-dependent -- this
    # presumably assumes an English locale.
    date_format = '%A-%d{suffix}-%B-%Y'.format(suffix=ordinal_suffix(day.day))
    return day.strftime(date_format).lower()
def render_schedule(schedule, template_dir):
    """Render the flat-schedule HTML page from the parsed event list."""
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        autoescape=True,
        lstrip_blocks=True,
        trim_blocks=True,
    )
    # Expose the formatting helpers defined above to the templates.
    env.filters.update({
        'ordinal_suffix': ordinal_suffix,
        'days_hours_minutes_seconds': days_hours_minutes_seconds,
        'format_day_id': format_day_id,
        'format_duration': format_duration,
    })
    templ = env.get_template('flat_schedule.html')
    html = templ.render({
        'page': {
            'title': 'Schedule',
            'slug': 'schedule',
            'body_class_hack': 'talks',
        },
        'schedule': schedule,
    })
    return html
def read_html_tabular_schedule(config):
    """Returns a list of events, from a Tabular HTML schedule
    """
    # Parses the already-rendered page at <output_dir>/schedule/index.html.
    tab_sched_dir = os.path.join(config['output_dir'], 'schedule')
    tab_sched_path = os.path.join(tab_sched_dir, 'index.html')
    tab_sched_etree = lxml.html.parse(tab_sched_path)
    return list(parse_tabular_schedule(tab_sched_etree))
def read_rst_tabular_schedule(config):
    """Returns a list of events, from a Tabular reStructuredText schedule
    """
    # Use a `with` block so the handle is closed deterministically (the
    # original left it open until garbage collection).
    with io.open('content/schedule.rst', encoding='utf-8') as f:
        tab_sched_rst = f.read()
    # Drop the front-matter before the first '---' delimiter.
    tab_sched_rst = tab_sched_rst.split('---\n', 1)[1]
    tab_sched_html = docutils.examples.html_body(tab_sched_rst)
    tab_sched_etree = lxml.html.fromstring(tab_sched_html)
    return list(parse_tabular_schedule(tab_sched_etree))
def mkdirs(path):
    """Create `path`, including any missing parent directories, ignoring the
    error when it already exists.

    The original used os.mkdir, which fails when an intermediate directory
    is missing even though callers pass nested paths such as
    'output/schedule/flat'; os.makedirs creates the whole chain.
    """
    try:
        os.makedirs(path)
    except os.error as exc:
        if exc.errno != errno.EEXIST:
            raise
def write_flat_schedule(schedule, config):
    """Render the flat schedule and write it to
    <output_dir>/schedule/flat/index.html.
    """
    schedule_html = render_schedule(schedule, config['template_dir'])
    schedule_dir = os.path.join(config['output_dir'], 'schedule', 'flat')
    schedule_path = os.path.join(schedule_dir, 'index.html')
    mkdirs(schedule_dir)
    with io.open(schedule_path, 'w', encoding='utf-8') as f:
        f.write(schedule_html)
def write_ical_schedule(schedule, config):
    """Write the whole schedule as an iCal feed at <output_dir>/schedule.ics."""
    cal = vobject.iCalendar()
    cal.add('x-wr-calname').value = 'PyCon UK 2015 Schedule'

    def add_tz(dt):
        # datetimes are in Europe/London time, but vobject blows up if we use
        # pytz.timezone('Europe/London').localize(dt)
        return pytz.UTC.localize(dt - datetime.timedelta(hours=1))

    for event in schedule:
        vevent = cal.add('vevent')
        if event['start']:
            vevent.add('dtstart').value = add_tz(event['start'])
        if event['finish']:
            vevent.add('dtend').value = add_tz(event['finish'])
        title = event['title']
        type_ = event['type']
        speaker = event['speaker']
        if type_ or speaker:
            # e.g. "Some Title (talk by Jane Doe)"
            title += ' (' + ' by '.join(filter(None, [type_, speaker])) + ')'
        vevent.add('summary').value = title
        vevent.add('location').value = event['location']
        if event['href']:
            href = 'http://www.pyconuk.org' + event['href']
            # TODO: convince clients to show text/html descriptions
            description = vevent.add('description')
            description.value = href
            # Does anything show these?
            vevent.add('url').value = href
    ics_path = os.path.join(config['output_dir'], 'schedule.ics')
    with io.open(ics_path, 'w', encoding='utf-8') as f:
        # NOTE(review): .decode implies serialize() returns bytes (Python 2
        # vobject); under Python 3 it returns str and this would fail --
        # confirm before porting.
        f.write(cal.serialize().decode('utf-8'))
def create_flat_schedule(config):
    """Build the flat schedule page and the iCal feed from the rendered
    tabular HTML schedule.
    """
    schedule = read_html_tabular_schedule(config)
    write_ical_schedule(schedule, config)
    write_flat_schedule(schedule, config)
if __name__ == '__main__':
    # When run directly, build from the reStructuredText schedule source
    # instead of the rendered HTML (cf. create_flat_schedule).
    config = {'template_dir': 'templates', 'output_dir': 'output'}
    schedule = read_rst_tabular_schedule(config)
    write_ical_schedule(schedule, config)
    write_flat_schedule(schedule, config)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Convenience wrappers around Python's operators, used to drive the
# elementwise-op tests in this file.
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Converts a dense ndarray into a SparseTensor, zeroing small entries.

  NOTE: mutates `x` in place (entries below `thresh` are set to 0).

  Args:
    x: dense np.ndarray.
    thresh: entries strictly below this value are dropped.
    index_dtype: integer dtype for the sparse indices.

  Returns:
    A (SparseTensor, values ndarray) pair.
  """
  x[x < thresh] = 0
  non_zero = np.where(x)
  x_indices = np.vstack(non_zero).astype(index_dtype).T
  x_values = x[non_zero]
  x_shape = x.shape
  return sparse_tensor.SparseTensor(
      indices=x_indices, values=x_values, dense_shape=x_shape), x_values
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
class ComparisonOpTest(test.TestCase):
  """Tests elementwise comparison ops against their NumPy counterparts."""

  def _compareScalar(self, func, x, y, dtype):
    # Applies `func` to 1-element tensors of `dtype` and returns the scalar
    # result, for comparison with the pure-Python operator.
    with test_util.use_gpu():
      out = func(
          ops.convert_to_tensor(np.array([x]).astype(dtype)),
          ops.convert_to_tensor(np.array([y]).astype(dtype)))
      ret = self.evaluate(out)
    return ret[0]

  def testScalarCompareScalar(self):
    dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
    data = [-1, 0, 1]
    for t in dtypes:
      for x in data:
        for y in data:
          self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
          self.assertEqual(
              self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
          self.assertEqual(
              self._compareScalar(math_ops.greater, x, y, t), x > y)
          self.assertEqual(
              self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
          self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
          self.assertEqual(
              self._compareScalar(math_ops.not_equal, x, y, t), x != y)
    # Complex dtypes only support (in)equality, not ordering.
    data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
    for t in [np.complex64, np.complex128]:
      for x in data:
        for y in data:
          self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
          self.assertEqual(
              self._compareScalar(math_ops.not_equal, x, y, t), x != y)

  def _compare(self, x, y, np_func, tf_func):
    # Checks tf_func(x, y) elementwise against np_func(x, y).
    np_ans = np_func(x, y)
    with test_util.use_gpu():
      out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
      tf_ans = self.evaluate(out)
    self.assertAllEqual(np_ans, tf_ans)

  def testTensorCompareTensor(self):
    x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
    y = np.linspace(20, -10, 6).reshape(1, 3, 2)
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
      xt = x.astype(t)
      yt = y.astype(t)
      self._compare(xt, yt, np.less, math_ops.less)
      self._compare(xt, yt, np.less_equal, math_ops.less_equal)
      self._compare(xt, yt, np.greater, math_ops.greater)
      self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
      self._compare(xt, yt, np.equal, math_ops.equal)
      self._compare(xt, yt, np.not_equal, math_ops.not_equal)
    # Complex types do not support ordering but do support equality tests.
    for t in [np.complex64, np.complex128]:
      xt = x.astype(t)
      xt -= 1j * xt
      yt = y.astype(t)
      yt -= 1j * yt
      self._compare(xt, yt, np.equal, math_ops.equal)
      self._compare(xt, yt, np.not_equal, math_ops.not_equal)

  def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
    # Broadcast comparison, checked in both argument orders.
    x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
    y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
    if dtype in (np.complex64, np.complex128):
      x -= 1j * x
      y -= 1j * y
    self._compare(x, y, np_func, tf_func)
    self._compare(y, x, np_func, tf_func)

  def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
    # Shape pairs cover scalar-like, partial and zero-sized broadcasts.
    shapes = [
        ([1, 3, 2], [1]),
        ([1, 3, 2], [2]),
        ([1, 3, 2], [3, 2]),
        ([1, 3, 2], [3, 1]),
        ([1, 3, 2], [1, 3, 2]),
        ([1, 3, 2], [2, 3, 1]),
        ([1, 3, 2], [2, 1, 1]),
        ([1, 3, 2], [1, 3, 1]),
        ([2, 1, 5], [2, 3, 1]),
        ([2, 0, 5], [2, 0, 1]),
        ([2, 3, 0], [2, 3, 1]),
    ]
    dtypes = [
        np.float16,
        np.float32,
        np.float64,
        np.int32,
        np.int64,
    ]
    if include_complex:
      dtypes.extend([np.complex64, np.complex128])
    for (xs, ys) in shapes:
      for dtype in dtypes:
        self._compareBCast(xs, ys, dtype, np_func, tf_func)

  def testBCastLess(self):
    self._testBCastByFunc(np.less, math_ops.less)

  def testBCastLessEqual(self):
    self._testBCastByFunc(np.less_equal, math_ops.less_equal)

  def testBCastGreater(self):
    self._testBCastByFunc(np.greater, math_ops.greater)

  def testBCastGreaterEqual(self):
    self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)

  def testBCastEqual(self):
    self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)

  def testBCastNotEqual(self):
    self._testBCastByFunc(
        np.not_equal, math_ops.not_equal, include_complex=True)

  @test_util.run_deprecated_v1
  def testShapeMismatch(self):
    # Incompatible static shapes must be rejected at graph-construction time.
    dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
    funcs = [
        math_ops.less, math_ops.less_equal, math_ops.greater,
        math_ops.greater_equal, math_ops.equal, math_ops.not_equal
    ]
    x = np.arange(0, 10).reshape([2, 5])
    y = np.arange(0, 10).reshape([5, 2])
    for t in dtypes:
      for f in funcs:
        with self.assertRaisesWithPredicateMatch(
            ValueError, lambda e: "Dimensions must" in str(e)):
          f(x.astype(t), y.astype(t))
class LogicalOpTest(test.TestCase):
  """Tests elementwise logical ops (and/or/xor/not) on boolean tensors."""

  def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
    """Checks tf_func(x, y) against np_func(x, y); output dtype must be bool."""
    np_ans = np_func(x, y)
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_val = self.evaluate(out)
    self.assertEqual(out.dtype, dtypes_lib.bool)
    self.assertAllEqual(np_ans, tf_val)
    self.assertShapeEqual(np_ans, out)

  def _not(self, x, use_gpu=False):
    """Checks logical_not against np.logical_not."""
    np_ans = np.logical_not(x)
    with test_util.device(use_gpu=use_gpu):
      out = math_ops.logical_not(ops.convert_to_tensor(x))
      tf_val = self.evaluate(out)
    self.assertEqual(out.dtype, dtypes_lib.bool)
    self.assertAllEqual(np_ans, tf_val)
    self.assertShapeEqual(np_ans, out)

  def testScalar(self):
    data = [np.array([True]), np.array([False])]
    for use_gpu in [True, False]:
      for x in data:
        self._not(x, use_gpu)
      for x in data:
        for y in data:
          self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
                              use_gpu)
          self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
          self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
                              use_gpu)

  def testTensor(self):
    # np.bool was a deprecated alias of the builtin bool and was removed in
    # NumPy 1.24; np.bool_ is the actual NumPy boolean scalar type.
    x = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)
    y = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)
    for use_gpu in [True, False]:
      self._not(x, use_gpu)
      self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
      self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
      self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)

  def testBCast(self):
    """Covers all broadcasting shape combinations, incl. zero-sized dims."""
    shapes = [
        ([1, 3, 2], [1]),
        ([1, 3, 2], [2]),
        ([1, 3, 2], [3, 2]),
        ([1, 3, 2], [3, 1]),
        ([1, 3, 2], [1, 3, 2]),
        ([1, 3, 2], [2, 3, 1]),
        ([1, 3, 2], [2, 1, 1]),
        ([1, 3, 2], [1, 3, 1]),
        ([2, 1, 5], [2, 3, 1]),
        ([2, 0, 5], [2, 0, 1]),
        ([2, 3, 0], [2, 3, 1]),
    ]
    for (xs, ys) in shapes:
      x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool_).reshape(xs)
      y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool_).reshape(ys)
      for use_gpu in [True, False]:
        self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
        self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
        self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)

  @test_util.run_deprecated_v1
  def testShapeMismatch(self):
    """Non-broadcastable shapes must fail at graph-build time."""
    x = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)
    y = np.random.randint(0, 2, 6).astype(np.bool_).reshape(3, 2, 1)
    for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: "Dimensions must" in str(e)):
        f(x, y)

  @test_util.run_deprecated_v1
  def testUsingAsPythonValueFails(self):
    # Ensure that we raise an error when the user attempts to treat a
    # `Tensor` as a Python `bool`.
    b = constant_op.constant(False)
    with self.assertRaises(TypeError):
      if b:
        pass

    x = constant_op.constant(3)
    y = constant_op.constant(4)
    with self.assertRaises(TypeError):
      if x > y:
        pass

    z = constant_op.constant(7)

    # The chained comparison should fail because Python computes `x <
    # y` and short-circuits the comparison with `z` if it is `False`.
    with self.assertRaises(TypeError):
      _ = x < y < z
class SelectOpTest(test.TestCase):
  """Tests array_ops.where(cond, x, y) with an elementwise condition."""

  def _compare(self, c, x, y, use_gpu):
    """Checks where(c, x, y) against np.where."""
    np_ans = np.where(c, x, y)
    with test_util.device(use_gpu=use_gpu):
      out = array_ops.where(c, x, y)
      tf_ans = self.evaluate(out)
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, out)

  def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
    """Checks d(where)/dx via the gradient checker.

    When numeric_gradient_type is set, the numeric jacobian is recomputed in
    that (higher-precision) dtype; needed because fp16 numeric gradients are
    too imprecise.
    """
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = array_ops.where(c, inx, iny)
      s = list(np.shape(c))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, s, out, s, x_init_value=x)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = array_ops.where(c, inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inxf, s, outf, s, x_init_value=xf)
        jacob_n = jacob_n.astype(x.dtype)
    # Looser tolerances for the lower-precision float types.
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
    """Checks d(where)/dy via the gradient checker (see _compareGradientX)."""
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = array_ops.where(c, inx, iny)
      s = list(np.shape(c))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, s, out, s, x_init_value=y, delta=1.0)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = array_ops.where(c, inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inyf, s, outf, s, x_init_value=yf)
        jacob_n = jacob_n.astype(x.dtype)
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def testScalar(self):
    """A scalar Python bool condition selects one whole operand."""
    c = True
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 3, 2) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      xt = x.astype(t)
      yt = y.astype(t)
      self._compare(c, xt, yt, use_gpu=False)
      if t in [np.float16, np.float32, np.float64]:
        self._compare(c, xt, yt, use_gpu=True)

  def testBasic(self):
    # np.bool was removed in NumPy 1.24; np.bool_ is the NumPy boolean type.
    c = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 3, 2) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      xt = x.astype(t)
      yt = y.astype(t)
      self._compare(c, xt, yt, use_gpu=False)
      if t in [np.float16, np.float32, np.float64]:
        self._compare(c, xt, yt, use_gpu=True)

  @test_util.run_deprecated_v1
  def testGradients(self):
    c = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 3, 2) * 100
    for t in [np.float16, np.float32, np.float64]:
      xt = x.astype(t)
      yt = y.astype(t)
      if t == np.float16:
        # Compare fp16 theoretical gradients to fp32 numerical gradients,
        # since fp16 numerical gradients are too imprecise unless great
        # care is taken with choosing the inputs and the delta. This is
        # a weaker check (in particular, it does not test the op itself,
        # only its gradient), but it's much better than nothing.
        # (np.float was a removed alias of the builtin float, i.e. float64.)
        self._compareGradientX(c, xt, yt, np.float64)
        self._compareGradientY(c, xt, yt, np.float64)
      else:
        self._compareGradientX(c, xt, yt)
        self._compareGradientY(c, xt, yt)

  @test_util.run_deprecated_v1
  def testShapeMismatch(self):
    """x and y with different shapes must raise ValueError."""
    c = np.random.randint(0, 2, 6).astype(np.bool_).reshape(1, 3, 2)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(2, 5, 3) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      xt = x.astype(t)
      yt = y.astype(t)
      with self.assertRaises(ValueError):
        array_ops.where(c, xt, yt)

  @test_util.run_deprecated_v1
  def testEmptyTensor(self):
    """Zero-sized inputs produce a zero-sized output without error."""
    c = np.random.randint(0, 3, 0).astype(np.bool_).reshape(1, 3, 0)
    x = np.random.rand(1, 3, 0) * 100
    y = np.random.rand(1, 3, 0) * 100
    z_expected = np.zeros((1, 3, 0), dtype=np.float32)
    with self.cached_session():
      xt = x.astype(np.float32)
      yt = y.astype(np.float32)
      z = array_ops.where(c, xt, yt).eval()
      self.assertAllEqual(z_expected, z)

  @test_util.run_deprecated_v1
  def testNan(self):
    """Verify that nans don't propagate where they shouldn't."""
    with self.cached_session():
      for c in False, True:
        for a in 7.0, np.nan:
          for b in 5.0, np.nan:
            x = array_ops.where(c, a, b).eval()
            y = a if c else b
            self.assertEqual(np.isnan(x), np.isnan(y))
class BatchSelectOpTest(test.TestCase):
  """Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""

  def _compare(self, c, x, y, use_gpu):
    """Checks batched where against a per-row NumPy reference."""
    np_ans = np.dstack(
        [x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
            [2, 0, 1])
    with test_util.device(use_gpu=use_gpu):
      out = array_ops.where(c, x, y)
      tf_ans = self.evaluate(out)
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, out)

  def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
    """Checks d(where)/dx; numeric_gradient_type upgrades the numeric
    jacobian's precision (needed for fp16 inputs)."""
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = array_ops.where(c, inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, s, out, s, x_init_value=x)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = array_ops.where(c, inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inxf, s, outf, s, x_init_value=xf)
        jacob_n = jacob_n.astype(x.dtype)
    # Looser tolerances for the lower-precision float types.
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
    """Checks d(where)/dy (see _compareGradientX)."""
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = array_ops.where(c, inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, s, out, s, x_init_value=y)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = ops.convert_to_tensor(xf)
        inyf = ops.convert_to_tensor(yf)
        outf = array_ops.where(c, inxf, inyf)
        _, jacob_n = gradient_checker.compute_gradient(
            inyf, s, outf, s, x_init_value=yf)
        jacob_n = jacob_n.astype(x.dtype)
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def testBasic(self):
    # np.bool was removed in NumPy 1.24; np.bool_ is the NumPy boolean type.
    c = np.random.randint(0, 2, 16).astype(np.bool_)
    x = np.random.rand(16, 2, 8) * 100
    y = np.random.rand(16, 2, 8) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      xt = x.astype(t)
      yt = y.astype(t)
      self._compare(c, xt, yt, use_gpu=False)
      if t in [np.float16, np.float32, np.float64]:
        self._compare(c, xt, yt, use_gpu=True)

  @test_util.run_deprecated_v1
  def testGradients(self):
    c = np.random.randint(0, 2, 16).astype(np.bool_)
    x = np.random.rand(16, 2, 8) * 100
    y = np.random.rand(16, 2, 8) * 100
    for t in [np.float16, np.float32, np.float64]:
      xt = x.astype(t)
      yt = y.astype(t)
      if t == np.float16:
        # Compare fp16 theoretical gradients to fp32 numerical gradients,
        # since fp16 numerical gradients are too imprecise unless great
        # care is taken with choosing the inputs and the delta. This is
        # a weaker check (in particular, it does not test the op itself,
        # only its gradient), but it's much better than nothing.
        # (np.float was a removed alias of the builtin float, i.e. float64.)
        self._compareGradientX(c, xt, yt, np.float64)
        self._compareGradientY(c, xt, yt, np.float64)
      else:
        self._compareGradientX(c, xt, yt)
        self._compareGradientY(c, xt, yt)

  @test_util.run_deprecated_v1
  def testShapeMismatch(self):
    """Condition length must match the batch dimension of x and y."""
    c = np.random.randint(0, 2, 8).astype(np.bool_)
    x = np.random.rand(16, 3, 2) * 100
    y = np.random.rand(16, 3, 2) * 100
    for t in [
        np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
        np.complex128
    ]:
      xt = x.astype(t)
      yt = y.astype(t)
      with self.assertRaises(ValueError):
        array_ops.where(c, xt, yt)
class MinMaxOpTest(test.TestCase):
  """Tests elementwise minimum/maximum against their NumPy counterparts."""

  def _compare(self, x, y, use_gpu):
    """Verifies both minimum and maximum for one pair of inputs."""
    np_lo, np_hi = np.minimum(x, y), np.maximum(x, y)
    with test_util.device(use_gpu=use_gpu):
      tx = ops.convert_to_tensor(x)
      ty = ops.convert_to_tensor(y)
      op_lo = math_ops.minimum(tx, ty)
      op_hi = math_ops.maximum(tx, ty)
      got_lo, got_hi = self.evaluate([op_lo, op_hi])
    self.assertAllEqual(np_lo, got_lo)
    self.assertAllEqual(np_hi, got_hi)

  def testBasic(self):
    x = np.random.rand(1, 3, 2) * 100.
    y = np.random.rand(1, 3, 2) * 100.
    for dtype in [np.float16, np.float32, np.float64, np.int32, np.int64]:
      for use_gpu in (False, True):
        self._compare(x.astype(dtype), y.astype(dtype), use_gpu=use_gpu)

  def testDifferentShapes(self):
    x = np.random.rand(1, 3, 2) * 100.
    y = np.random.rand(2) * 100.  # should broadcast
    for dtype in [np.float16, np.float32, np.float64, np.int32, np.int64]:
      for use_gpu in (False, True):
        self._compare(x.astype(dtype), y.astype(dtype), use_gpu=use_gpu)

  def testScalar(self):
    x = np.random.rand(1, 3, 2) * 100.
    y = np.random.rand(1).item() * 100.  # should broadcast
    # dropped np.float64, int64 because TF automatically converts to 32 bit
    for dtype in [np.float32, np.int32]:
      for use_gpu in (False, True):
        self._compare(x.astype(dtype), dtype(y), use_gpu=use_gpu)

  def _compareGradientX(self, func, x, y):
    """Gradient-checks func with respect to its first argument."""
    with self.cached_session():
      tx = ops.convert_to_tensor(x)
      ty = ops.convert_to_tensor(y)
      out = func(tx, ty)
      shape = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          tx, shape, out, shape, x_init_value=x)
    if x.dtype in (np.float16, np.float32):
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _compareGradientY(self, func, x, y):
    """Gradient-checks func with respect to its second argument."""
    with self.cached_session():
      tx = ops.convert_to_tensor(x)
      ty = ops.convert_to_tensor(y)
      out = func(tx, ty)
      shape = list(np.shape(x))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          ty, shape, out, shape, x_init_value=y)
    if x.dtype in (np.float16, np.float32):
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  @test_util.run_deprecated_v1
  def testGradients(self):
    x = np.random.rand(1, 3, 2) * 100.
    # ensure x != y
    y = x + (np.random.randint(2, size=x.shape) - .5) * 2  # -1 or +1
    for func in (math_ops.maximum, math_ops.minimum):
      self._compareGradientX(func, x, y)
      self._compareGradientY(func, x, y)
class MathOpsOverloadTest(test.TestCase):
  """Exercises Python operator overloads (__add__, __lt__, ...) on Tensors."""

  def _computeTensorAndLiteral(self, x, y, dtype, func):
    # Tensor on the left: exercises the forward dunder (__add__, __sub__, ...).
    with test_util.force_cpu():
      tx = ops.convert_to_tensor(x, dtype=dtype)
      return self.evaluate(func(tx, y))

  def _computeLiteralAndTensor(self, x, y, dtype, func):
    # Tensor on the right: exercises the reflected dunder (__radd__, ...).
    with test_util.force_cpu():
      ty = ops.convert_to_tensor(y, dtype=dtype)
      return self.evaluate(func(x, ty))

  def _compareBinary(self, x, y, dtype, np_func, tf_func):
    """Checks tf_func against np_func with the tensor on either side."""
    expected = np_func(x, y).astype(dtype.as_numpy_dtype)
    self.assertAllClose(expected,
                        self._computeTensorAndLiteral(x, y, dtype, tf_func))
    self.assertAllClose(expected,
                        self._computeLiteralAndTensor(x, y, dtype, tf_func))

  def _compareUnary(self, x, dtype, np_func, tf_func):
    """Checks a unary overload against its NumPy counterpart."""
    expected = np_func(x).astype(dtype.as_numpy_dtype)
    with test_util.force_cpu():
      actual = tf_func(ops.convert_to_tensor(x, dtype=dtype))
      self.assertAllClose(expected, self.evaluate(actual))

  def testOverload(self):
    dtypes = [
        dtypes_lib.float16,
        dtypes_lib.float32,
        dtypes_lib.float64,
        dtypes_lib.int32,
        dtypes_lib.int64,
        dtypes_lib.complex64,
        dtypes_lib.complex128,
    ]
    funcs = [
        (np.add, _ADD),
        (np.subtract, _SUB),
        (np.multiply, _MUL),
        (np.power, _POW),
        (np.true_divide, _TRUEDIV),
        (np.floor_divide, _FLOORDIV),
    ]
    for dtype in dtypes:
      for np_func, tf_func in funcs:
        if dtype in (dtypes_lib.complex64,
                     dtypes_lib.complex128) and tf_func == _FLOORDIV:
          continue  # floordiv makes no sense for complex
        self._compareBinary(10, 5, dtype, np_func, tf_func)
    # Mod only works for int32 and int64.
    for dtype in (dtypes_lib.int32, dtypes_lib.int64):
      self._compareBinary(10, 3, dtype, np.mod, _MOD)

  def testOverloadComparisons(self):
    dtypes = [
        dtypes_lib.float16,
        dtypes_lib.float32,
        dtypes_lib.float64,
        dtypes_lib.int32,
        dtypes_lib.int64,
    ]
    funcs = [
        (np.less, _LT),
        (np.less_equal, _LE),
        (np.greater, _GT),
        (np.greater_equal, _GE),
    ]
    for dtype in dtypes:
      for np_func, tf_func in funcs:
        self._compareBinary(10, 5, dtype, np_func, tf_func)
    logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),
                     (np.logical_xor, _XOR), (np.equal, math_ops.equal),
                     (np.not_equal, math_ops.not_equal)]
    for np_func, tf_func in logical_funcs:
      # Scalar bool pairs, then a vector pair covering every combination.
      for lhs, rhs in [(True, False), (True, True), (False, False),
                       (False, True)]:
        self._compareBinary(lhs, rhs, dtypes_lib.bool, np_func, tf_func)
      self._compareBinary([True, True, False, False],
                          [True, False, True, False], dtypes_lib.bool,
                          np_func, tf_func)
    for value in (True, False, [True, False]):
      self._compareUnary(value, dtypes_lib.bool, np.logical_not, _INV)
class IsFiniteInfNanTest(test.TestCase):
  """Tests is_finite/is_inf/is_nan, plus sqrt's NaN behavior."""

  def _compare(self, x, use_gpu):
    """Checks all three predicates (values and shapes) against NumPy."""
    np_finite = np.isfinite(x)
    np_inf = np.isinf(x)
    np_nan = np.isnan(x)
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(x)
      ofinite = math_ops.is_finite(inx)
      oinf = math_ops.is_inf(inx)
      onan = math_ops.is_nan(inx)
      tf_finite, tf_inf, tf_nan = self.evaluate([ofinite, oinf, onan])
    self.assertAllEqual(np_inf, tf_inf)
    self.assertAllEqual(np_nan, tf_nan)
    self.assertAllEqual(np_finite, tf_finite)
    self.assertShapeEqual(np_inf, oinf)
    self.assertShapeEqual(np_nan, onan)
    self.assertShapeEqual(np_finite, ofinite)

  def _testDtype(self, dtype):
    fi = np.finfo(dtype)
    # Zero, +-1, the smallest resolvable steps, the dtype extremes, and the
    # non-finite values.
    data = np.array([
        0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max, -np.inf,
        np.inf, np.nan
    ]).astype(dtype)
    for use_gpu in (False, True):
      self._compare(data, use_gpu=use_gpu)

  def testHalf(self):
    self._testDtype(np.float16)

  def testFloat(self):
    self._testDtype(np.float32)

  def testDouble(self):
    self._testDtype(np.float64)

  def testSqrt(self):
    for dtype in [np.float16, np.float32, np.float64]:
      fi = np.finfo(dtype)
      for size in [1, 3, 4, 7, 8, 63, 64, 65]:
        # For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.
        # It is not accurate for very large arguments, so fi.max / 100 is
        # tested instead of fi.max.
        for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:
          x = np.full((size,), value, dtype=dtype)
          np_y = np.sqrt(x)
          np_nan = np.isnan(np_y)
          with test_util.use_gpu():
            tf_y = math_ops.sqrt(x)
            tf_nan = math_ops.is_nan(tf_y)
            if value < 0:
              # Negative inputs must produce NaN, matching NumPy.
              self.assertAllEqual(np_nan, self.evaluate(tf_nan))
            else:
              self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))
class RoundingTest(test.TestCase):
  """Tests floor, ceil, and rint against NumPy."""

  def _compare_values(self, x, y=None):
    """Checks rint(x); expected values default to np.rint(x)."""
    expected = np.rint(x) if y is None else np.asarray(y)
    tf_rint = math_ops.rint(x)
    got = self.evaluate(tf_rint)
    self.assertAllEqual(expected, got)
    self.assertShapeEqual(expected, tf_rint)

  def _compare(self, x):
    """Checks floor and ceil (values and shapes) for one input array."""
    inx = ops.convert_to_tensor(x)
    for np_func, tf_out in ((np.floor, math_ops.floor(inx)),
                            (np.ceil, math_ops.ceil(inx))):
      expected = np_func(x)
      self.assertAllEqual(expected, self.evaluate(tf_out))
      self.assertShapeEqual(expected, tf_out)

  def _testDtype(self, dtype):
    data = (np.arange(-3, 3) / 4.).reshape(1, 3, 2).astype(dtype)
    self._compare(data)
    # TODO: rint op is not supported for float16
    if dtype is np.float16:
      return
    self._compare_values(data)
    # Exactly 0.5 rounds to even; just above 0.5 rounds up.
    self._compare_values([0.5, 0.5000001], y=[0.0, 1.0])
    # The example from the np.rint documentation.
    self._compare_values([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0],
                         y=[-2., -2., -0., 0., 2., 2., 2.])

  def testTypes(self):
    for dtype in (np.float16, np.float32, np.float64):
      self._testDtype(dtype)
class ComplexMakeRealImagTest(test.TestCase):
  """Tests complex construction/decomposition ops and their gradients."""

  def _compareMake(self, real, imag, use_gpu):
    """Checks math_ops.complex(real, imag) against real + 1j*imag."""
    np_ans = real + (1j) * imag
    with test_util.device(use_gpu=use_gpu):
      real = ops.convert_to_tensor(real)
      imag = ops.convert_to_tensor(imag)
      tf_ans = math_ops.complex(real, imag)
      out = self.evaluate(tf_ans)
    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans)

  def testMake(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    for use_gpu in [False, True]:
      self._compareMake(real, imag, use_gpu)
      # Scalars must broadcast against arrays on either side.
      self._compareMake(real, 12.0, use_gpu)
      self._compareMake(23.0, imag, use_gpu)

  def _compareRealImag(self, cplx, use_gpu):
    """Checks real()/imag(), including their behavior on real inputs."""
    np_real, np_imag = np.real(cplx), np.imag(cplx)
    np_zeros = np_real * 0
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(cplx)
      tf_real = math_ops.real(inx)
      tf_imag = math_ops.imag(inx)
      # real() of a real tensor is the identity; imag() of one is zeros.
      tf_real_real = math_ops.real(tf_real)
      tf_imag_real = math_ops.imag(tf_real)
      self.assertAllEqual(np_real, self.evaluate(tf_real))
      self.assertAllEqual(np_imag, self.evaluate(tf_imag))
      self.assertAllEqual(np_real, self.evaluate(tf_real_real))
      self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))

  def testRealImag64(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    cplx = real + 1j * imag
    self._compareRealImag(cplx, use_gpu=False)
    self._compareRealImag(cplx, use_gpu=True)

  def testRealImag128(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareRealImag(cplx, use_gpu=False)
    self._compareRealImag(cplx, use_gpu=True)

  def _compareAngle(self, cplx, use_gpu):
    """Checks math_ops.angle against np.angle."""
    np_angle = np.angle(cplx)
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(cplx)
      tf_angle = math_ops.angle(inx)
      tf_angle_val = self.evaluate(tf_angle)
    self.assertAllClose(np_angle, tf_angle_val)
    self.assertShapeEqual(np_angle, tf_angle)

  def testAngle64(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    cplx = real + 1j * imag
    self._compareAngle(cplx, use_gpu=False)
    self._compareAngle(cplx, use_gpu=True)

  def testAngle(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareAngle(cplx, use_gpu=False)
    self._compareAngle(cplx, use_gpu=True)

  @test_util.run_deprecated_v1
  def testRealReal(self):
    # real() of an already-real tensor returns the input tensor itself.
    for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,
                  dtypes_lib.float64):
      x = array_ops.placeholder(dtype)
      y = math_ops.real(x)
      self.assertEqual(x, y)

  def _compareConj(self, cplx, use_gpu):
    """Checks math_ops.conj against np.conj."""
    np_ans = np.conj(cplx)
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(cplx)
      tf_conj = math_ops.conj(inx)
      tf_ans = self.evaluate(tf_conj)
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, tf_conj)

  def testConj64(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    cplx = real + 1j * imag
    self._compareConj(cplx, use_gpu=False)
    self._compareConj(cplx, use_gpu=True)

  def testConj128(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareConj(cplx, use_gpu=False)
    self._compareConj(cplx, use_gpu=True)

  @test_util.run_deprecated_v1
  def testConjReal(self):
    # conj() of a real tensor returns the input tensor itself.
    for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16,
                  dtypes_lib.float32, dtypes_lib.float64):
      x = array_ops.placeholder(dtype)
      y = math_ops.conj(x)
      self.assertEqual(x, y)

  @test_util.run_deprecated_v1
  def testConjString(self):
    x = array_ops.placeholder(dtypes_lib.string)
    # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
    # use the modern assertRaisesRegex name.
    with self.assertRaisesRegex(TypeError,
                                r"Expected numeric or variant tensor"):
      math_ops.conj(x)

  def _compareGradient(self, x):
    # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
    # complex numbers. Then, we extract real and imag parts and
    # computes the squared sum. This is obviously the same as sum(real
    # * real) + sum(imag * imag). We just want to make sure the
    # gradient function is checked.
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)
      real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])
      cplx = math_ops.complex(real, imag)
      cplx = math_ops.conj(cplx)
      loss = math_ops.reduce_sum(math_ops.square(
          math_ops.real(cplx))) + math_ops.reduce_sum(
              math_ops.square(math_ops.imag(cplx)))
      epsilon = 1e-3
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)

  def _compareBroadcastGradient(self, x):
    # Gradient of abs(complex(...)) when one operand is a broadcast scalar.
    x_ = ops.convert_to_tensor(x)
    epsilon = 1e-3
    with self.cached_session():
      for args in [(x_, 0.), (0., x_)]:
        z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
        jacob_t, jacob_n = gradient_checker.compute_gradient(
            x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
        self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)

  @test_util.run_deprecated_v1
  def testGradient(self):
    # complex64
    data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
    self._compareGradient(data)
    self._compareBroadcastGradient(data)
    # complex128
    data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)
    self._compareGradient(data)

  def _compareMulGradient(self, data):
    # data is a float matrix of shape [n, 4]. data[:, 0], data[:, 1],
    # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
    # x, real parts of y and imaginary parts of y.
    with self.cached_session():
      inp = ops.convert_to_tensor(data)
      xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)

      def vec(x):  # Reshape to a vector
        return array_ops.reshape(x, [-1])

      xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)

      def cplx(r, i):  # Combine to a complex vector
        return math_ops.complex(r, i)

      x, y = cplx(xr, xi), cplx(yr, yi)
      # z is x times y in complex plane.
      z = x * y
      # Defines the loss function as the sum of all coefficients of z.
      loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))
      epsilon = 0.005
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)

  @test_util.run_deprecated_v1
  def testMulGradient(self):
    data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
    self._compareMulGradient(data)
class AccumulateTest(test.TestCase):
  """Tests math_ops.accumulate_n and its argument validation."""

  def testSimple(self):
    with self.cached_session():
      arrays = [
          np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
      ]
      tensors = [
          ops.convert_to_tensor(a, dtype=dtypes_lib.float32) for a in arrays
      ]
      tf_val = math_ops.accumulate_n(tensors)
      # Accumulate sequentially, mirroring the order accumulate_n sums in.
      expected = arrays[0].copy()
      for arr in arrays[1:]:
        expected += arr
      self.assertAllClose(expected, self.evaluate(tf_val))

  def testZeroArgs(self):
    # accumulate_n requires at least one input tensor.
    with self.cached_session():
      with self.assertRaises(ValueError):
        tf_val = math_ops.accumulate_n([])
        self.evaluate(tf_val)

  def testWrongShape(self):
    with self.cached_session():
      with self.assertRaises(ValueError):
        v1 = variables.Variable(0.2)
        v2 = variables.Variable(0.1)
        math_ops.accumulate_n([v1, v2], shape=[2, 2])  # Should be shape=[]

  def testWrongType(self):
    # tensor_dtype must match the dtype of the inputs.
    with self.cached_session():
      with self.assertRaises(TypeError):
        v1 = variables.Variable(0.2, dtype=np.float32)
        v2 = variables.Variable(0.1, dtype=np.float32)
        math_ops.accumulate_n([v1, v2], tensor_dtype=np.int32)

  def testWrongTypeOneInput(self):
    # Scenario that used to trigger a bug, even when testWrongType() worked
    with self.cached_session():
      with self.assertRaises(TypeError):
        v1 = variables.Variable(0.2, dtype=np.float32)
        math_ops.accumulate_n([v1], tensor_dtype=np.int32)
class PolyvalTest(test.TestCase):
  """Tests math_ops.polyval against np.polyval."""

  def _runtest(self, dtype, degree):
    """Compares polyval to np.polyval for random coefficients of `degree`."""
    sample = np.random.rand(2, 2).astype(dtype)
    coeffs = [np.random.rand(2, 2).astype(dtype) for _ in range(degree + 1)]
    expected = np.polyval(coeffs, sample)
    with self.cached_session():
      actual = math_ops.polyval(coeffs, sample)
      self.assertAllClose(expected, self.evaluate(actual))

  def testSimple(self):
    for dtype in [
        np.int32, np.float32, np.float64, np.complex64, np.complex128
    ]:
      for degree in range(5):
        self._runtest(dtype, degree)

  def testBroadcast(self):
    """Coefficients and evaluation points may broadcast against each other."""
    dtype = np.float32
    degree = 3
    shapes = [(1,), (2, 1), (1, 2), (2, 2)]
    for x_shape in shapes:
      for coeff_shape in shapes:
        sample = np.random.rand(*x_shape).astype(dtype)
        coeffs = [
            np.random.rand(*coeff_shape).astype(dtype)
            for _ in range(degree + 1)
        ]
        expected = np.polyval(coeffs, sample)
        with self.cached_session():
          actual = math_ops.polyval(coeffs, sample)
          self.assertAllClose(expected, self.evaluate(actual))

  def testEmpty(self):
    """An empty coefficient list behaves like np.polyval([], x)."""
    sample = np.random.rand(2, 2).astype(np.float32)
    coeffs = []
    expected = np.polyval(coeffs, sample)
    with self.cached_session():
      actual = math_ops.polyval(coeffs, sample)
      self.assertAllClose(expected, self.evaluate(actual))
# Standard TensorFlow test entry point: discovers and runs all test cases
# defined in this module.
if __name__ == "__main__":
  test.main()
| |
from importlib import import_module
import os
import pkgutil
from threading import local
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango18Warning, RemovedInDjango19Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils._os import upath
from django.utils import six
DEFAULT_DB_ALIAS = 'default'
# PEP 249 exception hierarchy, mirrored so callers can catch Django-level
# exceptions regardless of the database backend in use.
class Error(Exception if six.PY3 else StandardError):
    """Base class of all database exceptions (PEP 249 'Error')."""
    pass
class InterfaceError(Error):
    """Error related to the database interface rather than the database."""
    pass
class DatabaseError(Error):
    """Error related to the database itself."""
    pass
class DataError(DatabaseError):
    """Error due to problems with the processed data."""
    pass
class OperationalError(DatabaseError):
    """Error related to the database's operation, outside programmer control."""
    pass
class IntegrityError(DatabaseError):
    """Error raised when the relational integrity of the database is affected."""
    pass
class InternalError(DatabaseError):
    """Error raised when the database encounters an internal problem."""
    pass
class ProgrammingError(DatabaseError):
    """Error caused by a programming mistake (bad SQL, wrong parameters, ...)."""
    pass
class NotSupportedError(DatabaseError):
    """Error raised when a method or API is not supported by the database."""
    pass
class DatabaseErrorWrapper(object):
    """
    Context manager and decorator that re-throws backend-specific database
    exceptions using Django's common wrappers.
    """
    def __init__(self, wrapper):
        """
        wrapper is a database wrapper.
        It must have a Database attribute defining PEP-249 exceptions.
        """
        self.wrapper = wrapper
    def __enter__(self):
        # Nothing to set up; all the work happens in __exit__.
        pass
    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            return
        # Walk Django's exception types from most to least specific so the
        # first issubclass() match yields the closest wrapper; the PEP 249
        # roots (DatabaseError, InterfaceError, Error) come last.
        for dj_exc_type in (
            DataError,
            OperationalError,
            IntegrityError,
            InternalError,
            ProgrammingError,
            NotSupportedError,
            DatabaseError,
            InterfaceError,
            Error,
        ):
            # The backend's Database module exposes classes under the same
            # PEP 249 names, e.g. wrapper.Database.IntegrityError.
            db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
            if issubclass(exc_type, db_exc_type):
                dj_exc_value = dj_exc_type(*exc_value.args)
                # Keep the original backend exception reachable via __cause__.
                dj_exc_value.__cause__ = exc_value
                # Only set the 'errors_occurred' flag for errors that may make
                # the connection unusable.
                if dj_exc_type not in (DataError, IntegrityError):
                    self.wrapper.errors_occurred = True
                six.reraise(dj_exc_type, dj_exc_value, traceback)
    def __call__(self, func):
        # Decorator form: run func inside this context manager.
        # Note that we are intentionally not using @wraps here for performance
        # reasons. Refs #21109.
        def inner(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return inner
def load_backend(backend_name):
    """
    Import and return the 'base' module of the given database backend.

    On failure, raise ImproperlyConfigured with a listing of the built-in
    backends, unless the requested backend *is* a built-in one (in which
    case the original ImportError is a bug and is re-raised).
    """
    try:
        # Look for a fully qualified database backend name
        return import_module('%s.base' % backend_name)
    except ImportError as exc:
        # The database backend wasn't found. Display a helpful error message
        # listing all possible (built-in) database backends.
        backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
        try:
            builtin_backends = [
                name for _, name, ispkg in pkgutil.iter_modules([backend_dir])
                if ispkg and name != 'dummy']
        except EnvironmentError:
            builtin_backends = []
        if backend_name in ['django.db.backends.%s' % b
                            for b in builtin_backends]:
            # If there's some other error, this must be an error in Django
            raise
        backend_reprs = map(repr, sorted(builtin_backends))
        error_msg = ("%r isn't an available database backend.\n"
                     "Try using 'django.db.backends.XXX', where XXX "
                     "is one of:\n %s\nError was: %s" %
                     (backend_name, ", ".join(backend_reprs), exc))
        raise ImproperlyConfigured(error_msg)
class ConnectionDoesNotExist(Exception):
    """Raised when a connection alias is not present in DATABASES."""
    pass
class ConnectionHandler(object):
    """Lazily creates and caches one database connection per alias.

    Connections are stored in thread-local storage, so each thread gets its
    own connection objects and they are never shared across threads.
    """
    def __init__(self, databases=None):
        """
        databases is an optional dictionary of database definitions (structured
        like settings.DATABASES).
        """
        self._databases = databases
        # Thread-local storage: one set of connections per thread.
        self._connections = local()
    @cached_property
    def databases(self):
        # Resolved lazily so importing this module doesn't require settings.
        if self._databases is None:
            self._databases = settings.DATABASES
        if self._databases == {}:
            # Nothing configured: install the dummy backend as the default so
            # attribute access works but actual queries fail loudly.
            self._databases = {
                DEFAULT_DB_ALIAS: {
                    'ENGINE': 'django.db.backends.dummy',
                },
            }
        if DEFAULT_DB_ALIAS not in self._databases:
            raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
        return self._databases
    def ensure_defaults(self, alias):
        """
        Puts the defaults into the settings dictionary for a given connection
        where no settings is provided.
        """
        try:
            conn = self.databases[alias]
        except KeyError:
            raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
        conn.setdefault('ATOMIC_REQUESTS', False)
        if settings.TRANSACTIONS_MANAGED:
            warnings.warn(
                "TRANSACTIONS_MANAGED is deprecated. Use AUTOCOMMIT instead.",
                RemovedInDjango18Warning, stacklevel=2)
            # Legacy managed-transactions mode implies autocommit off ...
            conn.setdefault('AUTOCOMMIT', False)
        # ... otherwise autocommit defaults to on (no-op if already set above).
        conn.setdefault('AUTOCOMMIT', True)
        conn.setdefault('ENGINE', 'django.db.backends.dummy')
        if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
            conn['ENGINE'] = 'django.db.backends.dummy'
        conn.setdefault('CONN_MAX_AGE', 0)
        conn.setdefault('OPTIONS', {})
        conn.setdefault('TIME_ZONE', 'UTC' if settings.USE_TZ else settings.TIME_ZONE)
        for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
            conn.setdefault(setting, '')
    # Deprecated top-level TEST_* names mapped to their keys in the TEST dict.
    TEST_SETTING_RENAMES = {
        'CREATE': 'CREATE_DB',
        'USER_CREATE': 'CREATE_USER',
        'PASSWD': 'PASSWORD',
    }
    TEST_SETTING_RENAMES_REVERSE = {v: k for k, v in TEST_SETTING_RENAMES.items()}
    def prepare_test_settings(self, alias):
        """
        Makes sure the test settings are available in the 'TEST' sub-dictionary.
        """
        try:
            conn = self.databases[alias]
        except KeyError:
            raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
        # Remember whether an explicit TEST dict was configured before the
        # setdefault() below guarantees one exists.
        test_dict_set = 'TEST' in conn
        test_settings = conn.setdefault('TEST', {})
        old_test_settings = {}
        # Collect deprecated top-level TEST_* settings, translating legacy
        # names (e.g. TEST_CREATE -> CREATE_DB) along the way.
        for key, value in six.iteritems(conn):
            if key.startswith('TEST_'):
                new_key = key[5:]
                new_key = self.TEST_SETTING_RENAMES.get(new_key, new_key)
                old_test_settings[new_key] = value
        if old_test_settings:
            if test_dict_set:
                # Both styles configured: they must agree exactly.
                if test_settings != old_test_settings:
                    raise ImproperlyConfigured(
                        "Connection '%s' has mismatched TEST and TEST_* "
                        "database settings." % alias)
            else:
                test_settings.update(old_test_settings)
                for key, _ in six.iteritems(old_test_settings):
                    warnings.warn("In Django 1.9 the TEST_%s connection setting will be moved "
                                  "to a %s entry in the TEST setting" %
                                  (self.TEST_SETTING_RENAMES_REVERSE.get(key, key), key),
                                  RemovedInDjango19Warning, stacklevel=2)
        # The deprecated keys have been merged (or rejected); drop them.
        for key in list(conn.keys()):
            if key.startswith('TEST_'):
                del conn[key]
        # Check that they didn't just use the old name with 'TEST_' removed
        for key, new_key in six.iteritems(self.TEST_SETTING_RENAMES):
            if key in test_settings:
                warnings.warn("Test setting %s was renamed to %s; specified value (%s) ignored" %
                              (key, new_key, test_settings[key]), stacklevel=2)
        for key in ['CHARSET', 'COLLATION', 'NAME', 'MIRROR']:
            test_settings.setdefault(key, None)
    def __getitem__(self, alias):
        # Return the per-thread connection for ``alias``, creating and caching
        # it on first access.
        if hasattr(self._connections, alias):
            return getattr(self._connections, alias)
        self.ensure_defaults(alias)
        self.prepare_test_settings(alias)
        db = self.databases[alias]
        backend = load_backend(db['ENGINE'])
        conn = backend.DatabaseWrapper(db, alias)
        setattr(self._connections, alias, conn)
        return conn
    def __setitem__(self, key, value):
        setattr(self._connections, key, value)
    def __delitem__(self, key):
        delattr(self._connections, key)
    def __iter__(self):
        # Iterating a handler yields the configured aliases.
        return iter(self.databases)
    def all(self):
        """Return connection objects for every configured alias."""
        return [self[alias] for alias in self]
class ConnectionRouter(object):
    """Delegates database-routing decisions to a chain of router objects;
    the first router returning a non-None answer wins."""
    def __init__(self, routers=None):
        """
        If routers is not specified, will default to settings.DATABASE_ROUTERS.
        """
        self._routers = routers
    @cached_property
    def routers(self):
        # Resolved lazily so importing this module doesn't require settings.
        if self._routers is None:
            self._routers = settings.DATABASE_ROUTERS
        routers = []
        for r in self._routers:
            if isinstance(r, six.string_types):
                # Dotted-path entries are imported and instantiated.
                router = import_string(r)()
            else:
                router = r
            routers.append(router)
        return routers
    def _router_func(action):
        # Class-body factory (note: no ``self``) that builds the
        # db_for_read/db_for_write methods for the given router hook name.
        def _route_db(self, model, **hints):
            chosen_db = None
            for router in self.routers:
                try:
                    method = getattr(router, action)
                except AttributeError:
                    # If the router doesn't have a method, skip to the next one.
                    pass
                else:
                    chosen_db = method(model, **hints)
                    if chosen_db:
                        return chosen_db
            # No router had an opinion: prefer the instance's current db,
            # falling back to the default alias.
            try:
                return hints['instance']._state.db or DEFAULT_DB_ALIAS
            except KeyError:
                return DEFAULT_DB_ALIAS
        return _route_db
    db_for_read = _router_func('db_for_read')
    db_for_write = _router_func('db_for_write')
    def allow_relation(self, obj1, obj2, **hints):
        """Return whether a relation between obj1 and obj2 is allowed.
        Default (no router answers): allowed only within the same database."""
        for router in self.routers:
            try:
                method = router.allow_relation
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                pass
            else:
                allow = method(obj1, obj2, **hints)
                if allow is not None:
                    return allow
        return obj1._state.db == obj2._state.db
    def allow_migrate(self, db, model):
        """Return whether ``model`` may be migrated on ``db``.
        Falls back to the deprecated allow_syncdb router hook."""
        for router in self.routers:
            try:
                try:
                    method = router.allow_migrate
                except AttributeError:
                    method = router.allow_syncdb
                    warnings.warn(
                        'Router.allow_syncdb has been deprecated and will stop working in Django 1.9. '
                        'Rename the method to allow_migrate.',
                        RemovedInDjango19Warning, stacklevel=2)
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                pass
            else:
                allow = method(db, model)
                if allow is not None:
                    return allow
        return True
    def get_migratable_models(self, app_config, db, include_auto_created=False):
        """
        Return app models allowed to be synchronized on provided db.
        """
        models = app_config.get_models(include_auto_created=include_auto_created)
        return [model for model in models if self.allow_migrate(db, model)]
| |
import numpy as np
def dataprep(puf, Stage_I_factors, Stage_II_targets, year):
    """Build the Stage-II LP inputs for ``year`` and save them to an .npz file.

    puf: record data exposing the PUF columns as attributes (e.g. a DataFrame).
    Stage_I_factors: per-year growth factors, indexed by ``year`` then name.
    Stage_II_targets: per-year targets, indexed by str(year) then target name.

    Writes "<year>_input.npz" containing A1 (+LHS), A2 (-LHS) and the
    right-hand-side vector b. Returns None.
    """
    print("Preparing coefficient matrix for year {} .....".format(year))
    factors = Stage_I_factors[year]
    # Weights: social-security recipients grow with the senior population
    # factor, everyone else with the returns factor.
    s006 = np.where(
        puf.e02400 > 0,
        puf.s006 * factors["APOPSNR"] / 100,
        puf.s006 * factors["ARETS"] / 100,
    )
    is_filer = puf.filer == 1
    # Return counts by filing status, filers only.
    single_return = np.where((puf.mars == 1) & is_filer, s006, 0)
    joint_return = np.where(((puf.mars == 2) | (puf.mars == 3)) & is_filer, s006, 0)
    hh_return = np.where((puf.mars == 4) & is_filer, s006, 0)
    return_w_SS = np.where((puf.e02400 > 0) & is_filer, s006, 0)
    dependent_exempt_num = (puf.xocah + puf.xocawh + puf.xoodep + puf.xopar) * s006
    # Weighted income components; gains/losses are split at zero.
    interest = puf.e00300 * s006
    dividend = puf.e00600 * s006
    biz_income = np.where(puf.e00900 > 0, puf.e00900, 0) * s006
    biz_loss = np.where(puf.e00900 < 0, -puf.e00900, 0) * s006
    total_gains = puf.p23250 + puf.p22250
    cap_gain = np.where(total_gains > 0, total_gains, 0) * s006
    annuity_pension = puf.e01700 * s006
    sch_e_income = np.where(puf.e02000 > 0, puf.e02000, 0) * s006
    sch_e_loss = np.where(puf.e02000 < 0, -puf.e02000, 0) * s006
    ss_income = np.where(puf.filer == 1, puf.e02400, 0) * s006
    unemployment_comp = puf.e02300 * s006
    # Wage distribution over AGI (e00100) brackets: (lo, hi] intervals, with
    # open-ended first (<= 0) and last (> $1M) bins.
    agi_edges = [0, 10000, 20000, 30000, 40000, 50000,
                 75000, 100000, 200000, 500000, 1000000]
    wages = [np.where(puf.e00100 <= 0, puf.e00200, 0) * s006]
    for lo, hi in zip(agi_edges[:-1], agi_edges[1:]):
        in_bracket = (puf.e00100 > lo) & (puf.e00100 <= hi)
        wages.append(np.where(in_bracket, puf.e00200, 0) * s006)
    wages.append(np.where(puf.e00100 > 1000000, puf.e00200, 0) * s006)
    # Set up the matrix: one row per target, in the same order as b below.
    One_half_LHS = np.vstack(
        [
            single_return,
            joint_return,
            hh_return,
            return_w_SS,
            dependent_exempt_num,
            interest,
            dividend,
            biz_income,
            biz_loss,
            cap_gain,
            annuity_pension,
            sch_e_income,
            sch_e_loss,
            ss_income,
            unemployment_comp,
        ]
        + wages
    )
    # Coefficients for r and s
    A1 = np.array(One_half_LHS)
    A2 = np.array(-One_half_LHS)
    print("Preparing targets for year {} .....".format(year))
    ystr = "{}".format(year)
    targets = Stage_II_targets[ystr]
    APOPN = factors["APOPN"]

    def grown_target(name, factor_key, pop=None):
        # Dollar target (thousands) grown by a population factor and deflated
        # by the per-series Stage-I factor; same operation order as before.
        pop = APOPN if pop is None else pop
        return targets[name] * pop / factors[factor_key] * 1000

    # Right-hand side: target minus the current weighted aggregate.
    b = [
        targets["Single Returns"] - single_return.sum(),
        targets["Joint Returns"] - joint_return.sum(),
        targets["Head of Household Returns"] - hh_return.sum(),
        targets["Number of Returns w/ Gross Security Income"] - return_w_SS.sum(),
        targets["Number of Dependent Exemptions"] - dependent_exempt_num.sum(),
        grown_target("Taxable Interest Income", "AINTS") - interest.sum(),
        grown_target("Ordinary Dividends", "ADIVS") - dividend.sum(),
        grown_target("Business Income (Schedule C)", "ASCHCI") - biz_income.sum(),
        grown_target("Business Loss (Schedule C)", "ASCHCL") - biz_loss.sum(),
        grown_target("Net Capital Gains in AGI", "ACGNS") - cap_gain.sum(),
        grown_target("Taxable Pensions and Annuities", "ATXPY") - annuity_pension.sum(),
        grown_target("Supplemental Income (Schedule E)", "ASCHEI") - sch_e_income.sum(),
        grown_target("Supplemental Loss (Schedule E)", "ASCHEL") - sch_e_loss.sum(),
        grown_target("Gross Social Security Income", "ASOCSEC", pop=factors["APOPSNR"]) - ss_income.sum(),
        grown_target("Unemployment Compensation", "AUCOMP") - unemployment_comp.sum(),
    ]
    wage_target_names = [
        "Wages and Salaries: Zero or Less",
        "Wages and Salaries: $1 Less Than $10,000",
        "Wages and Salaries: $10,000 Less Than $20,000",
        "Wages and Salaries: $20,000 Less Than $30,000",
        "Wages and Salaries: $30,000 Less Than $40,000",
        "Wages and Salaries: $40,000 Less Than $50,000",
        "Wages and Salaries: $50,000 Less Than $75,000",
        "Wages and Salaries: $75,000 Less Than $100,000",
        "Wages and Salaries: $100,000 Less Than $200,000",
        "Wages and Salaries: $200,000 Less Than $500,000",
        "Wages and Salaries: $500,000 Less Than $1 Million",
        "Wages and Salaries: $1 Million and Over",
    ]
    for name, wage_col in zip(wage_target_names, wages):
        b.append(grown_target(name, "AWAGE") - wage_col.sum())
    # export to .npz file
    np.savez(str(str(year) + "_input.npz"), A1=A1, A2=A2, b=b)
| |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 25 10:43:58 2017
@author: Salem and Wife
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 17 06:14:39 2017
@author: Salem
starting with a single square I want to see if I can add points (which I will call grey matter) to it that will make its 4 nodes move in a desired way.
All the points will be connected, when the cost is minimized some of the spring constants will be allowed to go to zero, (2 N - 4) of them to be specific.
Elastic energy is minimized first, then the cost function brings this energy to zero for the desired motion.
normalizeVec, connect_all_verts, makeRigidityMat are defined in LatticeMaking
Methods:
find_desired_square(deformationType = DispType.random, edgeType = EdgeTypes.all_connected,
num_of_added_verts = NUM_OF_ADDED_VERTS, squareDisp = None)
initialize_square(num_of_added_points)
TODO fill this up
"""
import numpy as np
import numpy.random as npr
import LatticeMaking
from numpy import linalg as la
from matplotlib import pyplot as plt
import scipy.optimize as op
import importlib
importlib.reload(LatticeMaking)
from LatticeMaking import * #custom
from enum import Enum
# default number of "gray matter" vertices added to the square
NUM_OF_ADDED_VERTS = 5;
# planar lattice: two coordinates per vertex
NUM_OF_DIMENSIONS = 2;
#maximum number of trials before adding more vertices to the gray matter
MAX_TRIALS = 30
# the coupling constant for the energy gap in the cost function
EIG_VAL_REPULSION = 1
# the potential barrier of the walls (only referenced by commented-out wall cost)
WAll_BARRIER = 1000;
# this is the part we want to control the motion of, these vertices will be fixed.
# With the 0*pi/13 angle this evaluates to the unit square (0,0),(0,1),(1,0),(1,1);
# the cos/sin form makes it easy to skew the square by changing the angle.
LITTLE_SQUARE = np.array([[0.0, 0.0], [0, 1.0] , [np.cos(0*np.pi/13), np.sin(0*np.pi/13)], [np.cos(0*np.pi/13), 1 - np.sin(0*np.pi/13)]])
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#This enum represents the different types of deformations that you can have
#TODO this def might fit in lattice making
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class DispType(Enum):
    """How the desired displacement field of the square is generated."""
    random = 1      # random unit-norm field (first 3 DOFs pinned)
    isotropic = 2   # radial contraction/expansion about the origin
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#this enumerates the possible ways to connect the added vertices to each other and the square
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class EdgeTypes(Enum):
    """How the added (gray-matter) vertices are wired up."""
    all_connected = 1   # every vertex connected to every other vertex
    all_to_square = 2   # each added vertex connected to the 4 square vertices
    #square_lattice = 3
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#================================================================================================================================================
# Runs the minimization procedure to return the results for the spring constants and the positions
#================================================================================================================================================
def find_desired_square(deformationType = DispType.random, edgeType = EdgeTypes.all_connected,
                        num_of_added_verts = NUM_OF_ADDED_VERTS, squareDisp = None, square_ID = None):
    """
    minimizes over the spring constants and positions of the added returns the result of minimization after testing.
    deformationType: this is an option for generating the desired displacement field. This is overrided if squareDisp is given
             There are two deformations options now:
                     DispType.random: random displacements.
                     DispType.isotropic: contraction or expansion towards the origin.
    edgeType: type of connectivity of the network
                    EdgeTypes.all_connected: everything is connected to everything.
                    EdgeTypes.all_to_square: every added points is connected to all the vertices of the square.
                    EdgeTypes.square_lattice: an additional square lattice in the interior. corresponding corners connected.
    squareDisp: optional explicit displacement for the square's free DOFs; overrides deformationType.
    square_ID: optional label printed while running (used by the many-squares driver script).

    Returns [newVertices, edge_array, newK]: final vertex positions, the edge
    connectivity, and the spring constants normalized to max 1 (square edges
    pinned to 1). Retries until test_results() accepts the solution, adding one
    gray-matter vertex every MAX_TRIALS failed attempts.
    """
    #initialize test results so that the while loop goes at least once
    test_result = True
    #how many times the minimization procedure ran
    trial_num = 0
    #initialize the lattice
    vertices, edge_array = initialize_square(edgeType, num_of_added_verts)
    num_of_verts = vertices.size//2
    num_of_edges = edge_array.size//2
    #generate displacement field for the square. outside loop because we don't want to keep changing this
    U = make_desired_disp(vertices, num_of_vertices=num_of_verts, DeformType=deformationType)
    if(squareDisp is not None):
        # caller-supplied square displacement overrides the generated field
        U[:LITTLE_SQUARE.size - 3] = squareDisp
    while (test_result):
        # connectivity dependent matrices that are used to calculate the rigidity matrix
        edgeMat1 = makeEdgeMatrix1(edge_array, numOfEdges=num_of_edges, numOfVerts=num_of_verts)
        edgeMat2 = makeEdgeMatrix2(edge_array, numOfEdges=num_of_edges, numOfVerts=num_of_verts)
        #initialize var: points and spring constants
        k0 = npr.rand(num_of_edges)
        var0 = np.hstack((vertices.flatten(), k0))
        #the square spring constants are fixed
        var0[2*num_of_verts:2*num_of_verts + 4] = np.ones(4)
        #minimize cost funcion
        res = op.minimize(cost_function, var0, method='BFGS',args=(U, edgeMat1, edgeMat2, num_of_edges, num_of_verts), options={'disp': False})
        #this works if we are working with the script of Many little squares
        if(square_ID is not None):
            print("Working on square number ", square_ID)
        trial_num += 1; print("Trial Number: ", trial_num, "\n")
        #if this returns true then keep trying, checks if U is close to the minimum on the LITTLE_SQUARE
        test_result = test_results(res.x, U, edgeMat1, edgeMat2, num_of_edges, num_of_verts)
        #initialize the lattice again, adds a new vertex for every max_trials trials
        vertices, edge_array = initialize_square(edgeType, num_of_added_verts + trial_num//MAX_TRIALS)
        #if the number of trials increases the maximum then add a gray matter vertex
        if (np.mod(trial_num, MAX_TRIALS) == 0):
            #update num of verts and edges
            num_of_verts = vertices.size//2
            num_of_edges = edge_array.size//2
            # add the initial displacement for the extra vertex, it's essentially a place holder
            U = np.hstack((U, npr.rand(2) - 0.5))
    #get the new vertices from the results
    newVertices = res.x[:2*num_of_verts]
    #the square ones are fixed
    newVertices[:LITTLE_SQUARE.size] = LITTLE_SQUARE.flatten()
    newVertices = newVertices.reshape((num_of_verts, 2))
    #the resulting values of the spring constant (squared so stiffness >= 0)
    newK = (res.x[2*num_of_verts:]**2)
    newK = newK/np.max(newK)
    #the square spring constants are fixed
    newK[:4] = np.ones(4)
    return [newVertices, edge_array, newK]
#================================================================================================================================================
# The cost function penalizes energy of the desired displacement of the square vertices
#================================================================================================================================================
def cost_function(var, disp_field, eMat1, eMat2, num_of_edges,num_of_vertices):
    """
    var is the combined variables to be minimized over. It represents all the vertices and spring constants
    var[:2*num_of_vertices] are the points
    var[2*num_of_vertices:] are the spring constants

    Returns the relaxed elastic energy of the desired motion divided by the
    lowest eigenvalue, plus a repulsion term that keeps the lowest mode well
    separated from the second one.
    NOTE(review): mutates ``var`` in place — the square positions and the four
    square spring constants are clamped on every call.
    """
    #the square positions are fixed
    var[:LITTLE_SQUARE.size] = LITTLE_SQUARE.flatten()
    #the square spring constants are fixed
    var[2*num_of_vertices:2*num_of_vertices + 4] = np.ones(4)
    # var[:2*num_of_vertices] are the points of the lattice
    # var[2*num_of_vertices:] are the spring constants
    # dropping the first 3 columns removes the rigid-body degrees of freedom
    rigidityMatrix = makeRigidityMat(var[:2*num_of_vertices], edgeMat1=eMat1, edgeMat2=eMat2)[:, 3:]
    #calculate the dynamical matrix
    DynMat = makeDynamicalMat(RigidityMat= rigidityMatrix,
                              springK=var[2*num_of_vertices:], numOfVerts=num_of_vertices, numOfEdges=num_of_edges)
    # minimize the energy subject to the constraint that the square displacements are fixed
    res0 = op.minimize(energy, disp_field, method='Newton-CG', args=(DynMat, disp_field[:LITTLE_SQUARE.size - 3]), jac=energy_Der,
                       hess=energy_Hess, options={'xtol': 1e-8, 'disp': False})
    #lowestEigVector = normalizeVec(la.eigh(DynMat)[1][:5,0])
    lowestEs = lowestEigenVals(DynMat)
    #Wall_Cost = WAll_BARRIER*np.heaviside(-var[LITTLE_SQUARE.size:2*num_of_vertices:2], 1) + np.sum(var[LITTLE_SQUARE.size:2*num_of_vertices:2] - 0.5)**2
    # minimize this energy with respect to the lowest energy eigenvalue
    return res0.fun/lowestEs[0] + EIG_VAL_REPULSION * (lowestEs[0]/lowestEs[1])**2 #+ np.sum(Wall_Cost)
#================================================================================================================================================
#================================================================================================================================================
# Initializing the lattice
#================================================================================================================================================
def initialize_square(edgeType = EdgeTypes.all_connected, num_of_added_verts = NUM_OF_ADDED_VERTS):
    """
    This method returns an array of position vectors (vertices) and an array of edge vectors (edge_array).
    The vertices include a square of unit width and (num_of_added_verts) extra points that are inserted at random positions in a square
    of width = 2. The square vertices must be the first 0,1,2,3.

    Raises ValueError for an unsupported edgeType (previously this fell
    through to a confusing UnboundLocalError).

    Example: initialize_square(num_of_added_verts=2)
    Out[45]:
(array([[ 0.        ,  0.        ],
        [ 0.        ,  1.        ],
        [ 1.        ,  1.        ],
        [ 1.        ,  0.        ],
        [ 0.49850383,  0.26623088]]), array([[0, 1],
        [0, 2],
        [0, 3],
        [0, 4],
        [1, 2],
        [1, 3],
        [1, 4],
        [2, 3],
        [2, 4],
        [3, 4]]))
    """
    # this part I call grey matter, these are the added to the square vertices;
    # uniform in [-0.5, 1.5) in each coordinate, i.e. a width-2 box around the square
    gray_matter = npr.rand(num_of_added_verts, NUM_OF_DIMENSIONS)*2.0 - 0.5
    # add them together to get the entire list of vertices
    vertices = np.vstack((LITTLE_SQUARE, gray_matter))
    if(edgeType == EdgeTypes.all_connected):
        # make the edge array, connect all points for now
        edge_array = connect_all_of_square(get_num_of_verts(vertices)) #connects all points of the square and gray matter. edges in definite order
    elif(edgeType == EdgeTypes.all_to_square):
        #connect each gray matter vertex to the square vertices
        edge_array = connect_all_to_square(num_of_added_verts)
    else:
        # fail fast with a clear message instead of UnboundLocalError below
        raise ValueError("Unsupported edgeType: {}".format(edgeType))
    return vertices, edge_array
#================================================================================================================================================
#================================================================================================================================================
# generate the displacement field wanted
#================================================================================================================================================
def make_desired_disp(vertices, DeformType = DispType.random, num_of_vertices = -1):
    """
    Generate the desired displacement field for the lattice.

    DispType.random: Makes a random displacement field. The first 3 degrees of freedom are assumed to
    be zero in order to fix rotation and translation of the lattice.
    DispType.isotropic: Every point moves towards the origin with an amount propotional to the distance from the origin

    num_of_vertices: pass the vertex count to avoid recomputing it; any value
    < 1 means "compute it from vertices".
    """
    if(DeformType == DispType.random):
        if(num_of_vertices < 1):
            # Bug fix: the computed count was previously discarded, leaving
            # num_of_vertices == -1 and making npr.rand(2*(-1) - 3) fail.
            num_of_vertices = get_num_of_verts(vertices)
        return normalizeVec(npr.rand(2*num_of_vertices - 3))
    elif(DeformType == DispType.isotropic):
        return normalizeVec(vertices.flatten()[3:])
#================================================================================================================================================
#================================================================================================================================================
# After setting the boundary indices to the desired values, calculates the energy using the edge matrix.
#================================================================================================================================================
def energy(u, DynMat, squareDisp):
    """
    Elastic energy 0.5 * u^T D u of the normalized displacement field.

    The first LITTLE_SQUARE.size - 3 entries of u are overwritten in place
    with the prescribed square displacements before normalizing, so the
    boundary condition on the square is always enforced; the remaining
    entries relax freely. Assumes the square vertices are indices 0..3.
    TODO: a more general energy function taking boundary conditions directly.
    """
    num_fixed = LITTLE_SQUARE.size - 3
    u[:num_fixed] = squareDisp  # enforce the square's boundary condition in place
    unit_disp = normalizeVec(u)
    return 0.5*np.dot(np.dot(unit_disp.transpose(), DynMat), unit_disp)
#================================================================================================================================================
#================================================================================================================================================
# After setting the boundary indices to the desired values, calculates the energy gradient from the dynamical matrix.
#================================================================================================================================================
def energy_Der(u, DynMat, squareDisp):
    """
    Gradient D @ u of the elastic energy at the normalized displacement field,
    with the square's displacements pinned first (same convention as energy()).
    TODO: a more general version taking boundary conditions directly.
    """
    num_fixed = LITTLE_SQUARE.size - 3
    u[:num_fixed] = squareDisp  # enforce the boundary condition in place
    unit_disp = normalizeVec(u)
    return np.dot(DynMat, unit_disp)
#================================================================================================================================================
#================================================================================================================================================
# After setting the boundary indices to the desired values, calculates the energy Hessian from the dynamical matrix.
#================================================================================================================================================
def energy_Hess(u, DynMat, squareDisp):
    """Hessian of the elastic energy: the dynamical matrix itself (independent of u)."""
    return DynMat
#================================================================================================================================================
#================================================================================================================================================
# Returns the lowest eignevalue of the dynamical matrix, exluding the rigid motions of course.
#================================================================================================================================================
def lowestEigenVals(DynMat, num_of_eigs = 2):
    """
    Return the num_of_eigs smallest eigenvalues of 0.5 * DynMat in ascending
    order (eigvalsh sorts eigenvalues of a symmetric matrix ascending).
    """
    spectrum = la.eigvalsh(0.5 * DynMat)
    return spectrum[:num_of_eigs]
#================================================================================================================================================
#================================================================================================================================================
# Returns the lowest eignevalue of the dynamical matrix, exluding the rigid motions of course.
#================================================================================================================================================
def lowestEigenVal(DynMat):
    """Smallest eigenvalue of 0.5 * DynMat (symmetric input assumed)."""
    all_vals = la.eigvalsh(0.5 * DynMat)
    return all_vals[0]
#================================================================================================================================================
#================================================================================================================================================
# Test the results of the minimization procedure
#================================================================================================================================================
def test_results(new_var, disp_field, eMat1, eMat2, num_of_edges, num_of_vertices):
    """
    Return True (meaning "try the minimization again") unless the solution is
    accepted: the desired square displacement must overlap the lowest mode
    (dot product >= 0.995) and the relative gap to the second mode must be >= 4.

    new_var: combined minimization variables;
        new_var[:2*num_of_vertices] are the points,
        new_var[2*num_of_vertices:] are the spring constants.

    Fixes: corrected the "dot produce" typo in the printed messages; the
    eigendecomposition and the two lowest eigenvalues are now computed once
    instead of repeatedly.
    """
    #the square positions are fixed
    new_var[:LITTLE_SQUARE.size] = LITTLE_SQUARE.flatten()
    # dropping the first 3 columns removes the rigid-body degrees of freedom
    rigidityMatrix = makeRigidityMat(new_var[:2*num_of_vertices], edgeMat1=eMat1, edgeMat2=eMat2)[:, 3:]
    #calculate the dynamical matrix
    DynMat = makeDynamicalMat(RigidityMat= rigidityMatrix,
                              springK=new_var[2*num_of_vertices:], numOfVerts=num_of_vertices, numOfEdges=num_of_edges)
    # minimize the energy subject to the constraint that the square displacements are fixed
    res0 = op.minimize(energy, disp_field, method='Newton-CG', args=(DynMat, disp_field[:LITTLE_SQUARE.size - 3]), jac=energy_Der,
                       hess=energy_Hess, options={'xtol': 1e-8, 'disp': False})
    # hoisted: previously la.eigh ran twice and lowestEigenVals three times
    eig_vectors = la.eigh(DynMat)[1]
    lowestEigVector = normalizeVec(eig_vectors[:LITTLE_SQUARE.size - 3, 0])
    secondEigVector = normalizeVec(eig_vectors[:LITTLE_SQUARE.size - 3, 1])
    #return false if the vectors are not close enough
    dotProduct = np.dot(lowestEigVector, normalizeVec(res0.x[:LITTLE_SQUARE.size - 3]))
    # orient the eigenvector along the desired motion so the overlap is positive
    lowestEigVector *= np.sign(dotProduct)
    dotProduct *= np.sign(dotProduct)
    two_lowest = lowestEigenVals(DynMat, 2)
    gap = (two_lowest[1] - two_lowest[0])/two_lowest[0]
    if((dotProduct < 0.995) or gap < 4):
        print("dot product: ", dotProduct, "\n")
        print("square disps in lowest energy: ", normalizeVec(lowestEigVector[:LITTLE_SQUARE.size - 3]), "\n")
        print("square disps in desired motion: ", normalizeVec(res0.x[:LITTLE_SQUARE.size - 3]), "\n")
        print("eigenvalues: ", lowestEigenVals(DynMat, 5), "\n")
        print("gap: ", gap, "\n")
        print("trying again ... \n\n")
        return True
    print("Number of edges: ", rigidityMatrix.shape[0], "\n")
    print("energy: ", energy(normalizeVec(res0.x), DynMat, disp_field[:LITTLE_SQUARE.size - 3]), "\n")
    print("eigenvalues: ", lowestEigenVals(DynMat, 5), "\n")
    print("dot product: ", dotProduct, "\n")
    print("gap: ", gap, "\n")
    print("square disps in lowest energy: ", lowestEigVector, "\n")
    print("square disps in desired motion: ", normalizeVec(res0.x[:LITTLE_SQUARE.size - 3]), "\n")
    print("square disps in next to lowest: ", normalizeVec(secondEigVector[:LITTLE_SQUARE.size - 3]), "\n")
    #plotPoints(new_var[:2*num_of_vertices], num_of_vertices)
    return False
#================================================================================================================================================
#================================================================================================================================================
# plots the points as a scatter plot
#================================================================================================================================================
def plotPoints(flattenedPoints, num_of_verts = -1):
    """
    Scatter-plot a flattened list of 2D point coordinates.

    The first four points (the original square vertices) are drawn larger and
    in a different color than the added gray-matter vertices. Passing
    num_of_verts < 0 infers the count from the array length.
    """
    if (num_of_verts < 0):
        num_of_verts = flattenedPoints.size//2
    # reshape the flat coordinate list into (num_of_verts, 2) vectors
    positions = flattenedPoints.reshape(num_of_verts, 2)
    # marker areas: big for the 4 square vertices, smaller for the rest
    marker_area = 200*np.ones(num_of_verts)
    marker_area[4:] *= 0.4
    # reuse the areas as the color channel so the two groups stand apart
    marker_color = np.copy(marker_area)
    plt.scatter(positions[:, 0], positions[:, 1], s=marker_area, c=marker_color)
#================================================================================================================================================
| |
# -*- coding: utf-8 -*-
# Copyright 2015 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import webob.exc
import ooi.api.base
import ooi.api.helpers
from ooi import exception
from ooi.occi.core import collection
from ooi.occi.infrastructure import compute
from ooi.occi.infrastructure import network
from ooi.occi.infrastructure import storage
from ooi.occi.infrastructure import storage_link
from ooi.occi import validator as occi_validator
from ooi.openstack import contextualization
from ooi.openstack import helpers
from ooi.openstack import network as os_network
from ooi.openstack import templates
def _create_network_link(addr, comp, net_id):
    """Build an OCCI network interface linking compute ``comp`` to ``net_id``.

    ``addr`` is a nova address dict carrying the MAC and IP address.
    """
    net_resource = network.NetworkResource(title="network", id=net_id)
    return os_network.OSNetworkInterface(
        comp,
        net_resource,
        addr["OS-EXT-IPS-MAC:mac_addr"],
        addr["addr"],
    )
class Controller(ooi.api.base.Controller):
    """OCCI compute controller implemented on top of the OpenStack nova API."""
    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(*args, **kwargs)
        # OCCI actions advertised by compute resources (start, stop, ...).
        self.compute_actions = compute.ComputeResource.actions
        # Helper that performs the actual calls against nova.
        self.os_helper = ooi.api.helpers.OpenStackHelper(
            self.app,
            self.openstack_version
        )
    def _get_compute_resources(self, servers):
        """Map nova server dicts to OCCI ComputeResource objects.

        Only the "name" and "id" entries of each server dict are used.
        """
        occi_compute_resources = []
        if servers:
            for s in servers:
                s = compute.ComputeResource(title=s["name"], id=s["id"])
                occi_compute_resources.append(s)
        return occi_compute_resources
    def index(self, req):
        """List all servers visible to the request as an OCCI Collection."""
        servers = self.os_helper.index(req)
        occi_compute_resources = self._get_compute_resources(servers)
        return collection.Collection(resources=occi_compute_resources)
    def run_action(self, req, id, body):
        """Run an OCCI action (stop/start/restart/suspend) on server ``id``.

        Raises InvalidAction for unknown action terms; raises NotImplemented
        for OCCI action terms this controller does not handle.
        """
        action = req.GET.get("action", None)
        occi_actions = [a.term for a in compute.ComputeResource.actions]
        if action is None or action not in occi_actions:
            raise exception.InvalidAction(action=action)
        parser = req.get_parser()(req.headers, req.body)
        obj = parser.parse()
        server = self.os_helper.get_server(req, id)
        if action == "stop":
            scheme = {"category": compute.stop}
        elif action == "start":
            scheme = {"category": compute.start}
            # nova expects "resume"/"unpause" instead of "start" for
            # suspended/paused servers.
            if server["status"] == "SUSPENDED":
                action = "resume"
            elif server["status"] == "PAUSED":
                action = "unpause"
        elif action == "restart":
            scheme = {"category": compute.restart}
        elif action == "suspend":
            scheme = {"category": compute.suspend}
        else:
            raise exception.NotImplemented
        validator = occi_validator.Validator(obj)
        validator.validate(scheme)
        self.os_helper.run_action(req, action, id)
        return []
    def _build_block_mapping(self, req, obj):
        """Build a nova ``block_device_mapping_v2`` list from request links.

        Each storage link becomes a volume mapping; when at least one volume
        mapping exists, the boot image is prepended with boot_index 0.
        """
        mappings = []
        for l in obj.get("links", {}).values():
            if l["rel"] == storage.StorageResource.kind.type_id:
                _, vol_id = ooi.api.helpers.get_id_with_kind(
                    req,
                    l.get("occi.core.target"),
                    storage.StorageResource.kind)
                mapping = {
                    "source_type": "volume",
                    "uuid": vol_id,
                    "delete_on_termination": False,
                }
                # device id is optional on the OCCI storage link
                try:
                    mapping['device_name'] = l['occi.storagelink.deviceid']
                except KeyError:
                    pass
                mappings.append(mapping)
        # this needs to be there if we have a mapping
        if mappings:
            image = obj["schemes"][templates.OpenStackOSTemplate.scheme][0]
            mappings.insert(0, {
                "source_type": "image",
                "destination_type": "local",
                "boot_index": 0,
                "delete_on_termination": True,
                "uuid": image,
            })
        return mappings
    def _get_network_from_req(self, req, obj):
        """Extract requested networks (as ``{'uuid': id}`` dicts) from obj."""
        networks = []
        for l in obj.get("links", {}).values():
            if l["rel"] == network.NetworkResource.kind.type_id:
                _, net_id = ooi.api.helpers.get_id_with_kind(
                    req,
                    l.get("occi.core.target"),
                    network.NetworkResource.kind)
                net = {'uuid': net_id}
                networks.append(net)
        return networks
    def create(self, req, body):
        """Create a server from an OCCI request; return it as a Collection.

        Handles OS/resource template mixins, optional user data and public
        key contextualization, block device mappings and network links.
        """
        parser = req.get_parser()(req.headers, req.body)
        scheme = {
            "category": compute.ComputeResource.kind,
            "mixins": [
                templates.OpenStackOSTemplate,
                templates.OpenStackResourceTemplate,
            ],
            "optional_mixins": [
                contextualization.user_data,
                contextualization.public_key,
            ],
            "optional_links": [
                storage.StorageResource.kind,
                network.NetworkResource.kind,
            ]
        }
        obj = parser.parse()
        validator = occi_validator.Validator(obj)
        validator.validate(scheme)
        attrs = obj.get("attributes", {})
        name = attrs.get("occi.core.title", "OCCI_VM")
        image = obj["schemes"][templates.OpenStackOSTemplate.scheme][0]
        flavor = obj["schemes"][templates.OpenStackResourceTemplate.scheme][0]
        user_data, key_name, key_data = None, None, None
        create_key, create_key_tmp = False, False
        if contextualization.user_data.scheme in obj["schemes"]:
            user_data = attrs.get("org.openstack.compute.user_data")
        if contextualization.public_key.scheme in obj["schemes"]:
            key_name = attrs.get("org.openstack.credentials.publickey.name")
            key_data = attrs.get("org.openstack.credentials.publickey.data")
            if key_name and key_data:
                create_key = True
            elif not key_name and key_data:
                # NOTE(orviz) To be occi-os compliant, not
                # raise exception.MissingKeypairName
                key_name = uuid.uuid4().hex
                create_key = True
                create_key_tmp = True
        if create_key:
            # add keypair: if key_name already exists, a 409 HTTP code
            # will be returned by OpenStack
            self.os_helper.keypair_create(req, key_name,
                                          public_key=key_data)
        block_device_mapping_v2 = self._build_block_mapping(req, obj)
        networks = self._get_network_from_req(req, obj)
        server = self.os_helper.create_server(
            req,
            name,
            image,
            flavor,
            user_data=user_data,
            key_name=key_name,
            block_device_mapping_v2=block_device_mapping_v2,
            networks=networks
        )
        # The returned JSON does not contain the server name
        server["name"] = name
        occi_compute_resources = self._get_compute_resources([server])
        if create_key_tmp:
            # the keypair name was auto-generated above; don't leave it behind
            self.os_helper.keypair_delete(req, key_name)
        return collection.Collection(resources=occi_compute_resources)
    def show(self, req, id):
        """Return the full OCCI representation of server ``id`` including
        flavor/image template mixins, storage links and network links."""
        # get info from server
        s = self.os_helper.get_server(req, id)
        # get info from flavor
        flavor = self.os_helper.get_flavor(req, s["flavor"]["id"])
        res_tpl = templates.OpenStackResourceTemplate(flavor["id"],
                                                      flavor["name"],
                                                      flavor["vcpus"],
                                                      flavor["ram"],
                                                      flavor["disk"])
        # get info from image
        img_id = s["image"]["id"]
        try:
            image = self.os_helper.get_image(req, img_id)
        except webob.exc.HTTPNotFound:
            # the image may have been deleted after the server was built
            image = {
                "id": img_id,
                "name": "None (Image with ID '%s' not found)" % img_id,
            }
        os_tpl = templates.OpenStackOSTemplate(image["id"],
                                               image["name"])
        # build the compute object
        comp = compute.ComputeResource(title=s["name"], id=s["id"],
                                       cores=flavor["vcpus"],
                                       hostname=s["name"],
                                       memory=flavor["ram"],
                                       state=helpers.vm_state(s["status"]),
                                       mixins=[os_tpl, res_tpl])
        # storage links
        vols = self.os_helper.get_server_volumes_link(req, s["id"])
        for v in vols:
            st = storage.StorageResource(title="storage", id=v["volumeId"])
            comp.add_link(storage_link.StorageLink(comp, st,
                                                   deviceid=v["device"]))
        # network links
        addresses = s.get("addresses", {})
        if addresses:
            for addr_set in addresses.values():
                for addr in addr_set:
                    # TODO(jorgesece): add pool information
                    if addr["OS-EXT-IPS:type"] == "floating":
                        net_id = helpers.PUBLIC_NETWORK
                    else:
                        try:
                            net_id = self.os_helper.get_network_id(
                                req, addr['OS-EXT-IPS-MAC:mac_addr'], id
                            )
                        except webob.exc.HTTPNotFound:
                            net_id = "FIXED"
                    comp.add_link(_create_network_link(addr, comp, net_id))
        return [comp]
    def _get_server_floating_ips(self, req, server_id):
        """Return the floating IP addresses attached to ``server_id``."""
        s = self.os_helper.get_server(req, server_id)
        addresses = s.get("addresses", {})
        floating_ips = []
        if addresses:
            for addr_set in addresses.values():
                for addr in addr_set:
                    if addr["OS-EXT-IPS:type"] == "floating":
                        floating_ips.append(addr["addr"])
        return floating_ips
    def _release_floating_ips(self, req, server_id):
        """Detach and release every floating IP held by ``server_id``."""
        server_ips = self._get_server_floating_ips(req, server_id)
        if server_ips:
            floating_ips = self.os_helper.get_floating_ips(req)
            for server_ip in server_ips:
                for ip in floating_ips:
                    if server_ip == ip["ip"]:
                        self.os_helper.remove_floating_ip(req, server_id,
                                                          ip["ip"])
                        self.os_helper.release_floating_ip(req, ip["id"])
    def _delete(self, req, server_ids):
        """Release floating IPs and delete each server in ``server_ids``."""
        for server_id in server_ids:
            self._release_floating_ips(req, server_id)
            self.os_helper.delete(req, server_id)
        return []
    def delete(self, req, id):
        """Delete a single server."""
        return self._delete(req, [id])
    def delete_all(self, req):
        """Delete every server visible to the request."""
        ids = [s["id"] for s in self.os_helper.index(req)]
        return self._delete(req, ids)
| |
#!/usr/bin/env python
#
# $Id: l10n-report.py 1132657 2011-06-06 14:23:36Z julianfoad $
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
"""Usage: l10n-report.py [OPTION...]
Send the l10n translation status report to an email address. If the
email address is not specified, print the report to stdout.
Options:
-h, --help Show this help message.
-m, --to-email-id Send the l10n translation status report to this
email address.
"""
import sys
import getopt
import os
import re
import subprocess
# Sender and recipient of the report e-mail.
FROM_ADDRESS = "Subversion Translation Status <noreply@subversion.apache.org>"
LIST_ADDRESS = "dev@subversion.apache.org"
# Subject filled in with (branch name, revision).
SUBJECT_TEMPLATE = "[l10n] Translation status report for %s r%s"
# Fixed In-Reply-To id so all reports for a branch thread together.
MAIL_THREAD_ID = '<translation_status_report_for_%s@subversion.apache.org>'
def _rev():
    """Return this script's revision number, parsed from the svn keyword."""
    keyword = "$Revision: 1132657 $"
    return int(re.search('[0-9]+', keyword).group(0))
def usage_and_exit(errmsg=None):
    """Print a usage message, plus an ERRMSG (if provided), then exit.
    If ERRMSG is provided, the usage message is printed to stderr and
    the script exits with a non-zero error code. Otherwise, the usage
    message goes to stdout, and the script exits with a zero
    errorcode."""
    # Usage goes to stdout on a clean help request, stderr on an error.
    stream = sys.stdout if errmsg is None else sys.stderr
    stream.write("%s\n" % __doc__)
    stream.flush()
    if errmsg:
        stream.write("\nError: %s\n" % errmsg)
        stream.flush()
        sys.exit(2)
    sys.exit(0)
class l10nReport:
    """Refreshes a Subversion working copy and gathers per-.po-file
    translation statistics via the gettext tools.

    NOTE: Python 2 code (uses ``basestring`` and byte-string pipes).
    """
    def __init__(self, to_email_id=""):
        self.to_email_id = to_email_id
        self.from_email_id = "<%s>" % LIST_ADDRESS
    def safe_command(self, cmd_and_args, cmd_in=""):
        """Run CMD_AND_ARGS feeding CMD_IN on stdin; return (stdout, stderr)."""
        [stdout, stderr] = subprocess.Popen(cmd_and_args, \
                                            stdin=subprocess.PIPE, \
                                            stdout=subprocess.PIPE, \
                                            stderr=subprocess.PIPE).communicate(input=cmd_in)
        return stdout, stderr
    def match(self, pattern, string):
        """Return group 1 of PATTERN's first match in STRING, or None.

        PATTERN may be a pattern string or an already-compiled regex.
        """
        if isinstance(pattern, basestring):
            pattern = re.compile(pattern)
        # NOTE(review): re.compile() returns an already-compiled pattern
        # unchanged, so this second call is a harmless no-op.
        match = re.compile(pattern).search(string)
        if match and match.groups():
            return match.group(1)
        else:
            return None
    def get_msgattribs(self, file):
        """Return (translated, untranslated, fuzzy, obsolete) message counts
        for FILE by piping ``msgattrib`` output through grep/sed/wc."""
        msgout = self.safe_command(['msgattrib', '--translated', file])[0]
        grepout = self.safe_command(['grep', '-E', '^msgid *"'], msgout)[0]
        # sed '1d' drops the header msgid "" entry before counting
        sedout = self.safe_command(['sed', '1d'], grepout)[0]
        trans = self.safe_command(['wc', '-l'], sedout)[0]
        msgout = self.safe_command(['msgattrib', '--untranslated', file])[0]
        grepout = self.safe_command(['grep', '-E', '^msgid *"'], msgout)[0]
        sedout = self.safe_command(['sed', '1d'], grepout)[0]
        untrans = self.safe_command(['wc', '-l'], sedout)[0]
        msgout = self.safe_command(['msgattrib', '--only-fuzzy', file])[0]
        grepout = self.safe_command(['grep', '-E', '^msgid *"'], msgout)[0]
        sedout = self.safe_command(['sed', '1d'], grepout)[0]
        fuzzy = self.safe_command(['wc', '-l'], sedout)[0]
        msgout = self.safe_command(['msgattrib', '--only-obsolete', file])[0]
        grepout = self.safe_command(['grep', '-E', '^#~ msgid *"'], msgout)[0]
        obsolete = self.safe_command(['wc', '-l'], grepout)[0]
        return int(trans), int(untrans), int(fuzzy), int(obsolete)
    def pre_l10n_report(self):
        """Revert and update the working copy, then regenerate .po files.

        Exits the process if any svn command reports an error.
        """
        # svn revert --recursive subversion/po
        cmd = ['svn', 'revert', '--recursive', 'subversion/po']
        stderr = self.safe_command(cmd)[1]
        if stderr:
            sys.stderr.write("\nError: %s\n" % stderr)
            sys.stderr.flush()
            sys.exit(0)
        # svn update
        cmd = ['svn', 'update']
        stderr = self.safe_command(cmd)[1]
        if stderr:
            sys.stderr.write("\nError: %s\n" % stderr)
            sys.stderr.flush()
            sys.exit(0)
        # tools/po/po-update.sh
        cmd = ['sh', 'tools/po/po-update.sh']
        self.safe_command(cmd)
def bar_graph(nominal_length, trans, untrans, fuzzy, obsolete):
    """Format the given four counts into a bar graph string in which the
    total length of the bars representing the TRANS, UNTRANS and FUZZY
    counts is NOMINAL_LENGTH characters, and the bar representing the
    OBSOLETE count extends beyond that.

    Returns the empty string when TRANS, UNTRANS and FUZZY are all zero
    (e.g. an empty .po file), instead of dividing by zero."""
    total_count = trans + untrans + fuzzy # don't include 'obsolete'
    if total_count == 0:
        # nothing to scale against; an empty bar is the only sensible output
        return ''
    accum_bar = 0
    accum_count = 0
    s = ''
    for count, letter in [(trans, '+'), (untrans, 'U'), (fuzzy, '~'),
                          (obsolete, 'o')]:
        accum_count += count
        # Floor division keeps the integer semantics this code had under
        # Python 2; a plain '/' would produce a float under Python 3 and
        # break the string-repeat below.
        new_bar_end = nominal_length * accum_count // total_count
        s += letter * (new_bar_end - accum_bar)
        accum_bar = new_bar_end
    return s
def main():
    """Drive the report: parse options, refresh the working copy and the
    .po files, compute per-language statistics, print the report and
    optionally e-mail it.

    NOTE: Python 2 only (old-style ``except E, v`` syntax below).
    """
    # Parse the command-line options and arguments.
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "hm:",
                                       ["help",
                                        "to-email-id=",
                                        ])
    except getopt.GetoptError, msg:
        usage_and_exit(msg)
    to_email_id = None
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage_and_exit()
        elif opt in ("-m", "--to-email-id"):
            to_email_id = arg
    l10n = l10nReport()
    # Work from the repository root, two levels above this script.
    os.chdir("%s/../.." % os.path.dirname(os.path.abspath(sys.argv[0])))
    l10n.pre_l10n_report()
    [info_out, info_err] = l10n.safe_command(['svn', 'info'])
    if info_err:
        sys.stderr.write("\nError: %s\n" % info_err)
        sys.stderr.flush()
        sys.exit(0)
    po_dir = 'subversion/po'
    branch_name = l10n.match('URL:.*/asf/subversion/(\S+)', info_out)
    [info_out, info_err] = l10n.safe_command(['svnversion', po_dir])
    if info_err:
        sys.stderr.write("\nError: %s\n" % info_err)
        sys.stderr.flush()
        sys.exit(0)
    # Strip svnversion's 'M' (modified) / 'S' (switched) markers.
    wc_version = re.sub('[MS]', '', info_out.strip())
    title = "Translation status report for %s@r%s" % \
               (branch_name, wc_version)
    os.chdir(po_dir)
    files = sorted(os.listdir('.'))
    format_head = "\n%6s %7s %7s %7s %7s" % ("lang", "trans", "untrans",
                                             "fuzzy", "obs")
    format_line = "--------------------------------------"
    print("\n%s\n%s\n%s" % (title, format_head, format_line))
    body = ""
    po_pattern = re.compile('(.*).po$')
    for file in files:
        # Skip anything that is not a .po file.
        lang = l10n.match(po_pattern, file)
        if not lang:
            continue
        [trans, untrans, fuzzy, obsolete] = l10n.get_msgattribs(file)
        po_format = "%6s %7d %7d %7d %7d" %\
                    (lang, trans, untrans, fuzzy, obsolete)
        po_format += " " + bar_graph(30, trans, untrans, fuzzy, obsolete)
        body += "%s\n" % po_format
        print(po_format)
    if to_email_id:
        import smtplib
        # Ensure compatibility of the email module all the way to Python 2.3
        try:
            from email.message import Message
        except ImportError:
            from email.Message import Message
        msg = Message()
        msg["From"] = FROM_ADDRESS
        msg["To"] = to_email_id
        msg["Subject"] = SUBJECT_TEMPLATE % (branch_name, wc_version)
        msg["X-Mailer"] = "l10n-report.py r%s" % _rev()
        msg["Reply-To"] = LIST_ADDRESS
        msg["Mail-Followup-To"] = LIST_ADDRESS
        msg["In-Reply-To"] = MAIL_THREAD_ID % (branch_name.replace('/', '_'))
        msg["References"] = msg["In-Reply-To"]
        # http://www.iana.org/assignments/auto-submitted-keywords/auto-submitted-keywords.xhtml
        msg["Auto-Submitted"] = 'auto-generated'
        msg.set_type("text/plain")
        msg.set_payload("\n".join((title, format_head, format_line, body)))
        server = smtplib.SMTP('localhost')
        server.sendmail("From: " + FROM_ADDRESS,
                        "To: " + to_email_id,
                        msg.as_string())
        print("The report is sent to '%s' email id." % to_email_id)
    else:
        print("\nYou have not passed '-m' option, so email is not sent.")
if __name__ == "__main__":
    main()
| |
from datetime import date, time, datetime
from django.conf import settings
from django.db.models.fields import NOT_PROVIDED
from django.db.models.sql import aggregates as sqlaggregates
from django.db.models.sql.compiler import SQLCompiler
from django.db.models.sql.constants import LOOKUP_SEP, MULTI, SINGLE
from django.db.models.sql.where import AND, OR
from django.db.utils import DatabaseError, IntegrityError
from django.utils.tree import Node
import random
# In-memory implementations of the Django field lookup operators, used to
# emulate SQL WHERE-clause filtering against entity dicts.
# NOTE(review): for 'exact', a list/tuple column value is treated as
# membership ("y in x") rather than equality — presumably to support
# list-valued fields; verify against the backends that use this table.
EMULATED_OPS = {
    'exact': lambda x, y: y in x if isinstance(x, (list,tuple)) else x == y,
    'iexact': lambda x, y: x.lower() == y.lower(),
    'startswith': lambda x, y: x.startswith(y),
    'istartswith': lambda x, y: x.lower().startswith(y.lower()),
    'isnull': lambda x, y: x is None if y else x is not None,
    'in': lambda x, y: x in y,
    'lt': lambda x, y: x < y,
    'lte': lambda x, y: x <= y,
    'gt': lambda x, y: x > y,
    'gte': lambda x, y: x >= y,
}
class NonrelQuery(object):
    """Backend-neutral query object for non-relational datastores.

    Concrete backends subclass this and implement fetch(), count(),
    delete(), order_by() and add_filter().  NOTE: Python 2 era code
    (uses the ``unicode`` and ``cmp`` builtins below).
    """
    # ----------------------------------------------
    # Public API
    # ----------------------------------------------
    def __init__(self, compiler, fields):
        self.fields = fields
        self.compiler = compiler
        self.connection = compiler.connection
        self.query = self.compiler.query
        # Toggled while descending into negated subtrees of the Where tree.
        self._negated = False
    def fetch(self, low_mark=0, high_mark=None):
        """Yield matching entities in the [low_mark, high_mark) slice (hook)."""
        raise NotImplementedError('Not implemented')
    def count(self, limit=None):
        """Return the number of matching entities (backend hook)."""
        raise NotImplementedError('Not implemented')
    def delete(self):
        """Delete all matching entities (backend hook)."""
        raise NotImplementedError('Not implemented')
    def order_by(self, ordering):
        """Apply (column, descending) ordering pairs (backend hook)."""
        raise NotImplementedError('Not implemented')
    # Used by add_filters()
    def add_filter(self, column, lookup_type, negated, db_type, value):
        """Register a single filter condition (backend hook)."""
        raise NotImplementedError('Not implemented')
    # This is just a default implementation. You might want to override this
    # in case your backend supports OR queries
    def add_filters(self, filters):
        """Traverses the given Where tree and adds the filters to this query"""
        if filters.negated:
            self._negated = not self._negated
        if not self._negated and filters.connector != AND:
            raise DatabaseError('Only AND filters are supported')
        # Remove unneeded children from tree
        children = self._get_children(filters.children)
        if self._negated and filters.connector != OR and len(children) > 1:
            raise DatabaseError("When negating a whole filter subgroup "
                                "(e.g., a Q object) the subgroup filters must "
                                "be connected via OR, so the non-relational "
                                "backend can convert them like this: "
                                '"not (a OR b) => (not a) AND (not b)".')
        for child in children:
            if isinstance(child, Node):
                self.add_filters(child)
                continue
            column, lookup_type, db_type, value = self._decode_child(child)
            self.add_filter(column, lookup_type, self._negated, db_type, value)
        if filters.negated:
            # restore the negation state for our siblings
            self._negated = not self._negated
    # ----------------------------------------------
    # Internal API for reuse by subclasses
    # ----------------------------------------------
    def _decode_child(self, child):
        """Unpack a Where-tree leaf into (column, lookup_type, db_type, value),
        rejecting constraints that would require a JOIN."""
        constraint, lookup_type, annotation, value = child
        packed, value = constraint.process(lookup_type, value, self.connection)
        alias, column, db_type = packed
        if alias and alias != self.query.model._meta.db_table:
            raise DatabaseError("This database doesn't support JOINs "
                                "and multi-table inheritance.")
        value = self._normalize_lookup_value(value, annotation, lookup_type)
        return column, lookup_type, db_type, value
    def _normalize_lookup_value(self, value, annotation, lookup_type):
        """Undo Django's SQL-oriented packaging of lookup values."""
        # Django fields always return a list (see Field.get_db_prep_lookup)
        # except if get_db_prep_lookup got overridden by a subclass
        if lookup_type not in ('in', 'range', 'year') and isinstance(value, (tuple, list)):
            if len(value) > 1:
                raise DatabaseError('Filter lookup type was: %s. Expected the '
                        'filters value not to be a list. Only "in"-filters '
                        'can be used with lists.'
                        % lookup_type)
            elif lookup_type == 'isnull':
                value = annotation
            else:
                value = value[0]
        # Normalize to concrete string types (Python 2 unicode/str).
        if isinstance(value, unicode):
            value = unicode(value)
        elif isinstance(value, str):
            value = str(value)
        # Strip the wildcard characters Django appended for LIKE-style
        # lookups — presumably '%'; verify against Field.get_db_prep_lookup.
        if lookup_type in ('startswith', 'istartswith'):
            value = value[:-1]
        elif lookup_type in ('endswith', 'iendswith'):
            value = value[1:]
        elif lookup_type in ('contains', 'icontains'):
            value = value[1:-1]
        return value
    def _get_children(self, children):
        """Return CHILDREN minus redundant auto-added isnull leaf nodes."""
        # Filter out nodes that were automatically added by sql.Query, but are
        # not necessary with emulated negation handling code
        result = []
        for child in children:
            if isinstance(child, tuple):
                constraint = child[0]
                lookup_type = child[1]
                if lookup_type == 'isnull' and constraint.field is None:
                    continue
            result.append(child)
        return result
    def _matches_filters(self, entity, filters):
        """Evaluate the Where tree in memory against a single entity dict."""
        # Filters without rules match everything
        if not filters.children:
            return True
        result = filters.connector == AND
        for child in filters.children:
            if isinstance(child, Node):
                submatch = self._matches_filters(entity, child)
            else:
                constraint, lookup_type, annotation, value = child
                packed, value = constraint.process(lookup_type, value, self.connection)
                alias, column, db_type = packed
                if alias != self.query.model._meta.db_table:
                    raise DatabaseError("This database doesn't support JOINs "
                                        "and multi-table inheritance.")
                # Django fields always return a list (see Field.get_db_prep_lookup)
                # except if get_db_prep_lookup got overridden by a subclass
                if lookup_type != 'in' and isinstance(value, (tuple, list)):
                    if len(value) > 1:
                        raise DatabaseError('Filter lookup type was: %s. '
                            'Expected the filters value not to be a list. '
                            'Only "in"-filters can be used with lists.'
                            % lookup_type)
                    elif lookup_type == 'isnull':
                        value = annotation
                    else:
                        value = value[0]
                if entity[column] is None:
                    # None sorts before any date/time; string lookups on
                    # None never match.
                    if isinstance(value, (datetime, date, time)):
                        submatch = lookup_type in ('lt', 'lte')
                    elif lookup_type in ('startswith', 'contains', 'endswith', 'iexact',
                                         'istartswith', 'icontains', 'iendswith'):
                        submatch = False
                    else:
                        submatch = EMULATED_OPS[lookup_type](entity[column], value)
                else:
                    submatch = EMULATED_OPS[lookup_type](entity[column], value)
                # Short-circuit as soon as the connector's result is decided.
                if filters.connector == OR and submatch:
                    result = True
                    break
                elif filters.connector == AND and not submatch:
                    result = False
                    break
        if filters.negated:
            return not result
        return result
    def _order_in_memory(self, lhs, rhs):
        """Comparator for in-memory sorting by the query's ordering
        (Python 2 ``cmp``-style: returns -1/0/1)."""
        for column, descending in self.compiler._get_ordering():
            result = cmp(lhs.get(column), rhs.get(column))
            if descending:
                result *= -1
            if result != 0:
                return result
        return 0
    def convert_value_from_db(self, db_type, value):
        """Delegate datastore-to-Python value conversion to the compiler."""
        return self.compiler.convert_value_from_db(db_type, value)
    def convert_value_for_db(self, db_type, value):
        """Delegate Python-to-datastore value conversion to the compiler."""
        return self.compiler.convert_value_for_db(db_type, value)
class NonrelCompiler(SQLCompiler):
    """
    Base class for non-relational compilers. Provides in-memory filter matching
    and ordering. Entities are assumed to be dictionaries where the keys are
    column names.
    """
    # ----------------------------------------------
    # Public API
    # ----------------------------------------------
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        self.check_query()
        fields = self.get_fields()
        low_mark = self.query.low_mark
        high_mark = self.query.high_mark
        for entity in self.build_query(fields).fetch(low_mark, high_mark):
            yield self._make_result(entity, fields)
    def has_results(self):
        """Return whether at least one entity matches (count limited to 1)."""
        return self.get_count(check_exists=True)
    def execute_sql(self, result_type=MULTI):
        """
        Handles aggregate/count queries
        """
        aggregates = self.query.aggregate_select.values()
        # Simulate a count()
        if aggregates:
            assert len(aggregates) == 1
            aggregate = aggregates[0]
            # Count(*) or Count(pk) are the only supported aggregates.
            assert isinstance(aggregate, sqlaggregates.Count)
            meta = self.query.get_meta()
            assert aggregate.col == '*' or aggregate.col == (meta.db_table, meta.pk.column)
            count = self.get_count()
            if result_type is SINGLE:
                return [count]
            elif result_type is MULTI:
                return [[count]]
        raise NotImplementedError('The database backend only supports count() queries')
    # ----------------------------------------------
    # Additional NonrelCompiler API
    # ----------------------------------------------
    def _make_result(self, entity, fields):
        """Convert an entity dict into an ordered row for FIELDS, applying
        backend value conversion and field defaults for missing columns."""
        result = []
        for field in fields:
            value = entity.get(field.column, NOT_PROVIDED)
            if value is NOT_PROVIDED:
                value = field.get_default()
            else:
                value = self.convert_value_from_db(field.db_type(connection=self.connection), value)
            if value is None and not field.null:
                raise IntegrityError("Non-nullable field %s can't be None!" % field.name)
            result.append(value)
        return result
    def check_query(self):
        """Reject features (joins, distinct, extra, having) this backend
        cannot execute."""
        if (len([a for a in self.query.alias_map if self.query.alias_refcount[a]]) > 1
                or self.query.distinct or self.query.extra or self.query.having):
            raise DatabaseError('This query is not supported by the database.')
    def get_count(self, check_exists=False):
        """
        Counts matches using the current filter constraints.
        """
        if check_exists:
            # an existence check only ever needs the first match
            high_mark = 1
        else:
            high_mark = self.query.high_mark
        return self.build_query().count(high_mark)
    def build_query(self, fields=None):
        """Construct the backend query object with filters and ordering."""
        if fields is None:
            fields = self.get_fields()
        query = self.query_class(self, fields)
        query.add_filters(self.query.where)
        query.order_by(self._get_ordering())
        # This at least satisfies the most basic unit tests
        if settings.DEBUG:
            self.connection.queries.append({'sql': repr(query)})
        return query
    def get_fields(self):
        """
        Returns the fields which should get loaded from the backend by self.query
        """
        # We only set this up here because
        # related_select_fields isn't populated until
        # execute_sql() has been called.
        if self.query.select_fields:
            fields = self.query.select_fields + self.query.related_select_fields
        else:
            fields = self.query.model._meta.fields
        # If the field was deferred, exclude it from being passed
        # into `resolve_columns` because it wasn't selected.
        only_load = self.deferred_to_columns()
        if only_load:
            db_table = self.query.model._meta.db_table
            only_load = dict((k, v) for k, v in only_load.items()
                             if v or k == db_table)
            if len(only_load.keys()) > 1:
                raise DatabaseError('Multi-table inheritance is not supported '
                                    'by non-relational DBs.' + repr(only_load))
            fields = [f for f in fields if db_table in only_load and
                      f.column in only_load[db_table]]
        query_model = self.query.model
        if query_model._meta.proxy:
            # resolve proxy models to the concrete model they proxy
            query_model = query_model._meta.proxy_for_model
        for field in fields:
            if field.model._meta != query_model._meta:
                raise DatabaseError('Multi-table inheritance is not supported '
                                    'by non-relational DBs.')
        return fields
    def _get_ordering(self):
        """Yield (column, descending) pairs for the query's ordering,
        rejecting cross-table and random ('?') orderings."""
        opts = self.query.get_meta()
        if not self.query.default_ordering:
            ordering = self.query.order_by
        else:
            ordering = self.query.order_by or opts.ordering
        for order in ordering:
            if LOOKUP_SEP in order:
                raise DatabaseError("Ordering can't span tables on non-relational backends (%s)" % order)
            if order == '?':
                raise DatabaseError("Randomized ordering isn't supported by the backend")
            order = order.lstrip('+')
            descending = order.startswith('-')
            field = order.lstrip('-')
            if field == 'pk':
                field = opts.pk.name
            if not self.query.standard_ordering:
                # reversed querysets flip the requested direction
                descending = not descending
            yield (opts.get_field(field).column, descending)
class NonrelInsertCompiler(object):
    """Mixin that turns a Django insert query into a backend insert() call."""
    def execute_sql(self, return_id=False):
        """Collect a column -> converted-value dict from the query and hand
        it to insert(); raise IntegrityError for a None in a non-null field."""
        row = {}
        for (field, value), column in zip(self.query.values, self.query.columns):
            if field is not None:
                if value is None and not field.null:
                    raise IntegrityError("You can't set %s (a non-nullable "
                                         "field) to None!" % field.name)
                value = self.convert_value_for_db(
                    field.db_type(connection=self.connection), value)
            row[column] = value
        return self.insert(row, return_id=return_id)
    def insert(self, values, return_id):
        """
        :param values: The model object as a list of (column, value) pairs
        :param return_id: Whether to return the id of the newly created entity
        """
        raise NotImplementedError
class NonrelUpdateCompiler(object):
    """Mixin that turns a Django update query into a backend update() call."""
    def execute_sql(self, result_type):
        """Prepare each (field, value) pair for storage — honouring objects
        that implement prepare_database_save() — and call update()."""
        prepared = []
        for field, _, value in self.query.values:
            if hasattr(value, 'prepare_database_save'):
                value = value.prepare_database_save(field)
            else:
                value = field.get_db_prep_save(value, connection=self.connection)
            converted = self.convert_value_for_db(
                field.db_type(connection=self.connection),
                value
            )
            prepared.append((field, converted))
        return self.update(prepared)
    def update(self, values):
        """
        :param values: A list of (field, new-value) pairs
        """
        raise NotImplementedError
class NonrelDeleteCompiler(object):
    """Mixin that implements deletion for non-relational backends."""
    def execute_sql(self, result_type=MULTI):
        # Only the primary key is needed to identify the entities to remove.
        pk_field = self.query.get_meta().pk
        self.build_query([pk_field]).delete()
| |
import os
import re
import utility
# Date forms: MM?DD?YYYY (2- or 4-digit year) and YYYY?MM?DD, where the
# separator is any of "/ - _ .".
DATE_PATTERN_1='(?P<month1>\d\d)[/-_.](?P<day1>\d\d)[/-_.](?P<year1>\d\d(\d\d)?)'
DATE_PATTERN_2='(?P<year2>\d\d\d\d)[/-_.](?P<month2>\d\d)[/-_.](?P<day2>\d\d)'
# Episode files named like "s01e02" (optionally "s01.e02", "s01x02", ...)
# or laid out as ".../season 1/episode 02/...".
FILE_PATTERN_BY_EPISODE='^.*s(?P<season>\d+)[-_x\.\ ]?e(?P<episode>\d+).*$'
FILE_PATTERN_BY_EPISODE_FOLDERS='^.*/+season[-_.\ ]*(?P<season>\d+)/+(episode[-_.\ ]*)*(?P<episode>\d+).*$'
# A date (either form above) delimited by non-digits.
FILE_PATTERN_BY_DATE='^.*\D(' + DATE_PATTERN_1 + '|' + DATE_PATTERN_2 + ')\D.*$'
# Series titles are matched by joining the title words with "any run of
# non-path-separator characters" (see SeriesMatcher).
SERIES_TITLE_SEPARATOR_PATTERN='[^\\\/]+'
SERIES_TITLE_PATTERN='^.*%s.*$'
# Movie files: "Name (Year)", optional "disc N"/"dvd N" and "[imdbid]" tags.
MOVIE_NAME_FILE_PATTERN='(?P<name1>.+)(\(\d+\)).*'
MOVIE_YEAR_FILE_PATTERN='.*\((?P<year>\d+)\).*'
MOVIE_DISC_FILE_PATTERN='.*(?:(?:disc|dvd)[\ _\-\.]*(?P<discnum>[\d]+)).*'
MOVIE_IMDB_FILE_PATTERN='.*(?:\[(?P<imdbid>\d+)\]).*'
class MovieMatcher():
    """Matches a movie file name against movie metadata.

    Matching is attempted in three stages:
      1. an explicit IMDB id embedded in the name, e.g. "[12345]";
      2. a "Name (Year)" pattern harvested via regular expressions;
      3. a metadata-service lookup on the bare file name (extension removed).
    """
    def __init__(self, moviedb, database, config, debug):
        self.moviedb = moviedb
        self.database = database
        self.config = config
        self.debug = debug
        self.name_re = re.compile(MOVIE_NAME_FILE_PATTERN, re.IGNORECASE)
        self.year_re = re.compile(MOVIE_YEAR_FILE_PATTERN, re.IGNORECASE)
        self.disc_re = re.compile(MOVIE_DISC_FILE_PATTERN, re.IGNORECASE)
        self.imdb_re = re.compile(MOVIE_IMDB_FILE_PATTERN, re.IGNORECASE)
    def match(self, movie_file_path):
        """Return (file_name, movie, disc_num) for a recognized movie file,
        or None when no metadata could be found."""
        (file_path, file_name) = os.path.split(movie_file_path)
        movie = None
        # if there is disc information embedded in the name,
        # we want to know about it no matter which way we match
        disc_num = self.get_disc(file_name)
        # see if the file name has an imdb id...the fastest of matches
        imdb_id = self.get_imdb_id(file_name)
        if imdb_id is not None:
            movie = self.database.get_movie(imdb_id)
            if movie is not None:
                self.database.add_movie(movie)
        # if that didn't work, try to harvest the name and year via
        # regular expressions and do an imdb lookup with the info
        if movie is None:
            name = self.get_name(file_name)
            year = self.get_year(file_name)
            if name is not None:
                to_lookup = name.strip()
                if year is not None:
                    to_lookup += " (%i)" % (year, )
                # BUG FIX: previously looked up the raw, unstripped name,
                # silently discarding the "(year)" qualifier built above.
                movie = self.moviedb.lookup_movie(to_lookup)
                if movie is not None:
                    self.database.add_movie(movie)
        # if all of our matching magic fails, let imdb try to figure
        # it out from the file name (with the ext removed)
        if movie is None:
            (file, extension) = utility.split_file_name(file_name)
            # BUG FIX: previously looked up `name` (which can be None here)
            # instead of the extension-less file name computed above.
            movie = self.moviedb.lookup_movie(file)
            if movie is not None:
                self.database.add_movie(movie)
        if movie is None:
            return None
        else:
            return (file_name, movie, disc_num)
    def get_name(self, file_name):
        """Return the title part preceding "(year)", or None."""
        match = self.name_re.match(file_name)
        if match:
            return match.group('name1')
        else:
            return None
    def get_year(self, file_name):
        """Return the year embedded in "(year)" as an int, or None."""
        match = self.year_re.match(file_name)
        if match:
            return int(match.group('year'))
        else:
            return None
    def get_disc(self, file_name):
        """Return the disc number from a "disc N"/"dvd N" tag, or None."""
        match = self.disc_re.match(file_name)
        if match:
            return int(match.group('discnum'))
        else:
            return None
    def get_imdb_id(self, file_name):
        """Return the numeric IMDB id from an "[id]" tag, or None."""
        match = self.imdb_re.match(file_name)
        if match:
            return int(match.group('imdbid'))
        else:
            return None
class EpisodeMatch():
    """A matched series file identified by season and episode numbers.

    NOTE: Python 2 module (print statements below).
    """
    def __init__(self, file_name, series, debug, season_number, episode_number):
        self.file_name = file_name
        self.series = series
        self.debug = debug
        self.season_number = season_number
        self.episode_number = episode_number
    def get_episode_metadata(self, database, thetvdb):
        """Return metadata for this episode, trying the local database first
        and falling back to thetvdb (caching the result in the database).
        Returns None when the episode does not exist."""
        episode = database.get_episode(self.series.id, self.season_number, self.episode_number)
        if episode is None:
            episode = thetvdb.get_specific_episode(self.series, self.season_number, self.episode_number)
            if episode is None:
                if self.debug:
                    print "Season %i episode %i of series '%s' does not exist.\n" % (self.season_number, self.episode_number, self.series.title)
                return None
            else:
                # cache the remote result for subsequent lookups
                database.add_episode(episode, self.series)
        return episode
class DateMatch():
    """Pairs a media file with an episode identified by its original air date."""
    def __init__(self, file_name, series, debug, year, month, day):
        self.file_name = file_name   # path of the media file
        self.series = series         # series metadata object (needs .id and .title)
        self.debug = debug           # when True, print a diagnostic on lookup misses
        self.year = year
        self.month = month
        self.day = day

    def get_episode_metadata(self, database, thetvdb):
        """Return the episode aired on this date, or None when none exists.

        The local *database* is consulted first; on a miss the episode is
        fetched from *thetvdb* and cached back into *database*.
        """
        episode = database.get_episode_by_date(self.series.id, self.year, self.month, self.day)
        if episode is None:
            episode = thetvdb.get_specific_episode_by_date(self.series, self.year, self.month, self.day)
            if episode is None:
                if self.debug:
                    # fix: parenthesized single-argument print() — identical
                    # output on Python 2, and the module also parses on Python 3
                    print("No episode of series '%s' was originally aired on %i-%i-%i.\n" % (self.series.title, self.year, self.month, self.day))
                return None
            else:
                database.add_episode(episode, self.series)
        return episode
class SeriesMatcher():
    """Matches candidate file paths against one TV series.

    Builds a tolerant title regex from the series name (dropping configured
    characters and words) and extracts season/episode numbers or air dates
    from matching file names.
    """

    def __init__(self, config, series, debug):
        self.config = config
        self.series = series
        self.debug = debug
        self.series_title_re = self.compile(self.build_series_title_pattern())
        self.episode_re = self.compile(FILE_PATTERN_BY_EPISODE)
        self.episode_by_folder_re = self.compile(FILE_PATTERN_BY_EPISODE_FOLDERS)
        self.episode_by_date_re = self.compile(FILE_PATTERN_BY_DATE)

    def compile(self, pattern):
        """Compile *pattern* case-insensitively."""
        return re.compile(pattern, re.IGNORECASE)

    def build_series_title_pattern(self):
        """Derive the title-matching regex source for this series."""
        title = self.series.title
        ignorable_chars = self.config.getTitleCharsToIgnore()
        stop_words = self.config.getTitleWordsToIgnore()
        # squash ignorable characters to spaces before tokenizing
        for ch in ignorable_chars:
            title = title.replace(ch, ' ')
        tokens = [word.strip().lower() for word in title.split(' ')]
        kept = [token for token in tokens if token not in stop_words]
        return SERIES_TITLE_PATTERN % (SERIES_TITLE_SEPARATOR_PATTERN.join(kept), )

    def matches_series_title(self, file_path):
        """Whether *file_path* matches this series' title pattern."""
        return self.series_title_re.match(file_path) is not None

    def match_episode(self, file_path):
        """Collect every episode interpretation of *file_path* as match objects."""
        candidates = []
        dir_name, file_name = os.path.split(file_path)
        normalized_path = dir_name.replace('\\', '/') + '/' + file_name
        # season/episode encoded in the file name, else in the folder layout
        found = self.episode_re.match(file_name)
        if not found:
            found = self.episode_by_folder_re.match(normalized_path)
        if found:
            candidates.append(EpisodeMatch(
                file_path, self.series, self.debug,
                int(found.group('season')), int(found.group('episode'))))
        # an air date in the file name is tried independently
        found = self.episode_by_date_re.match(file_name)
        if found:
            which = '1' if found.group('year1') is not None else '2'
            candidates.append(DateMatch(
                file_path, self.series, self.debug,
                self.get_four_digit_year(int(found.group('year' + which))),
                int(found.group('month' + which)),
                int(found.group('day' + which))))
        return candidates

    def get_four_digit_year(self, raw_year):
        """Normalize a possibly two-digit year: 41-99 -> 19xx, 0-40 -> 20xx."""
        if raw_year > 99:
            return raw_year
        return raw_year + (1900 if raw_year > 40 else 2000)
| |
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.runicode import unicode_encode_utf_8
from rpython.rlib.objectmodel import specialize
from rpython.rlib.parsing.ebnfparse import parse_ebnf, make_parse_function
from rpython.rlib.parsing.tree import Symbol, Nonterminal, RPythonVisitor
from rpython.tool.pairtype import extendabletype
# Union-Object to represent a json structure in a static way
class JsonBase(object):
    """Abstract root of the static json value hierarchy."""
    __metaclass__ = extendabletype

    # one boolean flag per concrete kind; each subclass flips its own flag
    is_string = is_int = is_float = is_bool = is_object = is_array = is_null = False

    def __init__(self):
        raise NotImplementedError("abstract base class")

    def tostring(self):
        raise NotImplementedError("abstract base class")

    def is_primitive(self):
        # containers answer False; JsonPrimitive overrides this
        return False

    def _unpack_deep(self):
        "NON_RPYTHON"

    def value_int(self):
        raise TypeError

    def value_float(self):
        raise TypeError

    def value_string(self):
        raise TypeError

    def value_object(self):
        raise TypeError

    def value_array(self):
        raise TypeError
class JsonPrimitive(JsonBase):
    """Base class shared by all non-container json values."""

    def __init__(self):
        # unlike JsonBase, primitives may be instantiated
        pass

    def is_primitive(self):
        return True
class JsonNull(JsonPrimitive):
    """The json ``null`` value."""
    is_null = True

    def _unpack_deep(self):
        return None

    def tostring(self):
        return "null"
class JsonFalse(JsonPrimitive):
    """The json ``false`` value."""
    is_bool = True

    def _unpack_deep(self):
        return False

    def tostring(self):
        return "false"
class JsonTrue(JsonPrimitive):
    """The json ``true`` value."""
    is_bool = True

    def _unpack_deep(self):
        return True

    def tostring(self):
        return "true"
class JsonInt(JsonPrimitive):
    """A json number wrapping an integer value."""
    is_int = True

    def __init__(self, value):
        self.value = value

    def value_int(self):
        return self.value

    def _unpack_deep(self):
        return self.value

    def tostring(self):
        return str(self.value)
class JsonFloat(JsonPrimitive):
    """A json number wrapping a float value."""
    is_float = True

    def __init__(self, value):
        self.value = value

    def value_float(self):
        return self.value

    def _unpack_deep(self):
        return self.value

    def tostring(self):
        return str(self.value)
class JsonString(JsonPrimitive):
    """A json string wrapping the raw string value."""
    is_string = True

    def __init__(self, value):
        self.value = value

    def tostring(self):
        # string_escape_encode really belongs in a more accessible place;
        # imported locally to avoid a module-level pypy dependency here
        from pypy.objspace.std.bytesobject import string_escape_encode
        return string_escape_encode(self.value, '"')

    def value_string(self):
        return self.value

    def _unpack_deep(self):
        return self.value
class JsonObject(JsonBase):
    """A json object backed by a dict of key -> JsonBase values."""
    is_object = True

    def __init__(self, dct):
        self.value = dct

    def tostring(self):
        pairs = ["\"%s\": %s" % (key, self.value[key].tostring()) for key in self.value]
        return "{%s}" % ", ".join(pairs)

    def _unpack_deep(self):
        unpacked = {}
        for key, item in self.value.iteritems():
            unpacked[key] = item._unpack_deep()
        return unpacked

    def value_object(self):
        return self.value
class JsonArray(JsonBase):
    """A json array backed by a list of JsonBase values."""
    is_array = True

    def __init__(self, lst):
        self.value = lst

    def tostring(self):
        items = [element.tostring() for element in self.value]
        return "[%s]" % ", ".join(items)

    def _unpack_deep(self):
        return [element._unpack_deep() for element in self.value]

    def value_array(self):
        return self.value
# Shared singleton instances for the three constant json values.
json_null = JsonNull()
json_true = JsonTrue()
json_false = JsonFalse()
class FakeSpace(object):
    """Minimal object-space stand-in so pypy's JSONDecoder can be driven
    against the Json* model above instead of interpreter-level objects."""
    # space-level constants / constructors the decoder expects
    w_None = json_null
    w_True = json_true
    w_False = json_false
    w_ValueError = ValueError
    w_UnicodeDecodeError = UnicodeDecodeError
    w_UnicodeEncodeError = UnicodeEncodeError
    w_int = JsonInt
    w_float = JsonFloat
    def newtuple(self, items):
        # NOTE(review): deliberately returns None -- presumably the decoder
        # never consumes tuple results; confirm against interp_decoder.
        return None
    def newdict(self):
        return JsonObject({})
    def newlist(self, items):
        # starts empty; elements arrive via call_method('append', ...)
        return JsonArray([])
    def call_method(self, obj, name, arg):
        # only list.append is accepted through this entry point
        assert name == 'append'
        assert isinstance(obj, JsonArray)
        obj.value.append(arg)
    call_method._dont_inline_ = True  # RPython translation hint
    def call_function(self, w_func, *args_w):
        assert 0  # unsupported by this fake space
    def setitem(self, d, key, value):
        assert isinstance(d, JsonObject)
        assert isinstance(key, JsonString)
        d.value[key.value_string()] = value
    def wrapunicode(self, x):
        # eagerly utf-8 encode; JsonString stores the encoded result
        return JsonString(unicode_encode_utf_8(x, len(x), "strict"))
    def wrapint(self, x):
        return JsonInt(x)
    def wrapfloat(self, x):
        return JsonFloat(x)
    def wrap(self, x):
        # generic wrapper: dispatch on the runtime type of x
        if isinstance(x, int):
            return JsonInt(x)
        elif isinstance(x, float):
            return JsonFloat(x)
        return self.wrapunicode(unicode(x))
    wrap._annspecialcase_ = "specialize:argtype(1)"  # RPython specialization hint
# Single shared FakeSpace instance handed to every decoder below.
fakespace = FakeSpace()
from pypy.module._pypyjson.interp_decoder import JSONDecoder
class OwnJSONDecoder(JSONDecoder):
    """JSONDecoder subclass that decodes into the Json* model via FakeSpace."""
    def __init__(self, s):
        self.space = fakespace
        self.s = s
        # we put our string in a raw buffer so:
        # 1) we automatically get the '\0' sentinel at the end of the string,
        # which means that we never have to check for the "end of string"
        self.ll_chars = s + chr(0)
        self.pos = 0
        self.last_type = 0
    def close(self):
        # no-op: this subclass holds no external resources to release
        pass
    @specialize.arg(1)
    def _raise(self, msg, *args):
        # all decode errors surface as ValueError
        raise ValueError(msg % args)
    def decode_float(self, i):
        # greedily consume every character that can appear in a float
        # literal, then let Python's float() do the actual parsing
        start = i
        while self.ll_chars[i] in "+-0123456789.eE":
            i += 1
        self.pos = i
        return self.space.wrap(float(self.getslice(start, i)))
    def decode_string(self, i):
        # scan for the closing quote; delegate to the inherited
        # decode_string_escaped as soon as a backslash shows up
        start = i
        while True:
            # this loop is a fast path for strings which do not contain escape
            # characters
            ch = self.ll_chars[i]
            i += 1
            if ch == '"':
                content_utf8 = self.getslice(start, i-1)
                self.last_type = 1
                self.pos = i
                return JsonString(content_utf8)
            elif ch == '\\':
                content_so_far = self.getslice(start, i-1)
                self.pos = i-1
                return self.decode_string_escaped(start, content_so_far)
            elif ch < '\x20':
                # also hit by the '\0' sentinel, i.e. an unterminated string
                self._raise("Invalid control character at char %d", self.pos-1)
def loads(s):
    """Parse json text *s* into a Json* tree; ValueError on trailing data."""
    decoder = OwnJSONDecoder(s)
    try:
        result = decoder.decode_any(0)
        tail = decoder.skip_whitespace(decoder.pos)
        if tail < len(s):
            raise ValueError("Extra data: char %d - %d" % (tail, len(s) - 1))
        return result
    finally:
        decoder.close()
| |
"""
Generate Gauss-Patterson quadrature abscissa and weights.
Example usage
-------------
With increasing order::
>>> distribution = chaospy.Beta(2, 2, lower=-1, upper=1)
>>> for order in range(3):
... abscissas, weights = chaospy.generate_quadrature(
... order, distribution, rule="patterson")
... print(abscissas.round(2), weights.round(2))
[[0.]] [1.]
[[-0.77 0. 0.77]] [0.17 0.67 0.17]
[[-0.96 -0.77 -0.43 0. 0.43 0.77 0.96]] [0.01 0.08 0.24 0.34 0.24 0.08 0.01]
"""
try:
from functools import lru_cache
except ImportError: # pragma: no cover
from functools32 import lru_cache
import numpy
import chaospy
from .hypercube import hypercube_quadrature
def patterson(order, domain):
    """
    Generate Gauss-Patterson quadrature abscissa and weights.

    The Gauss-Patterson quadrature is a nested family which begins with the
    Gauss-Legendre rules of orders 1 and 3, and then successively inserts one
    new abscissa in each subinterval. Thus, after the second rule, the
    Gauss-Patterson rules do not have the super-high precision of the
    Gauss-Legendre rules. They trade this precision in exchange for the
    advantages of nestedness. This means that Gauss-Patterson rules are only
    available for orders of 0, 2, 6, 14, 30, 62, 126, 254 or 510.

    Args:
        order (int):
            The quadrature order. Must be in the interval [0, 8].
        domain (chaospy.Distribution, numpy.ndarray):
            Either distribution or bounding of interval to integrate over.

    Returns:
        abscissas: (numpy.ndarray):
            The quadrature points for where to evaluate the model function
            with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
            number of samples.
        weights: (numpy.ndarray):
            The quadrature weights with ``weights.shape == (N,)``.

    Notes:
        This code is an adaptation of John Burkardt's implementation in
        Fortran. The algorithm is taken from "Handbook of Computational
        Methods for Integration":cite:`kythe_handbook_2004`, and "The Optimal
        Addition of Points to Quadrature
        Formulae":cite:`patterson_optimum_1968`.

    Example:
        >>> distribution = chaospy.Iid(chaospy.Uniform(0, 1), 2)
        >>> abscissas, weights = chaospy.quadrature.patterson(1, distribution)
        >>> abscissas.round(3)
        array([[0.113, 0.113, 0.113, 0.5  , 0.5  , 0.5  , 0.887, 0.887, 0.887],
               [0.113, 0.5  , 0.887, 0.113, 0.5  , 0.887, 0.113, 0.5  , 0.887]])
        >>> weights.round(3)
        array([0.077, 0.123, 0.077, 0.123, 0.198, 0.123, 0.077, 0.123, 0.077])
    """
    # all of the heavy lifting (scaling to the domain, tensor products)
    # happens in the generic hypercube wrapper
    return hypercube_quadrature(
        patterson_simple,
        order=int(order),
        domain=domain,
    )
@lru_cache(None)
def patterson_simple(order):
    """Raw Gauss-Patterson rule of nesting level *order* (0-8), rescaled
    from the tabulated interval [-1, 1] to the unit interval [0, 1]."""
    assert order < 9
    raw_abscissas, raw_weights = PATTERSON_VALUES[int(order)]
    abscissas = numpy.array(raw_abscissas)*0.5+0.5
    weights = numpy.array(raw_weights)/2.
    return abscissas, weights
PATTERSON_VALUES = {
0 : ((0e+00,), (2.0e+00,)),
1 : ((
-0.77459666924148337704e+00, 0.0e+00, 0.77459666924148337704e+00,
), (
0.555555555555555555556e+00, 0.888888888888888888889e+00,
0.555555555555555555556e+00,
)),
2 : ((
-0.96049126870802028342e+00, -0.77459666924148337704e+00,
-0.43424374934680255800e+00, 0.0e+00, 0.43424374934680255800e+00,
0.77459666924148337704e+00, 0.96049126870802028342e+00,
), (
0.104656226026467265194e+00, 0.268488089868333440729e+00,
0.401397414775962222905e+00, 0.450916538658474142345e+00,
0.401397414775962222905e+00, 0.268488089868333440729e+00,
0.104656226026467265194e+00,
)),
3 : ((
-0.99383196321275502221e+00, -0.96049126870802028342e+00,
-0.88845923287225699889e+00, -0.77459666924148337704e+00,
-0.62110294673722640294e+00, -0.43424374934680255800e+00,
-0.22338668642896688163e+00, 0.0e+00, 0.22338668642896688163e+00,
0.43424374934680255800e+00, 0.62110294673722640294e+00,
0.77459666924148337704e+00, 0.88845923287225699889e+00,
0.96049126870802028342e+00, 0.99383196321275502221e+00,
), (
0.0170017196299402603390e+00, 0.0516032829970797396969e+00,
0.0929271953151245376859e+00, 0.134415255243784220360e+00,
0.171511909136391380787e+00, 0.200628529376989021034e+00,
0.219156858401587496404e+00, 0.225510499798206687386e+00,
0.219156858401587496404e+00, 0.200628529376989021034e+00,
0.171511909136391380787e+00, 0.134415255243784220360e+00,
0.0929271953151245376859e+00, 0.0516032829970797396969e+00,
0.0170017196299402603390e+00,
)),
4 : ((
-0.99909812496766759766e+00, -0.99383196321275502221e+00,
-0.98153114955374010687e+00, -0.96049126870802028342e+00,
-0.92965485742974005667e+00, -0.88845923287225699889e+00,
-0.83672593816886873550e+00, -0.77459666924148337704e+00,
-0.70249620649152707861e+00, -0.62110294673722640294e+00,
-0.53131974364437562397e+00, -0.43424374934680255800e+00,
-0.33113539325797683309e+00, -0.22338668642896688163e+00,
-0.11248894313318662575e+00, 0.0e+00, 0.11248894313318662575e+00,
0.22338668642896688163e+00, 0.33113539325797683309e+00,
0.43424374934680255800e+00, 0.53131974364437562397e+00,
0.62110294673722640294e+00, 0.70249620649152707861e+00,
0.77459666924148337704e+00, 0.83672593816886873550e+00,
0.88845923287225699889e+00, 0.92965485742974005667e+00,
0.96049126870802028342e+00, 0.98153114955374010687e+00,
0.99383196321275502221e+00, 0.99909812496766759766e+00,
), (
0.00254478079156187441540e+00, 0.00843456573932110624631e+00,
0.0164460498543878109338e+00, 0.0258075980961766535646e+00,
0.0359571033071293220968e+00, 0.0464628932617579865414e+00,
0.0569795094941233574122e+00, 0.0672077542959907035404e+00,
0.0768796204990035310427e+00, 0.0857559200499903511542e+00,
0.0936271099812644736167e+00, 0.100314278611795578771e+00,
0.105669893580234809744e+00, 0.109578421055924638237e+00,
0.111956873020953456880e+00, 0.112755256720768691607e+00,
0.111956873020953456880e+00, 0.109578421055924638237e+00,
0.105669893580234809744e+00, 0.100314278611795578771e+00,
0.0936271099812644736167e+00, 0.0857559200499903511542e+00,
0.0768796204990035310427e+00, 0.0672077542959907035404e+00,
0.0569795094941233574122e+00, 0.0464628932617579865414e+00,
0.0359571033071293220968e+00, 0.0258075980961766535646e+00,
0.0164460498543878109338e+00, 0.00843456573932110624631e+00,
0.00254478079156187441540e+00,
)),
5 : ((
-0.99987288812035761194e+00, -0.99909812496766759766e+00,
-0.99720625937222195908e+00, -0.99383196321275502221e+00,
-0.98868475754742947994e+00, -0.98153114955374010687e+00,
-0.97218287474858179658e+00, -0.96049126870802028342e+00,
-0.94634285837340290515e+00, -0.92965485742974005667e+00,
-0.91037115695700429250e+00, -0.88845923287225699889e+00,
-0.86390793819369047715e+00, -0.83672593816886873550e+00,
-0.80694053195021761186e+00, -0.77459666924148337704e+00,
-0.73975604435269475868e+00, -0.70249620649152707861e+00,
-0.66290966002478059546e+00, -0.62110294673722640294e+00,
-0.57719571005204581484e+00, -0.53131974364437562397e+00,
-0.48361802694584102756e+00, -0.43424374934680255800e+00,
-0.38335932419873034692e+00, -0.33113539325797683309e+00,
-0.27774982202182431507e+00, -0.22338668642896688163e+00,
-0.16823525155220746498e+00, -0.11248894313318662575e+00,
-0.056344313046592789972e+00, 0.0e+00, 0.056344313046592789972e+00,
0.11248894313318662575e+00, 0.16823525155220746498e+00,
0.22338668642896688163e+00, 0.27774982202182431507e+00,
0.33113539325797683309e+00, 0.38335932419873034692e+00,
0.43424374934680255800e+00, 0.48361802694584102756e+00,
0.53131974364437562397e+00, 0.57719571005204581484e+00,
0.62110294673722640294e+00, 0.66290966002478059546e+00,
0.70249620649152707861e+00, 0.73975604435269475868e+00,
0.77459666924148337704e+00, 0.80694053195021761186e+00,
0.83672593816886873550e+00, 0.86390793819369047715e+00,
0.88845923287225699889e+00, 0.91037115695700429250e+00,
0.92965485742974005667e+00, 0.94634285837340290515e+00,
0.96049126870802028342e+00, 0.97218287474858179658e+00,
0.98153114955374010687e+00, 0.98868475754742947994e+00,
0.99383196321275502221e+00, 0.99720625937222195908e+00,
0.99909812496766759766e+00, 0.99987288812035761194e+00,
), (
0.000363221481845530659694e+00, 0.00126515655623006801137e+00,
0.00257904979468568827243e+00, 0.00421763044155885483908e+00,
0.00611550682211724633968e+00, 0.00822300795723592966926e+00,
0.0104982469096213218983e+00, 0.0129038001003512656260e+00,
0.0154067504665594978021e+00, 0.0179785515681282703329e+00,
0.0205942339159127111492e+00, 0.0232314466399102694433e+00,
0.0258696793272147469108e+00, 0.0284897547458335486125e+00,
0.0310735511116879648799e+00, 0.0336038771482077305417e+00,
0.0360644327807825726401e+00, 0.0384398102494555320386e+00,
0.0407155101169443189339e+00, 0.0428779600250077344929e+00,
0.0449145316536321974143e+00, 0.0468135549906280124026e+00,
0.0485643304066731987159e+00, 0.0501571393058995374137e+00,
0.0515832539520484587768e+00, 0.0528349467901165198621e+00,
0.0539054993352660639269e+00, 0.0547892105279628650322e+00,
0.0554814043565593639878e+00, 0.0559784365104763194076e+00,
0.0562776998312543012726e+00, 0.0563776283603847173877e+00,
0.0562776998312543012726e+00, 0.0559784365104763194076e+00,
0.0554814043565593639878e+00, 0.0547892105279628650322e+00,
0.0539054993352660639269e+00, 0.0528349467901165198621e+00,
0.0515832539520484587768e+00, 0.0501571393058995374137e+00,
0.0485643304066731987159e+00, 0.0468135549906280124026e+00,
0.0449145316536321974143e+00, 0.0428779600250077344929e+00,
0.0407155101169443189339e+00, 0.0384398102494555320386e+00,
0.0360644327807825726401e+00, 0.0336038771482077305417e+00,
0.0310735511116879648799e+00, 0.0284897547458335486125e+00,
0.0258696793272147469108e+00, 0.0232314466399102694433e+00,
0.0205942339159127111492e+00, 0.0179785515681282703329e+00,
0.0154067504665594978021e+00, 0.0129038001003512656260e+00,
0.0104982469096213218983e+00, 0.00822300795723592966926e+00,
0.00611550682211724633968e+00, 0.00421763044155885483908e+00,
0.00257904979468568827243e+00, 0.00126515655623006801137e+00,
0.000363221481845530659694e+00,
)),
6 : ((
-0.99998243035489159858e+00, -0.99987288812035761194e+00,
-0.99959879967191068325e+00, -0.99909812496766759766e+00,
-0.99831663531840739253e+00, -0.99720625937222195908e+00,
-0.99572410469840718851e+00, -0.99383196321275502221e+00,
-0.99149572117810613240e+00, -0.98868475754742947994e+00,
-0.98537149959852037111e+00, -0.98153114955374010687e+00,
-0.97714151463970571416e+00, -0.97218287474858179658e+00,
-0.96663785155841656709e+00, -0.96049126870802028342e+00,
-0.95373000642576113641e+00, -0.94634285837340290515e+00,
-0.93832039777959288365e+00, -0.92965485742974005667e+00,
-0.92034002547001242073e+00, -0.91037115695700429250e+00,
-0.89974489977694003664e+00, -0.88845923287225699889e+00,
-0.87651341448470526974e+00, -0.86390793819369047715e+00,
-0.85064449476835027976e+00, -0.83672593816886873550e+00,
-0.82215625436498040737e+00, -0.80694053195021761186e+00,
-0.79108493379984836143e+00, -0.77459666924148337704e+00,
-0.75748396638051363793e+00, -0.73975604435269475868e+00,
-0.72142308537009891548e+00, -0.70249620649152707861e+00,
-0.68298743109107922809e+00, -0.66290966002478059546e+00,
-0.64227664250975951377e+00, -0.62110294673722640294e+00,
-0.59940393024224289297e+00, -0.57719571005204581484e+00,
-0.55449513263193254887e+00, -0.53131974364437562397e+00,
-0.50768775753371660215e+00, -0.48361802694584102756e+00,
-0.45913001198983233287e+00, -0.43424374934680255800e+00,
-0.40897982122988867241e+00, -0.38335932419873034692e+00,
-0.35740383783153215238e+00, -0.33113539325797683309e+00,
-0.30457644155671404334e+00, -0.27774982202182431507e+00,
-0.25067873030348317661e+00, -0.22338668642896688163e+00,
-0.19589750271110015392e+00, -0.16823525155220746498e+00,
-0.14042423315256017459e+00, -0.11248894313318662575e+00,
-0.084454040083710883710e+00, -0.056344313046592789972e+00,
-0.028184648949745694339e+00, 0.0e+00, 0.028184648949745694339e+00,
0.056344313046592789972e+00, 0.084454040083710883710e+00,
0.11248894313318662575e+00, 0.14042423315256017459e+00,
0.16823525155220746498e+00, 0.19589750271110015392e+00,
0.22338668642896688163e+00, 0.25067873030348317661e+00,
0.27774982202182431507e+00, 0.30457644155671404334e+00,
0.33113539325797683309e+00, 0.35740383783153215238e+00,
0.38335932419873034692e+00, 0.40897982122988867241e+00,
0.43424374934680255800e+00, 0.45913001198983233287e+00,
0.48361802694584102756e+00, 0.50768775753371660215e+00,
0.53131974364437562397e+00, 0.55449513263193254887e+00,
0.57719571005204581484e+00, 0.59940393024224289297e+00,
0.62110294673722640294e+00, 0.64227664250975951377e+00,
0.66290966002478059546e+00, 0.68298743109107922809e+00,
0.70249620649152707861e+00, 0.72142308537009891548e+00,
0.73975604435269475868e+00, 0.75748396638051363793e+00,
0.77459666924148337704e+00, 0.79108493379984836143e+00,
0.80694053195021761186e+00, 0.82215625436498040737e+00,
0.83672593816886873550e+00, 0.85064449476835027976e+00,
0.86390793819369047715e+00, 0.87651341448470526974e+00,
0.88845923287225699889e+00, 0.89974489977694003664e+00,
0.91037115695700429250e+00, 0.92034002547001242073e+00,
0.92965485742974005667e+00, 0.93832039777959288365e+00,
0.94634285837340290515e+00, 0.95373000642576113641e+00,
0.96049126870802028342e+00, 0.96663785155841656709e+00,
0.97218287474858179658e+00, 0.97714151463970571416e+00,
0.98153114955374010687e+00, 0.98537149959852037111e+00,
0.98868475754742947994e+00, 0.99149572117810613240e+00,
0.99383196321275502221e+00, 0.99572410469840718851e+00,
0.99720625937222195908e+00, 0.99831663531840739253e+00,
0.99909812496766759766e+00, 0.99959879967191068325e+00,
0.99987288812035761194e+00, 0.99998243035489159858e+00,
), (
0.0000505360952078625176247e+00, 0.000180739564445388357820e+00,
0.000377746646326984660274e+00, 0.000632607319362633544219e+00,
0.000938369848542381500794e+00, 0.00128952408261041739210e+00,
0.00168114286542146990631e+00, 0.00210881524572663287933e+00,
0.00256876494379402037313e+00, 0.00305775341017553113613e+00,
0.00357289278351729964938e+00, 0.00411150397865469304717e+00,
0.00467105037211432174741e+00, 0.00524912345480885912513e+00,
0.00584344987583563950756e+00, 0.00645190005017573692280e+00,
0.00707248999543355546805e+00, 0.00770337523327974184817e+00,
0.00834283875396815770558e+00, 0.00898927578406413572328e+00,
0.00964117772970253669530e+00, 0.0102971169579563555237e+00,
0.0109557333878379016480e+00, 0.0116157233199551347270e+00,
0.0122758305600827700870e+00, 0.0129348396636073734547e+00,
0.0135915710097655467896e+00, 0.0142448773729167743063e+00,
0.0148936416648151820348e+00, 0.0155367755558439824399e+00,
0.0161732187295777199419e+00, 0.0168019385741038652709e+00,
0.0174219301594641737472e+00, 0.0180322163903912863201e+00,
0.0186318482561387901863e+00, 0.0192199051247277660193e+00,
0.0197954950480974994880e+00, 0.0203577550584721594669e+00,
0.0209058514458120238522e+00, 0.0214389800125038672465e+00,
0.0219563663053178249393e+00, 0.0224572658268160987071e+00,
0.0229409642293877487608e+00, 0.0234067774953140062013e+00,
0.0238540521060385400804e+00, 0.0242821652033365993580e+00,
0.0246905247444876769091e+00, 0.0250785696529497687068e+00,
0.0254457699654647658126e+00, 0.0257916269760242293884e+00,
0.0261156733767060976805e+00, 0.0264174733950582599310e+00,
0.0266966229274503599062e+00, 0.0269527496676330319634e+00,
0.0271855132296247918192e+00, 0.0273946052639814325161e+00,
0.0275797495664818730349e+00, 0.0277407021782796819939e+00,
0.0278772514766137016085e+00, 0.0279892182552381597038e+00,
0.0280764557938172466068e+00, 0.0281388499156271506363e+00,
0.0281763190330166021307e+00, 0.0281888141801923586938e+00,
0.0281763190330166021307e+00, 0.0281388499156271506363e+00,
0.0280764557938172466068e+00, 0.0279892182552381597038e+00,
0.0278772514766137016085e+00, 0.0277407021782796819939e+00,
0.0275797495664818730349e+00, 0.0273946052639814325161e+00,
0.0271855132296247918192e+00, 0.0269527496676330319634e+00,
0.0266966229274503599062e+00, 0.0264174733950582599310e+00,
0.0261156733767060976805e+00, 0.0257916269760242293884e+00,
0.0254457699654647658126e+00, 0.0250785696529497687068e+00,
0.0246905247444876769091e+00, 0.0242821652033365993580e+00,
0.0238540521060385400804e+00, 0.0234067774953140062013e+00,
0.0229409642293877487608e+00, 0.0224572658268160987071e+00,
0.0219563663053178249393e+00, 0.0214389800125038672465e+00,
0.0209058514458120238522e+00, 0.0203577550584721594669e+00,
0.0197954950480974994880e+00, 0.0192199051247277660193e+00,
0.0186318482561387901863e+00, 0.0180322163903912863201e+00,
0.0174219301594641737472e+00, 0.0168019385741038652709e+00,
0.0161732187295777199419e+00, 0.0155367755558439824399e+00,
0.0148936416648151820348e+00, 0.0142448773729167743063e+00,
0.0135915710097655467896e+00, 0.0129348396636073734547e+00,
0.0122758305600827700870e+00, 0.0116157233199551347270e+00,
0.0109557333878379016480e+00, 0.0102971169579563555237e+00,
0.00964117772970253669530e+00, 0.00898927578406413572328e+00,
0.00834283875396815770558e+00, 0.00770337523327974184817e+00,
0.00707248999543355546805e+00, 0.00645190005017573692280e+00,
0.00584344987583563950756e+00, 0.00524912345480885912513e+00,
0.00467105037211432174741e+00, 0.00411150397865469304717e+00,
0.00357289278351729964938e+00, 0.00305775341017553113613e+00,
0.00256876494379402037313e+00, 0.00210881524572663287933e+00,
0.00168114286542146990631e+00, 0.00128952408261041739210e+00,
0.000938369848542381500794e+00, 0.000632607319362633544219e+00,
0.000377746646326984660274e+00, 0.000180739564445388357820e+00,
0.0000505360952078625176247e+00,
)),
7 : ((
-0.99999759637974846462e+00, -0.99998243035489159858e+00,
-0.99994399620705437576e+00, -0.99987288812035761194e+00,
-0.99976049092443204733e+00, -0.99959879967191068325e+00,
-0.99938033802502358193e+00, -0.99909812496766759766e+00,
-0.99874561446809511470e+00, -0.99831663531840739253e+00,
-0.99780535449595727456e+00, -0.99720625937222195908e+00,
-0.99651414591489027385e+00, -0.99572410469840718851e+00,
-0.99483150280062100052e+00, -0.99383196321275502221e+00,
-0.99272134428278861533e+00, -0.99149572117810613240e+00,
-0.99015137040077015918e+00, -0.98868475754742947994e+00,
-0.98709252795403406719e+00, -0.98537149959852037111e+00,
-0.98351865757863272876e+00, -0.98153114955374010687e+00,
-0.97940628167086268381e+00, -0.97714151463970571416e+00,
-0.97473445975240266776e+00, -0.97218287474858179658e+00,
-0.96948465950245923177e+00, -0.96663785155841656709e+00,
-0.96364062156981213252e+00, -0.96049126870802028342e+00,
-0.95718821610986096274e+00, -0.95373000642576113641e+00,
-0.95011529752129487656e+00, -0.94634285837340290515e+00,
-0.94241156519108305981e+00, -0.93832039777959288365e+00,
-0.93406843615772578800e+00, -0.92965485742974005667e+00,
-0.92507893290707565236e+00, -0.92034002547001242073e+00,
-0.91543758715576504064e+00, -0.91037115695700429250e+00,
-0.90514035881326159519e+00, -0.89974489977694003664e+00,
-0.89418456833555902286e+00, -0.88845923287225699889e+00,
-0.88256884024734190684e+00, -0.87651341448470526974e+00,
-0.87029305554811390585e+00, -0.86390793819369047715e+00,
-0.85735831088623215653e+00, -0.85064449476835027976e+00,
-0.84376688267270860104e+00, -0.83672593816886873550e+00,
-0.82952219463740140018e+00, -0.82215625436498040737e+00,
-0.81462878765513741344e+00, -0.80694053195021761186e+00,
-0.79909229096084140180e+00, -0.79108493379984836143e+00,
-0.78291939411828301639e+00, -0.77459666924148337704e+00,
-0.76611781930376009072e+00, -0.75748396638051363793e+00,
-0.74869629361693660282e+00, -0.73975604435269475868e+00,
-0.73066452124218126133e+00, -0.72142308537009891548e+00,
-0.71203315536225203459e+00, -0.70249620649152707861e+00,
-0.69281376977911470289e+00, -0.68298743109107922809e+00,
-0.67301883023041847920e+00, -0.66290966002478059546e+00,
-0.65266166541001749610e+00, -0.64227664250975951377e+00,
-0.63175643771119423041e+00, -0.62110294673722640294e+00,
-0.61031811371518640016e+00, -0.59940393024224289297e+00,
-0.58836243444766254143e+00, -0.57719571005204581484e+00,
-0.56590588542365442262e+00, -0.55449513263193254887e+00,
-0.54296566649831149049e+00, -0.53131974364437562397e+00,
-0.51955966153745702199e+00, -0.50768775753371660215e+00,
-0.49570640791876146017e+00, -0.48361802694584102756e+00,
-0.47142506587165887693e+00, -0.45913001198983233287e+00,
-0.44673538766202847374e+00, -0.43424374934680255800e+00,
-0.42165768662616330006e+00, -0.40897982122988867241e+00,
-0.39621280605761593918e+00, -0.38335932419873034692e+00,
-0.37042208795007823014e+00, -0.35740383783153215238e+00,
-0.34430734159943802278e+00, -0.33113539325797683309e+00,
-0.31789081206847668318e+00, -0.30457644155671404334e+00,
-0.29119514851824668196e+00, -0.27774982202182431507e+00,
-0.26424337241092676194e+00, -0.25067873030348317661e+00,
-0.23705884558982972721e+00, -0.22338668642896688163e+00,
-0.20966523824318119477e+00, -0.19589750271110015392e+00,
-0.18208649675925219825e+00, -0.16823525155220746498e+00,
-0.15434681148137810869e+00, -0.14042423315256017459e+00,
-0.12647058437230196685e+00, -0.11248894313318662575e+00,
-0.098482396598119202090e+00, -0.084454040083710883710e+00,
-0.070406976042855179063e+00, -0.056344313046592789972e+00,
-0.042269164765363603212e+00, -0.028184648949745694339e+00,
-0.014093886410782462614e+00, 0.0e+00, 0.014093886410782462614e+00,
0.028184648949745694339e+00, 0.042269164765363603212e+00,
0.056344313046592789972e+00, 0.070406976042855179063e+00,
0.084454040083710883710e+00, 0.098482396598119202090e+00,
0.11248894313318662575e+00, 0.12647058437230196685e+00,
0.14042423315256017459e+00, 0.15434681148137810869e+00,
0.16823525155220746498e+00, 0.18208649675925219825e+00,
0.19589750271110015392e+00, 0.20966523824318119477e+00,
0.22338668642896688163e+00, 0.23705884558982972721e+00,
0.25067873030348317661e+00, 0.26424337241092676194e+00,
0.27774982202182431507e+00, 0.29119514851824668196e+00,
0.30457644155671404334e+00, 0.31789081206847668318e+00,
0.33113539325797683309e+00, 0.34430734159943802278e+00,
0.35740383783153215238e+00, 0.37042208795007823014e+00,
0.38335932419873034692e+00, 0.39621280605761593918e+00,
0.40897982122988867241e+00, 0.42165768662616330006e+00,
0.43424374934680255800e+00, 0.44673538766202847374e+00,
0.45913001198983233287e+00, 0.47142506587165887693e+00,
0.48361802694584102756e+00, 0.49570640791876146017e+00,
0.50768775753371660215e+00, 0.51955966153745702199e+00,
0.53131974364437562397e+00, 0.54296566649831149049e+00,
0.55449513263193254887e+00, 0.56590588542365442262e+00,
0.57719571005204581484e+00, 0.58836243444766254143e+00,
0.59940393024224289297e+00, 0.61031811371518640016e+00,
0.62110294673722640294e+00, 0.63175643771119423041e+00,
0.64227664250975951377e+00, 0.65266166541001749610e+00,
0.66290966002478059546e+00, 0.67301883023041847920e+00,
0.68298743109107922809e+00, 0.69281376977911470289e+00,
0.70249620649152707861e+00, 0.71203315536225203459e+00,
0.72142308537009891548e+00, 0.73066452124218126133e+00,
0.73975604435269475868e+00, 0.74869629361693660282e+00,
0.75748396638051363793e+00, 0.76611781930376009072e+00,
0.77459666924148337704e+00, 0.78291939411828301639e+00,
0.79108493379984836143e+00, 0.79909229096084140180e+00,
0.80694053195021761186e+00, 0.81462878765513741344e+00,
0.82215625436498040737e+00, 0.82952219463740140018e+00,
0.83672593816886873550e+00, 0.84376688267270860104e+00,
0.85064449476835027976e+00, 0.85735831088623215653e+00,
0.86390793819369047715e+00, 0.87029305554811390585e+00,
0.87651341448470526974e+00, 0.88256884024734190684e+00,
0.88845923287225699889e+00, 0.89418456833555902286e+00,
0.89974489977694003664e+00, 0.90514035881326159519e+00,
0.91037115695700429250e+00, 0.91543758715576504064e+00,
0.92034002547001242073e+00, 0.92507893290707565236e+00,
0.92965485742974005667e+00, 0.93406843615772578800e+00,
0.93832039777959288365e+00, 0.94241156519108305981e+00,
0.94634285837340290515e+00, 0.95011529752129487656e+00,
0.95373000642576113641e+00, 0.95718821610986096274e+00,
0.96049126870802028342e+00, 0.96364062156981213252e+00,
0.96663785155841656709e+00, 0.96948465950245923177e+00,
0.97218287474858179658e+00, 0.97473445975240266776e+00,
0.97714151463970571416e+00, 0.97940628167086268381e+00,
0.98153114955374010687e+00, 0.98351865757863272876e+00,
0.98537149959852037111e+00, 0.98709252795403406719e+00,
0.98868475754742947994e+00, 0.99015137040077015918e+00,
0.99149572117810613240e+00, 0.99272134428278861533e+00,
0.99383196321275502221e+00, 0.99483150280062100052e+00,
0.99572410469840718851e+00, 0.99651414591489027385e+00,
0.99720625937222195908e+00, 0.99780535449595727456e+00,
0.99831663531840739253e+00, 0.99874561446809511470e+00,
0.99909812496766759766e+00, 0.99938033802502358193e+00,
0.99959879967191068325e+00, 0.99976049092443204733e+00,
0.99987288812035761194e+00, 0.99994399620705437576e+00,
0.99998243035489159858e+00, 0.99999759637974846462e+00,
), (
0.69379364324108267170e-05, 0.25157870384280661489e-04,
0.53275293669780613125e-04, 0.90372734658751149261e-04,
0.13575491094922871973e-03, 0.18887326450650491366e-03,
0.24921240048299729402e-03, 0.31630366082226447689e-03,
0.38974528447328229322e-03, 0.46918492424785040975e-03,
0.55429531493037471492e-03, 0.64476204130572477933e-03,
0.74028280424450333046e-03, 0.84057143271072246365e-03,
0.94536151685852538246e-03, 0.10544076228633167722e-02,
0.11674841174299594077e-02, 0.12843824718970101768e-02,
0.14049079956551446427e-02, 0.15288767050877655684e-02,
0.16561127281544526052e-02, 0.17864463917586498247e-02,
0.19197129710138724125e-02, 0.20557519893273465236e-02,
0.21944069253638388388e-02, 0.23355251860571608737e-02,
0.24789582266575679307e-02, 0.26245617274044295626e-02,
0.27721957645934509940e-02, 0.29217249379178197538e-02,
0.30730184347025783234e-02, 0.32259500250878684614e-02,
0.33803979910869203823e-02, 0.35362449977167777340e-02,
0.36933779170256508183e-02, 0.38516876166398709241e-02,
0.40110687240750233989e-02, 0.41714193769840788528e-02,
0.43326409680929828545e-02, 0.44946378920320678616e-02,
0.46573172997568547773e-02, 0.48205888648512683476e-02,
0.49843645647655386012e-02, 0.51485584789781777618e-02,
0.53130866051870565663e-02, 0.54778666939189508240e-02,
0.56428181013844441585e-02, 0.58078616599775673635e-02,
0.59729195655081658049e-02, 0.61379152800413850435e-02,
0.63027734490857587172e-02, 0.64674198318036867274e-02,
0.66317812429018878941e-02, 0.67957855048827733948e-02,
0.69593614093904229394e-02, 0.71224386864583871532e-02,
0.72849479805538070639e-02, 0.74468208324075910174e-02,
0.76079896657190565832e-02, 0.77683877779219912200e-02,
0.79279493342948491103e-02, 0.80866093647888599710e-02,
0.82443037630328680306e-02, 0.84009692870519326354e-02,
0.85565435613076896192e-02, 0.87109650797320868736e-02,
0.88641732094824942641e-02, 0.90161081951956431600e-02,
0.91667111635607884067e-02, 0.93159241280693950932e-02,
0.94636899938300652943e-02, 0.96099525623638830097e-02,
0.97546565363174114611e-02, 0.98977475240487497440e-02,
0.10039172044056840798e-01, 0.10178877529236079733e-01,
0.10316812330947621682e-01, 0.10452925722906011926e-01,
0.10587167904885197931e-01, 0.10719490006251933623e-01,
0.10849844089337314099e-01, 0.10978183152658912470e-01,
0.11104461134006926537e-01, 0.11228632913408049354e-01,
0.11350654315980596602e-01, 0.11470482114693874380e-01,
0.11588074033043952568e-01, 0.11703388747657003101e-01,
0.11816385890830235763e-01, 0.11927026053019270040e-01,
0.12035270785279562630e-01, 0.12141082601668299679e-01,
0.12244424981611985899e-01, 0.12345262372243838455e-01,
0.12443560190714035263e-01, 0.12539284826474884353e-01,
0.12632403643542078765e-01, 0.12722884982732382906e-01,
0.12810698163877361967e-01, 0.12895813488012114694e-01,
0.12978202239537399286e-01, 0.13057836688353048840e-01,
0.13134690091960152836e-01, 0.13208736697529129966e-01,
0.13279951743930530650e-01, 0.13348311463725179953e-01,
0.13413793085110098513e-01, 0.13476374833816515982e-01,
0.13536035934956213614e-01, 0.13592756614812395910e-01,
0.13646518102571291428e-01, 0.13697302631990716258e-01,
0.13745093443001896632e-01, 0.13789874783240936517e-01,
0.13831631909506428676e-01, 0.13870351089139840997e-01,
0.13906019601325461264e-01, 0.13938625738306850804e-01,
0.13968158806516938516e-01, 0.13994609127619079852e-01,
0.14017968039456608810e-01, 0.14038227896908623303e-01,
0.14055382072649964277e-01, 0.14069424957813575318e-01,
0.14080351962553661325e-01, 0.14088159516508301065e-01,
0.14092845069160408355e-01, 0.14094407090096179347e-01,
0.14092845069160408355e-01, 0.14088159516508301065e-01,
0.14080351962553661325e-01, 0.14069424957813575318e-01,
0.14055382072649964277e-01, 0.14038227896908623303e-01,
0.14017968039456608810e-01, 0.13994609127619079852e-01,
0.13968158806516938516e-01, 0.13938625738306850804e-01,
0.13906019601325461264e-01, 0.13870351089139840997e-01,
0.13831631909506428676e-01, 0.13789874783240936517e-01,
0.13745093443001896632e-01, 0.13697302631990716258e-01,
0.13646518102571291428e-01, 0.13592756614812395910e-01,
0.13536035934956213614e-01, 0.13476374833816515982e-01,
0.13413793085110098513e-01, 0.13348311463725179953e-01,
0.13279951743930530650e-01, 0.13208736697529129966e-01,
0.13134690091960152836e-01, 0.13057836688353048840e-01,
0.12978202239537399286e-01, 0.12895813488012114694e-01,
0.12810698163877361967e-01, 0.12722884982732382906e-01,
0.12632403643542078765e-01, 0.12539284826474884353e-01,
0.12443560190714035263e-01, 0.12345262372243838455e-01,
0.12244424981611985899e-01, 0.12141082601668299679e-01,
0.12035270785279562630e-01, 0.11927026053019270040e-01,
0.11816385890830235763e-01, 0.11703388747657003101e-01,
0.11588074033043952568e-01, 0.11470482114693874380e-01,
0.11350654315980596602e-01, 0.11228632913408049354e-01,
0.11104461134006926537e-01, 0.10978183152658912470e-01,
0.10849844089337314099e-01, 0.10719490006251933623e-01,
0.10587167904885197931e-01, 0.10452925722906011926e-01,
0.10316812330947621682e-01, 0.10178877529236079733e-01,
0.10039172044056840798e-01, 0.98977475240487497440e-02,
0.97546565363174114611e-02, 0.96099525623638830097e-02,
0.94636899938300652943e-02, 0.93159241280693950932e-02,
0.91667111635607884067e-02, 0.90161081951956431600e-02,
0.88641732094824942641e-02, 0.87109650797320868736e-02,
0.85565435613076896192e-02, 0.84009692870519326354e-02,
0.82443037630328680306e-02, 0.80866093647888599710e-02,
0.79279493342948491103e-02, 0.77683877779219912200e-02,
0.76079896657190565832e-02, 0.74468208324075910174e-02,
0.72849479805538070639e-02, 0.71224386864583871532e-02,
0.69593614093904229394e-02, 0.67957855048827733948e-02,
0.66317812429018878941e-02, 0.64674198318036867274e-02,
0.63027734490857587172e-02, 0.61379152800413850435e-02,
0.59729195655081658049e-02, 0.58078616599775673635e-02,
0.56428181013844441585e-02, 0.54778666939189508240e-02,
0.53130866051870565663e-02, 0.51485584789781777618e-02,
0.49843645647655386012e-02, 0.48205888648512683476e-02,
0.46573172997568547773e-02, 0.44946378920320678616e-02,
0.43326409680929828545e-02, 0.41714193769840788528e-02,
0.40110687240750233989e-02, 0.38516876166398709241e-02,
0.36933779170256508183e-02, 0.35362449977167777340e-02,
0.33803979910869203823e-02, 0.32259500250878684614e-02,
0.30730184347025783234e-02, 0.29217249379178197538e-02,
0.27721957645934509940e-02, 0.26245617274044295626e-02,
0.24789582266575679307e-02, 0.23355251860571608737e-02,
0.21944069253638388388e-02, 0.20557519893273465236e-02,
0.19197129710138724125e-02, 0.17864463917586498247e-02,
0.16561127281544526052e-02, 0.15288767050877655684e-02,
0.14049079956551446427e-02, 0.12843824718970101768e-02,
0.11674841174299594077e-02, 0.10544076228633167722e-02,
0.94536151685852538246e-03, 0.84057143271072246365e-03,
0.74028280424450333046e-03, 0.64476204130572477933e-03,
0.55429531493037471492e-03, 0.46918492424785040975e-03,
0.38974528447328229322e-03, 0.31630366082226447689e-03,
0.24921240048299729402e-03, 0.18887326450650491366e-03,
0.13575491094922871973e-03, 0.90372734658751149261e-04,
0.53275293669780613125e-04, 0.25157870384280661489e-04,
0.69379364324108267170e-05,
)),
8 : ((
-0.999999672956734384381e+00, -0.999997596379748464620e+00,
-0.999992298136257588028e+00, -0.999982430354891598580e+00,
-0.999966730098486276883e+00, -0.999943996207054375764e+00,
-0.999913081144678282800e+00, -0.999872888120357611938e+00,
-0.999822363679787739196e+00, -0.999760490924432047330e+00,
-0.999686286448317731776e+00, -0.999598799671910683252e+00,
-0.999497112467187190535e+00, -0.999380338025023581928e+00,
-0.999247618943342473599e+00, -0.999098124967667597662e+00,
-0.998931050830810562236e+00, -0.998745614468095114704e+00,
-0.998541055697167906027e+00, -0.998316635318407392531e+00,
-0.998071634524930323302e+00, -0.997805354495957274562e+00,
-0.997517116063472399965e+00, -0.997206259372221959076e+00,
-0.996872143485260161299e+00, -0.996514145914890273849e+00,
-0.996131662079315037786e+00, -0.995724104698407188509e+00,
-0.995290903148810302261e+00, -0.994831502800621000519e+00,
-0.994345364356723405931e+00, -0.993831963212755022209e+00,
-0.993290788851684966211e+00, -0.992721344282788615328e+00,
-0.992123145530863117683e+00, -0.991495721178106132399e+00,
-0.990838611958294243677e+00, -0.990151370400770159181e+00,
-0.989433560520240838716e+00, -0.988684757547429479939e+00,
-0.987904547695124280467e+00, -0.987092527954034067190e+00,
-0.986248305913007552681e+00, -0.985371499598520371114e+00,
-0.984461737328814534596e+00, -0.983518657578632728762e+00,
-0.982541908851080604251e+00, -0.981531149553740106867e+00,
-0.980486047876721339416e+00, -0.979406281670862683806e+00,
-0.978291538324758539526e+00, -0.977141514639705714156e+00,
-0.975955916702011753129e+00, -0.974734459752402667761e+00,
-0.973476868052506926773e+00, -0.972182874748581796578e+00,
-0.970852221732792443256e+00, -0.969484659502459231771e+00,
-0.968079947017759947964e+00, -0.966637851558416567092e+00,
-0.965158148579915665979e+00, -0.963640621569812132521e+00,
-0.962085061904651475741e+00, -0.960491268708020283423e+00,
-0.958859048710200221356e+00, -0.957188216109860962736e+00,
-0.955478592438183697574e+00, -0.953730006425761136415e+00,
-0.951942293872573589498e+00, -0.950115297521294876558e+00,
-0.948248866934137357063e+00, -0.946342858373402905148e+00,
-0.944397134685866648591e+00, -0.942411565191083059813e+00,
-0.940386025573669721370e+00, -0.938320397779592883655e+00,
-0.936214569916450806625e+00, -0.934068436157725787999e+00,
-0.931881896650953639345e+00, -0.929654857429740056670e+00,
-0.927387230329536696843e+00, -0.925078932907075652364e+00,
-0.922729888363349241523e+00, -0.920340025470012420730e+00,
-0.917909278499077501636e+00, -0.915437587155765040644e+00,
-0.912924896514370590080e+00, -0.910371156957004292498e+00,
-0.907776324115058903624e+00, -0.905140358813261595189e+00,
-0.902463227016165675048e+00, -0.899744899776940036639e+00,
-0.896985353188316590376e+00, -0.894184568335559022859e+00,
-0.891342531251319871666e+00, -0.888459232872256998890e+00,
-0.885534668997285008926e+00, -0.882568840247341906842e+00,
-0.879561752026556262568e+00, -0.876513414484705269742e+00,
-0.873423842480859310192e+00, -0.870293055548113905851e+00,
-0.867121077859315215614e+00, -0.863907938193690477146e+00,
-0.860653669904299969802e+00, -0.857358310886232156525e+00,
-0.854021903545468625813e+00, -0.850644494768350279758e+00,
-0.847226135891580884381e+00, -0.843766882672708601038e+00,
-0.840266795261030442350e+00, -0.836725938168868735503e+00,
-0.833144380243172624728e+00, -0.829522194637401400178e+00,
-0.825859458783650001088e+00, -0.822156254364980407373e+00,
-0.818412667287925807395e+00, -0.814628787655137413436e+00,
-0.810804709738146594361e+00, -0.806940531950217611856e+00,
-0.803036356819268687782e+00, -0.799092290960841401800e+00,
-0.795108445051100526780e+00, -0.791084933799848361435e+00,
-0.787021875923539422170e+00, -0.782919394118283016385e+00,
-0.778777615032822744702e+00, -0.774596669241483377036e+00,
-0.770376691217076824278e+00, -0.766117819303760090717e+00,
-0.761820195689839149173e+00, -0.757483966380513637926e+00,
-0.753109281170558142523e+00, -0.748696293616936602823e+00,
-0.744245161011347082309e+00, -0.739756044352694758677e+00,
-0.735229108319491547663e+00, -0.730664521242181261329e+00,
-0.726062455075389632685e+00, -0.721423085370098915485e+00,
-0.716746591245747095767e+00, -0.712033155362252034587e+00,
-0.707282963891961103412e+00, -0.702496206491527078610e+00,
-0.697673076273711232906e+00, -0.692813769779114702895e+00,
-0.687918486947839325756e+00, -0.682987431091079228087e+00,
-0.678020808862644517838e+00, -0.673018830230418479199e+00,
-0.667981708447749702165e+00, -0.662909660024780595461e+00,
-0.657802904699713735422e+00, -0.652661665410017496101e+00,
-0.647486168263572388782e+00, -0.642276642509759513774e+00,
-0.637033320510492495071e+00, -0.631756437711194230414e+00,
-0.626446232611719746542e+00, -0.621102946737226402941e+00,
-0.615726824608992638014e+00, -0.610318113715186400156e+00,
-0.604877064481584353319e+00, -0.599403930242242892974e+00,
-0.593898967210121954393e+00, -0.588362434447662541434e+00,
-0.582794593837318850840e+00, -0.577195710052045814844e+00,
-0.571566050525742833992e+00, -0.565905885423654422623e+00,
-0.560215487612728441818e+00, -0.554495132631932548866e+00,
-0.548745098662529448608e+00, -0.542965666498311490492e+00,
-0.537157119515795115982e+00, -0.531319743644375623972e+00,
-0.525453827336442687395e+00, -0.519559661537457021993e+00,
-0.513637539655988578507e+00, -0.507687757533716602155e+00,
-0.501710613415391878251e+00, -0.495706407918761460170e+00,
-0.489675444004456155436e+00, -0.483618026945841027562e+00,
-0.477534464298829155284e+00, -0.471425065871658876934e+00,
-0.465290143694634735858e+00, -0.459130011989832332874e+00,
-0.452944987140767283784e+00, -0.446735387662028473742e+00,
-0.440501534168875795783e+00, -0.434243749346802558002e+00,
-0.427962357921062742583e+00, -0.421657686626163300056e+00,
-0.415330064175321663764e+00, -0.408979821229888672409e+00,
-0.402607290368737092671e+00, -0.396212806057615939183e+00,
-0.389796704618470795479e+00, -0.383359324198730346916e+00,
-0.376901004740559344802e+00, -0.370422087950078230138e+00,
-0.363922917266549655269e+00, -0.357403837831532152376e+00,
-0.350865196458001209011e+00, -0.344307341599438022777e+00,
-0.337730623318886219621e+00, -0.331135393257976833093e+00,
-0.324522004605921855207e+00, -0.317890812068476683182e+00,
-0.311242171836871800300e+00, -0.304576441556714043335e+00,
-0.297893980296857823437e+00, -0.291195148518246681964e+00,
-0.284480308042725577496e+00, -0.277749822021824315065e+00,
-0.271004054905512543536e+00, -0.264243372410926761945e+00,
-0.257468141491069790481e+00, -0.250678730303483176613e+00,
-0.243875508178893021593e+00, -0.237058845589829727213e+00,
-0.230229114119222177156e+00, -0.223386686428966881628e+00,
-0.216531936228472628081e+00, -0.209665238243181194766e+00,
-0.202786968183064697557e+00, -0.195897502711100153915e+00,
-0.188997219411721861059e+00, -0.182086496759252198246e+00,
-0.175165714086311475707e+00, -0.168235251552207464982e+00,
-0.161295490111305257361e+00, -0.154346811481378108692e+00,
-0.147389598111939940054e+00, -0.140424233152560174594e+00,
-0.133451100421161601344e+00, -0.126470584372301966851e+00,
-0.119483070065440005133e+00, -0.112488943133186625746e+00,
-0.105488589749541988533e+00, -0.984823965981192020903e-01,
-0.914707508403553909095e-01, -0.844540400837108837102e-01,
-0.774326523498572825675e-01, -0.704069760428551790633e-01,
-0.633773999173222898797e-01, -0.563443130465927899720e-01,
-0.493081047908686267156e-01, -0.422691647653636032124e-01,
-0.352278828084410232603e-01, -0.281846489497456943394e-01,
-0.211398533783310883350e-01, -0.140938864107824626142e-01,
-0.704713845933674648514e-02, +0.000000000000000000000e+00,
+0.704713845933674648514e-02, +0.140938864107824626142e-01,
+0.211398533783310883350e-01, +0.281846489497456943394e-01,
+0.352278828084410232603e-01, +0.422691647653636032124e-01,
+0.493081047908686267156e-01, +0.563443130465927899720e-01,
+0.633773999173222898797e-01, +0.704069760428551790633e-01,
+0.774326523498572825675e-01, +0.844540400837108837102e-01,
+0.914707508403553909095e-01, +0.984823965981192020903e-01,
+0.105488589749541988533e+00, +0.112488943133186625746e+00,
+0.119483070065440005133e+00, +0.126470584372301966851e+00,
+0.133451100421161601344e+00, +0.140424233152560174594e+00,
+0.147389598111939940054e+00, +0.154346811481378108692e+00,
+0.161295490111305257361e+00, +0.168235251552207464982e+00,
+0.175165714086311475707e+00, +0.182086496759252198246e+00,
+0.188997219411721861059e+00, +0.195897502711100153915e+00,
+0.202786968183064697557e+00, +0.209665238243181194766e+00,
+0.216531936228472628081e+00, +0.223386686428966881628e+00,
+0.230229114119222177156e+00, +0.237058845589829727213e+00,
+0.243875508178893021593e+00, +0.250678730303483176613e+00,
+0.257468141491069790481e+00, +0.264243372410926761945e+00,
+0.271004054905512543536e+00, +0.277749822021824315065e+00,
+0.284480308042725577496e+00, +0.291195148518246681964e+00,
+0.297893980296857823437e+00, +0.304576441556714043335e+00,
+0.311242171836871800300e+00, +0.317890812068476683182e+00,
+0.324522004605921855207e+00, +0.331135393257976833093e+00,
+0.337730623318886219621e+00, +0.344307341599438022777e+00,
+0.350865196458001209011e+00, +0.357403837831532152376e+00,
+0.363922917266549655269e+00, +0.370422087950078230138e+00,
+0.376901004740559344802e+00, +0.383359324198730346916e+00,
+0.389796704618470795479e+00, +0.396212806057615939183e+00,
+0.402607290368737092671e+00, +0.408979821229888672409e+00,
+0.415330064175321663764e+00, +0.421657686626163300056e+00,
+0.427962357921062742583e+00, +0.434243749346802558002e+00,
+0.440501534168875795783e+00, +0.446735387662028473742e+00,
+0.452944987140767283784e+00, +0.459130011989832332874e+00,
+0.465290143694634735858e+00, +0.471425065871658876934e+00,
+0.477534464298829155284e+00, +0.483618026945841027562e+00,
+0.489675444004456155436e+00, +0.495706407918761460170e+00,
+0.501710613415391878251e+00, +0.507687757533716602155e+00,
+0.513637539655988578507e+00, +0.519559661537457021993e+00,
+0.525453827336442687395e+00, +0.531319743644375623972e+00,
+0.537157119515795115982e+00, +0.542965666498311490492e+00,
+0.548745098662529448608e+00, +0.554495132631932548866e+00,
+0.560215487612728441818e+00, +0.565905885423654422623e+00,
+0.571566050525742833992e+00, +0.577195710052045814844e+00,
+0.582794593837318850840e+00, +0.588362434447662541434e+00,
+0.593898967210121954393e+00, +0.599403930242242892974e+00,
+0.604877064481584353319e+00, +0.610318113715186400156e+00,
+0.615726824608992638014e+00, +0.621102946737226402941e+00,
+0.626446232611719746542e+00, +0.631756437711194230414e+00,
+0.637033320510492495071e+00, +0.642276642509759513774e+00,
+0.647486168263572388782e+00, +0.652661665410017496101e+00,
+0.657802904699713735422e+00, +0.662909660024780595461e+00,
+0.667981708447749702165e+00, +0.673018830230418479199e+00,
+0.678020808862644517838e+00, +0.682987431091079228087e+00,
+0.687918486947839325756e+00, +0.692813769779114702895e+00,
+0.697673076273711232906e+00, +0.702496206491527078610e+00,
+0.707282963891961103412e+00, +0.712033155362252034587e+00,
+0.716746591245747095767e+00, +0.721423085370098915485e+00,
+0.726062455075389632685e+00, +0.730664521242181261329e+00,
+0.735229108319491547663e+00, +0.739756044352694758677e+00,
+0.744245161011347082309e+00, +0.748696293616936602823e+00,
+0.753109281170558142523e+00, +0.757483966380513637926e+00,
+0.761820195689839149173e+00, +0.766117819303760090717e+00,
+0.770376691217076824278e+00, +0.774596669241483377036e+00,
+0.778777615032822744702e+00, +0.782919394118283016385e+00,
+0.787021875923539422170e+00, +0.791084933799848361435e+00,
+0.795108445051100526780e+00, +0.799092290960841401800e+00,
+0.803036356819268687782e+00, +0.806940531950217611856e+00,
+0.810804709738146594361e+00, +0.814628787655137413436e+00,
+0.818412667287925807395e+00, +0.822156254364980407373e+00,
+0.825859458783650001088e+00, +0.829522194637401400178e+00,
+0.833144380243172624728e+00, +0.836725938168868735503e+00,
+0.840266795261030442350e+00, +0.843766882672708601038e+00,
+0.847226135891580884381e+00, +0.850644494768350279758e+00,
+0.854021903545468625813e+00, +0.857358310886232156525e+00,
+0.860653669904299969802e+00, +0.863907938193690477146e+00,
+0.867121077859315215614e+00, +0.870293055548113905851e+00,
+0.873423842480859310192e+00, +0.876513414484705269742e+00,
+0.879561752026556262568e+00, +0.882568840247341906842e+00,
+0.885534668997285008926e+00, +0.888459232872256998890e+00,
+0.891342531251319871666e+00, +0.894184568335559022859e+00,
+0.896985353188316590376e+00, +0.899744899776940036639e+00,
+0.902463227016165675048e+00, +0.905140358813261595189e+00,
+0.907776324115058903624e+00, +0.910371156957004292498e+00,
+0.912924896514370590080e+00, +0.915437587155765040644e+00,
+0.917909278499077501636e+00, +0.920340025470012420730e+00,
+0.922729888363349241523e+00, +0.925078932907075652364e+00,
+0.927387230329536696843e+00, +0.929654857429740056670e+00,
+0.931881896650953639345e+00, +0.934068436157725787999e+00,
+0.936214569916450806625e+00, +0.938320397779592883655e+00,
+0.940386025573669721370e+00, +0.942411565191083059813e+00,
+0.944397134685866648591e+00, +0.946342858373402905148e+00,
+0.948248866934137357063e+00, +0.950115297521294876558e+00,
+0.951942293872573589498e+00, +0.953730006425761136415e+00,
+0.955478592438183697574e+00, +0.957188216109860962736e+00,
+0.958859048710200221356e+00, +0.960491268708020283423e+00,
+0.962085061904651475741e+00, +0.963640621569812132521e+00,
+0.965158148579915665979e+00, +0.966637851558416567092e+00,
+0.968079947017759947964e+00, +0.969484659502459231771e+00,
+0.970852221732792443256e+00, +0.972182874748581796578e+00,
+0.973476868052506926773e+00, +0.974734459752402667761e+00,
+0.975955916702011753129e+00, +0.977141514639705714156e+00,
+0.978291538324758539526e+00, +0.979406281670862683806e+00,
+0.980486047876721339416e+00, +0.981531149553740106867e+00,
+0.982541908851080604251e+00, +0.983518657578632728762e+00,
+0.984461737328814534596e+00, +0.985371499598520371114e+00,
+0.986248305913007552681e+00, +0.987092527954034067190e+00,
+0.987904547695124280467e+00, +0.988684757547429479939e+00,
+0.989433560520240838716e+00, +0.990151370400770159181e+00,
+0.990838611958294243677e+00, +0.991495721178106132399e+00,
+0.992123145530863117683e+00, +0.992721344282788615328e+00,
+0.993290788851684966211e+00, +0.993831963212755022209e+00,
+0.994345364356723405931e+00, +0.994831502800621000519e+00,
+0.995290903148810302261e+00, +0.995724104698407188509e+00,
+0.996131662079315037786e+00, +0.996514145914890273849e+00,
+0.996872143485260161299e+00, +0.997206259372221959076e+00,
+0.997517116063472399965e+00, +0.997805354495957274562e+00,
+0.998071634524930323302e+00, +0.998316635318407392531e+00,
+0.998541055697167906027e+00, +0.998745614468095114704e+00,
+0.998931050830810562236e+00, +0.999098124967667597662e+00,
+0.999247618943342473599e+00, +0.999380338025023581928e+00,
+0.999497112467187190535e+00, +0.999598799671910683252e+00,
+0.999686286448317731776e+00, +0.999760490924432047330e+00,
+0.999822363679787739196e+00, +0.999872888120357611938e+00,
+0.999913081144678282800e+00, +0.999943996207054375764e+00,
+0.999966730098486276883e+00, +0.999982430354891598580e+00,
+0.999992298136257588028e+00, +0.999997596379748464620e+00,
+0.999999672956734384381e+00,
), (
0.945715933950007048827e-06, 0.345456507169149134898e-05,
0.736624069102321668857e-05, 0.125792781889592743525e-04,
0.190213681905875816679e-04, 0.266376412339000901358e-04,
0.353751372055189588628e-04, 0.451863674126296143105e-04,
0.560319507856164252140e-04, 0.678774554733972416227e-04,
0.806899228014035293851e-04, 0.944366322532705527066e-04,
0.109085545645741522051e-03, 0.124606200241498368482e-03,
0.140970302204104791413e-03, 0.158151830411132242924e-03,
0.176126765545083195474e-03, 0.194872642236641146532e-03,
0.214368090034216937149e-03, 0.234592462123925204879e-03,
0.255525589595236862014e-03, 0.277147657465187357459e-03,
0.299439176850911730874e-03, 0.322381020652862389664e-03,
0.345954492129903871350e-03, 0.370141402122251665232e-03,
0.394924138246873704434e-03, 0.420285716355361231823e-03,
0.446209810101403247488e-03, 0.472680758429262691232e-03,
0.499683553312800484519e-03, 0.527203811431658386125e-03,
0.555227733977307579715e-03, 0.583742058714979703847e-03,
0.612734008012225209294e-03, 0.642191235948505088403e-03,
0.672101776960108194646e-03, 0.702453997827572321358e-03,
0.733236554224767912055e-03, 0.764438352543882784191e-03,
0.796048517297550871506e-03, 0.828056364077226302608e-03,
0.860451377808527848128e-03, 0.893223195879324912340e-03,
0.926361595613111283368e-03, 0.959856485506936206261e-03,
0.993697899638760857945e-03, 0.102787599466367326179e-02,
0.106238104885340071375e-02, 0.109720346268191941940e-02,
0.113233376051597664917e-02, 0.116776259302858043685e-02,
0.120348074001265964881e-02, 0.123947911332878396534e-02,
0.127574875977346947345e-02, 0.131228086370221478128e-02,
0.134906674928353113127e-02, 0.138609788229672549700e-02,
0.142336587141720519900e-02, 0.146086246895890987689e-02,
0.149857957106456636214e-02, 0.153650921735128916170e-02,
0.157464359003212166189e-02, 0.161297501254393423070e-02,
0.165149594771914570655e-02, 0.169019899554346019117e-02,
0.172907689054461607168e-02, 0.176812249885838886701e-02,
0.180732881501808930079e-02, 0.184668895851282540913e-02,
0.188619617015808475394e-02, 0.192584380831993546204e-02,
0.196562534503150547732e-02, 0.200553436203751169944e-02,
0.204556454679958293446e-02, 0.208570968849203942640e-02,
0.212596367401472533045e-02, 0.216632048404649142727e-02,
0.220677418916003329194e-02, 0.224731894601603393082e-02,
0.228794899365195972378e-02, 0.232865864987842738864e-02,
0.236944230779380495146e-02, 0.241029443242563417382e-02,
0.245120955750556483923e-02, 0.249218228238276930060e-02,
0.253320726907925325750e-02, 0.257427923948908888092e-02,
0.261539297272236109225e-02, 0.265654330259352828314e-02,
0.269772511525294586667e-02, 0.273893334695947541201e-02,
0.278016298199139435045e-02, 0.282140905069222207923e-02,
0.286266662764757868253e-02, 0.290393082998878368175e-02,
0.294519681581857582284e-02, 0.298645978275408290247e-02,
0.302771496658198544480e-02, 0.306895764002069252174e-02,
0.311018311158427546158e-02, 0.315138672454287935858e-02,
0.319256385597434736790e-02, 0.323370991590184336368e-02,
0.327482034651233969564e-02, 0.331589062145094394706e-02,
0.335691624518616761342e-02, 0.339789275244138669739e-02,
0.343881570768790591876e-02, 0.347968070469521146972e-02,
0.352048336613417922682e-02, 0.356121934322919357659e-02,
0.360188431545532431869e-02, 0.364247399027690353194e-02,
0.368298410292403911967e-02, 0.372341041620379550870e-02,
0.376374872034296338241e-02, 0.380399483285952829161e-02,
0.384414459846013158917e-02, 0.388419388896099560998e-02,
0.392413860322995774660e-02, 0.396397466714742455513e-02,
0.400369803358421688562e-02, 0.404330468239442998549e-02,
0.408279062042157838350e-02, 0.412215188151643401528e-02,
0.416138452656509745764e-02, 0.420048464352596631772e-02,
0.423944834747438184434e-02, 0.427827178065384480959e-02,
0.431695111253279479928e-02, 0.435548253986604343679e-02,
0.439386228676004195260e-02, 0.443208660474124713206e-02,
0.447015177282692726900e-02, 0.450805409759782158001e-02,
0.454578991327213285488e-02, 0.458335558178039420335e-02,
0.462074749284080687482e-02, 0.465796206403469754658e-02,
0.469499574088179046532e-02, 0.473184499691503264714e-02,
0.476850633375474925263e-02, 0.480497628118194150483e-02,
0.484125139721057135214e-02, 0.487732826815870573054e-02,
0.491320350871841897367e-02, 0.494887376202437487201e-02,
0.498433569972103029914e-02, 0.501958602202842039909e-02,
0.505462145780650125058e-02, 0.508943876461803986674e-02,
0.512403472879005351831e-02, 0.515840616547381084096e-02,
0.519254991870341614863e-02, 0.522646286145300596306e-02,
0.526014189569259311205e-02, 0.529358395244259896547e-02,
0.532678599182711857974e-02, 0.535974500312596681161e-02,
0.539245800482555593606e-02, 0.542492204466865704951e-02,
0.545713419970309863995e-02, 0.548909157632945623482e-02,
0.552079131034778706457e-02, 0.555223056700346326850e-02,
0.558340654103215637610e-02, 0.561431645670402467678e-02,
0.564495756786715368885e-02, 0.567532715799029830087e-02,
0.570542254020497332312e-02, 0.573524105734693719020e-02,
0.576478008199711142954e-02, 0.579403701652197628421e-02,
0.582300929311348057702e-02, 0.585169437382850155033e-02,
0.588008975062788803205e-02, 0.590819294541511788161e-02,
0.593600151007459827614e-02, 0.596351302650963502011e-02,
0.599072510668009471472e-02, 0.601763539263978131522e-02,
0.604424155657354634589e-02, 0.607054130083414983949e-02,
0.609653235797888692923e-02, 0.612221249080599294931e-02,
0.614757949239083790214e-02, 0.617263118612191922727e-02,
0.619736542573665996342e-02, 0.622178009535701763157e-02,
0.624587310952490748541e-02, 0.626964241323744217671e-02,
0.629308598198198836688e-02, 0.631620182177103938227e-02,
0.633898796917690165912e-02, 0.636144249136619145314e-02,
0.638356348613413709795e-02, 0.640534908193868098342e-02,
0.642679743793437438922e-02, 0.644790674400605734710e-02,
0.646867522080231481688e-02, 0.648910111976869964292e-02,
0.650918272318071200827e-02, 0.652891834417652442012e-02,
0.654830632678944064054e-02, 0.656734504598007641819e-02,
0.658603290766824937794e-02, 0.660436834876456498276e-02,
0.662234983720168509457e-02, 0.663997587196526532519e-02,
0.665724498312454708217e-02, 0.667415573186258997654e-02,
0.669070671050613006584e-02, 0.670689654255504925648e-02,
0.672272388271144108036e-02, 0.673818741690825799086e-02,
0.675328586233752529078e-02, 0.676801796747810680683e-02,
0.678238251212300746082e-02, 0.679637830740619795480e-02,
0.681000419582894688374e-02, 0.682325905128564571420e-02,
0.683614177908911221841e-02, 0.684865131599535812903e-02,
0.686078663022780697951e-02, 0.687254672150094831613e-02,
0.688393062104341470995e-02, 0.689493739162046825872e-02,
0.690556612755588354803e-02, 0.691581595475321433825e-02,
0.692568603071643155621e-02, 0.693517554456992049848e-02,
0.694428371707782549438e-02, 0.695300980066273063177e-02,
0.696135307942366551493e-02, 0.696931286915342540213e-02,
0.697688851735519545845e-02, 0.698407940325846925786e-02,
0.699088493783425207545e-02, 0.699730456380953992594e-02,
0.700333775568106572820e-02, 0.700898401972830440494e-02,
0.701424289402572916425e-02, 0.701911394845431165171e-02,
0.702359678471225911031e-02, 0.702769103632498213858e-02,
0.703139636865428709508e-02, 0.703471247890678765907e-02,
0.703763909614153052319e-02, 0.704017598127683066242e-02,
0.704232292709631209597e-02, 0.704407975825415053266e-02,
0.704544633127951476780e-02, 0.704642253458020417748e-02,
0.704700828844548013730e-02, 0.704720354504808967346e-02,
0.704700828844548013730e-02, 0.704642253458020417748e-02,
0.704544633127951476780e-02, 0.704407975825415053266e-02,
0.704232292709631209597e-02, 0.704017598127683066242e-02,
0.703763909614153052319e-02, 0.703471247890678765907e-02,
0.703139636865428709508e-02, 0.702769103632498213858e-02,
0.702359678471225911031e-02, 0.701911394845431165171e-02,
0.701424289402572916425e-02, 0.700898401972830440494e-02,
0.700333775568106572820e-02, 0.699730456380953992594e-02,
0.699088493783425207545e-02, 0.698407940325846925786e-02,
0.697688851735519545845e-02, 0.696931286915342540213e-02,
0.696135307942366551493e-02, 0.695300980066273063177e-02,
0.694428371707782549438e-02, 0.693517554456992049848e-02,
0.692568603071643155621e-02, 0.691581595475321433825e-02,
0.690556612755588354803e-02, 0.689493739162046825872e-02,
0.688393062104341470995e-02, 0.687254672150094831613e-02,
0.686078663022780697951e-02, 0.684865131599535812903e-02,
0.683614177908911221841e-02, 0.682325905128564571420e-02,
0.681000419582894688374e-02, 0.679637830740619795480e-02,
0.678238251212300746082e-02, 0.676801796747810680683e-02,
0.675328586233752529078e-02, 0.673818741690825799086e-02,
0.672272388271144108036e-02, 0.670689654255504925648e-02,
0.669070671050613006584e-02, 0.667415573186258997654e-02,
0.665724498312454708217e-02, 0.663997587196526532519e-02,
0.662234983720168509457e-02, 0.660436834876456498276e-02,
0.658603290766824937794e-02, 0.656734504598007641819e-02,
0.654830632678944064054e-02, 0.652891834417652442012e-02,
0.650918272318071200827e-02, 0.648910111976869964292e-02,
0.646867522080231481688e-02, 0.644790674400605734710e-02,
0.642679743793437438922e-02, 0.640534908193868098342e-02,
0.638356348613413709795e-02, 0.636144249136619145314e-02,
0.633898796917690165912e-02, 0.631620182177103938227e-02,
0.629308598198198836688e-02, 0.626964241323744217671e-02,
0.624587310952490748541e-02, 0.622178009535701763157e-02,
0.619736542573665996342e-02, 0.617263118612191922727e-02,
0.614757949239083790214e-02, 0.612221249080599294931e-02,
0.609653235797888692923e-02, 0.607054130083414983949e-02,
0.604424155657354634589e-02, 0.601763539263978131522e-02,
0.599072510668009471472e-02, 0.596351302650963502011e-02,
0.593600151007459827614e-02, 0.590819294541511788161e-02,
0.588008975062788803205e-02, 0.585169437382850155033e-02,
0.582300929311348057702e-02, 0.579403701652197628421e-02,
0.576478008199711142954e-02, 0.573524105734693719020e-02,
0.570542254020497332312e-02, 0.567532715799029830087e-02,
0.564495756786715368885e-02, 0.561431645670402467678e-02,
0.558340654103215637610e-02, 0.555223056700346326850e-02,
0.552079131034778706457e-02, 0.548909157632945623482e-02,
0.545713419970309863995e-02, 0.542492204466865704951e-02,
0.539245800482555593606e-02, 0.535974500312596681161e-02,
0.532678599182711857974e-02, 0.529358395244259896547e-02,
0.526014189569259311205e-02, 0.522646286145300596306e-02,
0.519254991870341614863e-02, 0.515840616547381084096e-02,
0.512403472879005351831e-02, 0.508943876461803986674e-02,
0.505462145780650125058e-02, 0.501958602202842039909e-02,
0.498433569972103029914e-02, 0.494887376202437487201e-02,
0.491320350871841897367e-02, 0.487732826815870573054e-02,
0.484125139721057135214e-02, 0.480497628118194150483e-02,
0.476850633375474925263e-02, 0.473184499691503264714e-02,
0.469499574088179046532e-02, 0.465796206403469754658e-02,
0.462074749284080687482e-02, 0.458335558178039420335e-02,
0.454578991327213285488e-02, 0.450805409759782158001e-02,
0.447015177282692726900e-02, 0.443208660474124713206e-02,
0.439386228676004195260e-02, 0.435548253986604343679e-02,
0.431695111253279479928e-02, 0.427827178065384480959e-02,
0.423944834747438184434e-02, 0.420048464352596631772e-02,
0.416138452656509745764e-02, 0.412215188151643401528e-02,
0.408279062042157838350e-02, 0.404330468239442998549e-02,
0.400369803358421688562e-02, 0.396397466714742455513e-02,
0.392413860322995774660e-02, 0.388419388896099560998e-02,
0.384414459846013158917e-02, 0.380399483285952829161e-02,
0.376374872034296338241e-02, 0.372341041620379550870e-02,
0.368298410292403911967e-02, 0.364247399027690353194e-02,
0.360188431545532431869e-02, 0.356121934322919357659e-02,
0.352048336613417922682e-02, 0.347968070469521146972e-02,
0.343881570768790591876e-02, 0.339789275244138669739e-02,
0.335691624518616761342e-02, 0.331589062145094394706e-02,
0.327482034651233969564e-02, 0.323370991590184336368e-02,
0.319256385597434736790e-02, 0.315138672454287935858e-02,
0.311018311158427546158e-02, 0.306895764002069252174e-02,
0.302771496658198544480e-02, 0.298645978275408290247e-02,
0.294519681581857582284e-02, 0.290393082998878368175e-02,
0.286266662764757868253e-02, 0.282140905069222207923e-02,
0.278016298199139435045e-02, 0.273893334695947541201e-02,
0.269772511525294586667e-02, 0.265654330259352828314e-02,
0.261539297272236109225e-02, 0.257427923948908888092e-02,
0.253320726907925325750e-02, 0.249218228238276930060e-02,
0.245120955750556483923e-02, 0.241029443242563417382e-02,
0.236944230779380495146e-02, 0.232865864987842738864e-02,
0.228794899365195972378e-02, 0.224731894601603393082e-02,
0.220677418916003329194e-02, 0.216632048404649142727e-02,
0.212596367401472533045e-02, 0.208570968849203942640e-02,
0.204556454679958293446e-02, 0.200553436203751169944e-02,
0.196562534503150547732e-02, 0.192584380831993546204e-02,
0.188619617015808475394e-02, 0.184668895851282540913e-02,
0.180732881501808930079e-02, 0.176812249885838886701e-02,
0.172907689054461607168e-02, 0.169019899554346019117e-02,
0.165149594771914570655e-02, 0.161297501254393423070e-02,
0.157464359003212166189e-02, 0.153650921735128916170e-02,
0.149857957106456636214e-02, 0.146086246895890987689e-02,
0.142336587141720519900e-02, 0.138609788229672549700e-02,
0.134906674928353113127e-02, 0.131228086370221478128e-02,
0.127574875977346947345e-02, 0.123947911332878396534e-02,
0.120348074001265964881e-02, 0.116776259302858043685e-02,
0.113233376051597664917e-02, 0.109720346268191941940e-02,
0.106238104885340071375e-02, 0.102787599466367326179e-02,
0.993697899638760857945e-03, 0.959856485506936206261e-03,
0.926361595613111283368e-03, 0.893223195879324912340e-03,
0.860451377808527848128e-03, 0.828056364077226302608e-03,
0.796048517297550871506e-03, 0.764438352543882784191e-03,
0.733236554224767912055e-03, 0.702453997827572321358e-03,
0.672101776960108194646e-03, 0.642191235948505088403e-03,
0.612734008012225209294e-03, 0.583742058714979703847e-03,
0.555227733977307579715e-03, 0.527203811431658386125e-03,
0.499683553312800484519e-03, 0.472680758429262691232e-03,
0.446209810101403247488e-03, 0.420285716355361231823e-03,
0.394924138246873704434e-03, 0.370141402122251665232e-03,
0.345954492129903871350e-03, 0.322381020652862389664e-03,
0.299439176850911730874e-03, 0.277147657465187357459e-03,
0.255525589595236862014e-03, 0.234592462123925204879e-03,
0.214368090034216937149e-03, 0.194872642236641146532e-03,
0.176126765545083195474e-03, 0.158151830411132242924e-03,
0.140970302204104791413e-03, 0.124606200241498368482e-03,
0.109085545645741522051e-03, 0.944366322532705527066e-04,
0.806899228014035293851e-04, 0.678774554733972416227e-04,
0.560319507856164252140e-04, 0.451863674126296143105e-04,
0.353751372055189588628e-04, 0.266376412339000901358e-04,
0.190213681905875816679e-04, 0.125792781889592743525e-04,
0.736624069102321668857e-05, 0.345456507169149134898e-05,
0.945715933950007048827e-06,
))
}
| |
################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Demonstrate the linear state-space model with switching dynamics.
The model differs from the classical linear state-space model in that it has a
set of state dynamics matrices of which one is used at each time instance. A
hidden Markov model is used to select the dynamics matrix.
Some functions in this module are re-usable:
* ``model`` can be used to construct the LSSM with switching dynamics.
* ``infer`` can be used to apply the model to given data.
"""
import numpy as np
import matplotlib.pyplot as plt
from bayespy.nodes import (GaussianARD,
SwitchingGaussianMarkovChain,
CategoricalMarkovChain,
Dirichlet,
Mixture,
Gamma,
SumMultiply)
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
import bayespy.plot as bpplt
def model(M=20, N=100, D=10, K=3):
    """
    Construct the linear state-space model with switching dynamics.

    Parameters
    ----------
    M : int
        Dimensionality of the observation vectors.
    N : int
        Number of time instances.
    D : int
        Dimensionality of the latent state space.
    K : int
        Number of alternative dynamics matrices (HMM states).

    Returns
    -------
    VB
        Variational Bayesian inference engine containing all model nodes.
    """
    #
    # Switching dynamics (HMM)
    #
    # Prior for initial state probabilities
    rho = Dirichlet(1e-3*np.ones(K),
                    name='rho')
    # Prior for state transition probabilities
    V = Dirichlet(1e-3*np.ones(K),
                  plates=(K,),
                  name='V')
    # Initialize transitions to be diagonally dominant so that the chain
    # initially favors staying in the same state.
    v = 10*np.identity(K) + 1*np.ones((K,K))
    v /= np.sum(v, axis=-1, keepdims=True)
    V.initialize_from_value(v)
    # Hidden states (with unknown initial state probabilities and state
    # transition probabilities)
    Z = CategoricalMarkovChain(rho, V,
                               states=N-1,
                               name='Z',
                               plotter=bpplt.CategoricalMarkovChainPlotter(),
                               initialize=False)
    # Break symmetry by seeding the moments of Z with random probabilities:
    # u[0] is the initial-state distribution, u[1] the per-step pairwise
    # transition probabilities for the remaining N-2 steps.
    Z.u[0] = np.random.dirichlet(np.ones(K))
    Z.u[1] = np.reshape(np.random.dirichlet(0.5*np.ones(K*K), size=(N-2)),
                        (N-2, K, K))
    #
    # Linear state-space models
    #
    # Dynamics matrix with ARD
    # (K,D) x ()
    alpha = Gamma(1e-5,
                  1e-5,
                  plates=(K,1,D),
                  name='alpha')
    # (K,1,1,D) x (D)
    A = GaussianARD(0,
                    alpha,
                    shape=(D,),
                    plates=(K,D),
                    name='A',
                    plotter=bpplt.GaussianHintonPlotter())
    # Initialize each of the K dynamics matrices near the identity
    A.initialize_from_value(np.identity(D)*np.ones((K,D,D))
                            + 0.1*np.random.randn(K,D,D))
    # Latent states with dynamics
    # (K,1) x (N,D)
    X = SwitchingGaussianMarkovChain(np.zeros(D),         # mean of x0
                                     1e-3*np.identity(D), # prec of x0
                                     A,                   # dynamics
                                     Z,                   # dynamics selection
                                     np.ones(D),          # innovation
                                     n=N,                 # time instances
                                     name='X',
                                     plotter=bpplt.GaussianMarkovChainPlotter())
    X.initialize_from_value(10*np.random.randn(N,D))
    # Mixing matrix from latent space to observation space using ARD
    # (K,1,1,D) x ()
    gamma = Gamma(1e-5,
                  1e-5,
                  plates=(D,),
                  name='gamma')
    # (K,M,1) x (D)
    C = GaussianARD(0,
                    gamma,
                    shape=(D,),
                    plates=(M,1),
                    name='C',
                    plotter=bpplt.GaussianHintonPlotter(rows=-3,cols=-1))
    C.initialize_from_value(np.random.randn(M,1,D))
    # Underlying noiseless function
    # (K,M,N) x ()
    F = SumMultiply('i,i',
                    C,
                    X,
                    name='F')
    #
    # Mixing the models
    #
    # Observation noise
    tau = Gamma(1e-5,
                1e-5,
                name='tau')
    tau.initialize_from_value(1e2)
    # Emission/observation distribution
    Y = GaussianARD(F, tau,
                    name='Y')
    # Collect all nodes into the VB inference engine
    Q = VB(Y, F,
           Z, rho, V,
           C, gamma, X, A, alpha,
           tau)
    return Q
def infer(y, D, K, rotate=True, debug=False, maxiter=100, mask=True,
          plot_C=True, monitor=False, update_hyper=0, autosave=None):
    """
    Apply the LSSM with switching dynamics to the given data.

    Parameters
    ----------
    y : array, shape (M, N)
        Observed data matrix.
    D : int
        Latent space dimensionality.
    K : int
        Number of switching dynamics states.
    rotate : bool
        Whether to use the rotational parameter-expansion speed-up.
    debug : bool
        Whether to verify rotation bounds/gradients (slow).
    maxiter : int
        Number of VB iterations.
    mask : bool or array
        Observation mask for missing values.
    plot_C : bool
        Whether the mixing matrix C should be plotted during monitoring.
    monitor : bool
        Whether to plot the distributions after each update.
    update_hyper : int
        Number of warm-up iterations before hyperparameters are updated.
    autosave : str or None
        Filename for periodic autosaving of the inference state.
    """
    (M, N) = np.shape(y)

    # Build the model for data of this shape
    Q = model(M=M, K=K, N=N, D=D)
    if not plot_C:
        Q['C'].set_plotter(None)
    if autosave is not None:
        Q.set_autosave(autosave, iterations=10)

    # Feed in the observations
    Q['Y'].observe(y, mask=mask)

    # Set up the rotation speed-up
    if rotate:
        # Rotation of the D-dimensional state space (X, A, C) used during
        # warm-up, while hyperparameters are kept fixed.
        init_rot_A = transformations.RotateGaussianARD(Q['A'])
        init_rot_X = transformations.RotateSwitchingMarkovChain(Q['X'],
                                                                Q['A'],
                                                                Q['Z'],
                                                                init_rot_A)
        init_rot_C = transformations.RotateGaussianARD(Q['C'])
        R_init = transformations.RotationOptimizer(init_rot_X, init_rot_C, D)
        # Rotation used once the hyperparameters are updated as well.
        rot_A = transformations.RotateGaussianARD(Q['A'],
                                                  Q['alpha'])
        rot_X = transformations.RotateSwitchingMarkovChain(Q['X'],
                                                           Q['A'],
                                                           Q['Z'],
                                                           rot_A)
        rot_C = transformations.RotateGaussianARD(Q['C'],
                                                  Q['gamma'])
        R = transformations.RotationOptimizer(rot_X, rot_C, D)

    rotate_kwargs = {'maxiter': 10}
    if debug:
        rotate_kwargs['check_bound'] = True
        rotate_kwargs['check_gradient'] = True

    # Run inference
    if monitor:
        Q.plot()
    for iteration in range(maxiter):
        if iteration < update_hyper:
            # Warm-up: update only the main nodes, hyperparameters fixed
            Q.update('X', 'C', 'A', 'tau', 'Z', plot=monitor)
            if rotate:
                R_init.rotate(**rotate_kwargs)
        else:
            Q.update(plot=monitor)
            if rotate:
                R.rotate(**rotate_kwargs)

    return Q
def simulate_data(N):
    """
    Generate a 1-D time series with switching dynamics.

    Two hidden regimes are used: 1) a slow oscillation of the 2-D latent
    state, and 2) a random walk in its first coordinate.  Returns the noisy
    observations (shape ``(1, N)``) and the underlying noiseless signal
    (shape ``(N,)``).
    """
    # Regime 0: rotation by a small angle (oscillation);
    # regime 1: keep the first coordinate, zero out the second (random walk).
    w1 = 0.02 * 2*np.pi
    dynamics = [[[np.cos(w1), -np.sin(w1)],
                 [np.sin(w1), np.cos(w1)]],
                [[1.0, 0.0],
                 [0.0, 0.0]]]
    emission = [[1.0, 0.0]]

    # Symmetric two-state transition matrix: stay with probability q,
    # switch with probability r.
    q = 0.993
    r = (1-q)/(2-1)
    trans = q*np.identity(2) + r*(np.ones((2,2))-np.identity(2))

    latent = np.zeros((N, 2))
    states = np.zeros(N)
    observed = np.zeros(N)
    clean = np.zeros(N)

    # Draw the initial regime and latent state (RNG call order matters for
    # reproducibility, so it mirrors the sampling sequence exactly).
    state = np.random.randint(2)
    x = np.random.randn(2)
    states[0] = state
    latent[0, :] = x
    for t in range(1, N):
        x = np.dot(dynamics[state], x) + np.random.randn(2)
        f = np.dot(emission, x)
        y = f + 5*np.random.randn()
        state = np.random.choice(2, p=trans[state])
        states[t] = state
        latent[t, :] = x
        observed[t] = y
        clean[t] = f

    return (observed[None, :], clean)
@bpplt.interactive
def demo(N=1000, maxiter=100, D=3, K=2, seed=42, plot=True, debug=False,
         rotate=True, monitor=True):
    """
    Run the demo for the linear state-space model with switching dynamics.

    Simulates data, optionally plots it, runs VB inference and finally shows
    the posterior results.
    """
    # Fix the random number generator for reproducibility
    if seed is not None:
        np.random.seed(seed)

    # Simulate the data
    (y_obs, f_true) = simulate_data(N)

    # Show the noiseless signal and the noisy observations
    if plot:
        plt.figure()
        bpplt.timeseries(f_true, linestyle='-', color='b')
        bpplt.timeseries(y_obs, linestyle='None', color='r', marker='x')

    # Apply the linear state-space model with switching dynamics
    Q = infer(y_obs, D, K,
              debug=debug,
              maxiter=maxiter,
              monitor=monitor,
              rotate=rotate,
              update_hyper=5)

    # Show the results
    if plot:
        Q.plot()
    return
if __name__ == '__main__':
    # Command-line entry point: parse options and run the demo.
    # (Removed the unused `os` import.)
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "",
                                   ["n=",
                                    "d=",
                                    "k=",
                                    "seed=",
                                    "debug",
                                    "no-rotation",
                                    "no-monitor",
                                    "no-plot",
                                    "maxiter="])
    except getopt.GetoptError:
        print('python lssm_sd.py <options>')
        print('--n=<INT> Number of data vectors')
        print('--d=<INT> Latent space dimensionality')
        print('--k=<INT> Number of mixed models')
        print('--maxiter=<INT> Maximum number of VB iterations')
        print('--seed=<INT> Seed (integer) for the random number generator')
        # Fixed typo in the help text: "peform" -> "perform"
        print('--no-rotation Do not perform rotation speed ups')
        print('--no-plot Do not plot results')
        print('--no-monitor Do not plot distributions during VB learning')
        print('--debug Check that the rotations are implemented correctly')
        sys.exit(2)

    # Map each command line option to the corresponding demo() keyword:
    # options taking an integer argument, and boolean flags.
    int_options = {"--n": "N",
                   "--d": "D",
                   "--k": "K",
                   "--seed": "seed",
                   "--maxiter": "maxiter"}
    flag_options = {"--no-rotation": ("rotate", False),
                    "--no-monitor": ("monitor", False),
                    "--no-plot": ("plot", False),
                    "--debug": ("debug", True)}
    kwargs = {}
    for opt, arg in opts:
        if opt in int_options:
            kwargs[int_options[opt]] = int(arg)
        elif opt in flag_options:
            key, value = flag_options[opt]
            kwargs[key] = value
        else:
            # getopt rejects unknown options, so this is a safety net only
            raise ValueError("Unhandled option given")

    demo(**kwargs)
    plt.show()
| |
# Copyright (c) 2017 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os_ken.lib import mac as mac_api
from oslo_log import log
from dragonflow import conf as cfg
from dragonflow.controller.common import constants as const
from dragonflow.controller.common import logical_networks
from dragonflow.controller import df_base_app
from dragonflow.controller import port_locator
from dragonflow.db.models import l2
LOG = log.getLogger(__name__)
class TunnelingApp(df_base_app.DFlowApp):
    """Program OVS flows carrying logical-network traffic over tunnels.

    Installs ingress classification flows for traffic arriving on virtual
    tunnel ports, and egress dispatch flows that forward traffic addressed
    to remote logical ports out through the proper tunnel with the
    network's segmentation ID set.
    """

    def __init__(self, *args, **kwargs):
        super(TunnelingApp, self).__init__(*args, **kwargs)
        # Tunnel types enabled in the Dragonflow configuration
        self.tunnel_types = cfg.CONF.df.tunnel_types
        # Bookkeeping of local/remote ports per network id and network type
        self.local_networks = logical_networks.LogicalNetworks()

    def switch_features_handler(self, ev):
        # Invoked when the switch (re)connects; (re)create the tunnel ports.
        self._create_tunnels()

    def _create_tunnels(self):
        """Ensure exactly the configured tunnel types exist as virtual ports.

        Deletes tunnel ports of types that are no longer configured and
        (re)creates a virtual tunnel port for each configured type.
        """
        tunnel_ports = self.vswitch_api.get_virtual_tunnel_ports()
        for tunnel_port in tunnel_ports:
            if tunnel_port.tunnel_type not in self.tunnel_types:
                self.vswitch_api.delete_port(tunnel_port)
        for t in self.tunnel_types:
            # The customized ovs idl will ignore the command if the port
            # already exists.
            self.vswitch_api.add_virtual_tunnel_port(t)

    @df_base_app.register_event(l2.LogicalPort, l2.EVENT_BIND_LOCAL)
    def _add_local_port(self, lport):
        """Track a locally bound port; install the network ingress flow for
        the first local port of a tunneled network."""
        lswitch = lport.lswitch
        network_type = lswitch.network_type
        if network_type not in self.tunnel_types:
            LOG.info("added unsupported network %(net_type)s lport",
                     {'net_type': network_type})
            return
        network_id = lswitch.unique_key
        LOG.info("adding %(net_type)s lport %(lport)s",
                 {'net_type': network_type,
                  'lport': lport})
        port_count = self.local_networks.get_local_port_count(
            network_id=network_id,
            network_type=network_type)
        # Only the first local port of the network needs the ingress flow
        if port_count == 0:
            self._new_network_ingress_flow(lport,
                                           network_id)
        self.local_networks.add_local_port(port_id=lport.id,
                                           network_id=network_id,
                                           network_type=network_type)

    @df_base_app.register_event(l2.LogicalPort, l2.EVENT_UNBIND_LOCAL)
    def _remove_local_port(self, lport):
        """Untrack a locally unbound port; remove the network ingress flow
        when the last local port of the network goes away."""
        lswitch = lport.lswitch
        network_type = lswitch.network_type
        if network_type not in self.tunnel_types:
            LOG.info("removed unsupported network %(net_type)s lport",
                     {'net_type': network_type})
            return
        network_id = lswitch.unique_key
        self.local_networks.remove_local_port(port_id=lport.id,
                                              network_id=network_id,
                                              network_type=network_type)
        port_count = self.local_networks.get_local_port_count(
            network_id=network_id,
            network_type=network_type)
        if port_count == 0:
            self._remove_network_ingress_flow(lport)

    def _new_network_ingress_flow(self, lport, network_id):
        """Classify tunneled traffic of this network: tag it with the
        network's metadata and continue to destination port lookup."""
        LOG.debug("adding new %(net_type)s network %(network_id)s",
                  {'net_type': lport.lswitch.network_type,
                   'network_id': network_id})
        match = self._make_network_match(lport)
        actions = [self.parser.OFPActionSetField(metadata=network_id)]
        action_inst = self.parser.OFPInstructionActions(
            self.ofproto.OFPIT_APPLY_ACTIONS, actions)
        goto_inst = self.parser.OFPInstructionGotoTable(
            const.INGRESS_DESTINATION_PORT_LOOKUP_TABLE)
        inst = [action_inst, goto_inst]
        self.mod_flow(
            inst=inst,
            table_id=const.INGRESS_CLASSIFICATION_DISPATCH_TABLE,
            priority=const.PRIORITY_MEDIUM,
            match=match)

    def _remove_network_ingress_flow(self, lport):
        # Delete the classification flow installed by
        # _new_network_ingress_flow for this network/tunnel.
        match = self._make_network_match(lport)
        self.mod_flow(
            command=self.ofproto.OFPFC_DELETE,
            table_id=const.INGRESS_CLASSIFICATION_DISPATCH_TABLE,
            priority=const.PRIORITY_MEDIUM,
            match=match)

    def _make_network_match(self, lport):
        """Match traffic of the port's network arriving on its tunnel port
        (by tunnel/segmentation id and in_port)."""
        segmentation_id = lport.lswitch.segmentation_id
        port_num = self._get_lport_tunnel_ofport(lport)
        return self.parser.OFPMatch(tunnel_id_nxm=segmentation_id,
                                    in_port=port_num)

    def _get_lport_tunnel_ofport(self, lport):
        # OpenFlow port number of the virtual tunnel port that carries this
        # port's network type.
        network_type = lport.lswitch.network_type
        return self.vswitch_api.get_vtp_ofport(network_type)

    @df_base_app.register_event(l2.LogicalPort, l2.EVENT_BIND_REMOTE)
    def _add_remote_port(self, lport):
        """Track a remotely bound port; install its egress dispatch flow and
        refresh the per-network BUM (broadcast) flow."""
        lswitch = lport.lswitch
        network_type = lswitch.network_type
        if network_type not in self.tunnel_types:
            return
        segmentation_id = lswitch.segmentation_id
        self._add_egress_dispatch_flow(lport, segmentation_id)
        network_id = lswitch.unique_key
        LOG.info("adding remote %(net_type)s lport %(lport)s",
                 {'net_type': network_type,
                  'lport': lport})
        self.local_networks.add_remote_port(port_id=lport.id,
                                            network_id=network_id,
                                            network_type=network_type)
        self._modify_egress_bum_flow(network_id,
                                     network_type,
                                     segmentation_id,
                                     self.ofproto.OFPFC_ADD)

    @df_base_app.register_event(l2.LogicalPort, l2.EVENT_UNBIND_REMOTE)
    def remove_remote_port(self, lport):
        """Untrack a remotely unbound port; remove its egress dispatch flow
        and refresh the per-network BUM (broadcast) flow."""
        lswitch = lport.lswitch
        network_type = lswitch.network_type
        if network_type not in self.tunnel_types:
            return
        self._remove_egress_dispatch_flow(lport)
        network_id = lswitch.unique_key
        segmentation_id = lswitch.segmentation_id
        self.local_networks.remove_remote_port(port_id=lport.id,
                                               network_id=network_id,
                                               network_type=network_type)
        self._modify_egress_bum_flow(network_id,
                                     network_type,
                                     segmentation_id,
                                     self.ofproto.OFPFC_MODIFY)

    def _add_egress_dispatch_flow(self, lport, segmentation_id):
        """Send unicast traffic destined for this remote port (reg7 holds
        the destination port key) out its tunnel with the tunnel destination
        IP and segmentation id set."""
        binding = port_locator.get_port_binding(lport)
        remote_ip = binding.ip
        port_num = self._get_lport_tunnel_ofport(lport)
        LOG.debug("set egress dispatch flow %(seg)s peer %(remote_ip)s",
                  {'seg': segmentation_id,
                   'remote_ip': remote_ip})
        match = self.parser.OFPMatch(reg7=lport.unique_key)
        actions = [
            self.parser.OFPActionSetField(tun_ipv4_dst=remote_ip),
            self.parser.OFPActionSetField(tunnel_id_nxm=segmentation_id),
            self.parser.OFPActionOutput(port=port_num)]
        ofproto = self.ofproto
        action_inst = self.parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS, actions)
        inst = [action_inst]
        self.mod_flow(
            inst=inst,
            table_id=const.EGRESS_TABLE,
            priority=const.PRIORITY_MEDIUM,
            match=match)

    def _remove_egress_dispatch_flow(self, lport):
        # Delete the flow installed by _add_egress_dispatch_flow.
        match = self.parser.OFPMatch(reg7=lport.unique_key)
        self.mod_flow(
            command=self.ofproto.OFPFC_DELETE,
            table_id=const.EGRESS_TABLE,
            priority=const.PRIORITY_MEDIUM,
            match=match)

    def _eval_flow_actions(self, network_id, segmentation_id,
                           port_count, command):
        """Decide the mod_flow command and instructions for the BUM flow
        based on how many remote ports the network still has.

        Returns a (instructions, command) tuple; instructions is None when
        the flow is to be deleted.
        """
        inst = None
        if port_count == 0:
            # override command to delete as it is the last port for network
            command = self.ofproto.OFPFC_DELETE
        else:
            if port_count != 1:
                # when there are more than 1 ports in network, modify
                command = self.ofproto.OFPFC_MODIFY
            # use the command provided by higher level call as
            # the mod_flow command
            actions = self._make_bum_flow_actions(network_id, segmentation_id)
            inst = [self.parser.OFPInstructionActions(
                self.ofproto.OFPIT_APPLY_ACTIONS, actions)]
        return inst, command

    def _modify_egress_bum_flow(self,
                                network_id,
                                network_type,
                                segmentation_id,
                                command):
        """Add/modify/delete the broadcast-unknown-multicast flow that
        replicates BUM traffic of the network to all remote peers."""
        match = self._make_bum_match(metadata=network_id)
        port_count = self.local_networks.get_remote_port_count(
            network_id=network_id,
            network_type=network_type)
        inst, command = self._eval_flow_actions(
            network_id, segmentation_id, port_count, command)
        self.mod_flow(
            inst=inst,
            table_id=const.EGRESS_TABLE,
            command=command,
            priority=const.PRIORITY_LOW,
            match=match)

    def _make_bum_match(self, **kwargs):
        # Match destination MACs whose group bit is set, i.e.
        # broadcast/multicast frames (the mac_api.UNICAST constant is used
        # here as the bit mask).
        match = self.parser.OFPMatch(**kwargs)
        bum_addr = mac_api.haddr_to_bin(mac_api.UNICAST)
        match.set_dl_dst_masked(bum_addr, bum_addr)
        return match

    def _make_bum_flow_actions(self, network_id, segmentation_id):
        """Build output actions replicating a packet to every distinct
        remote chassis (by tunnel IP) hosting ports of this network."""
        remote_ports = self.local_networks.get_remote_ports(
            network_id=network_id)
        actions = []
        peer_ip_list = set()
        for port_id in remote_ports:
            lport = self.db_store.get_one(l2.LogicalPort(id=port_id))
            if not lport:
                continue
            binding = port_locator.get_port_binding(lport)
            peer_ip = binding.ip
            # Send only one copy per remote chassis
            if peer_ip in peer_ip_list:
                continue
            peer_ip_list.add(peer_ip)
            port_num = self._get_lport_tunnel_ofport(lport)
            ofpact_set_field = self.parser.OFPActionSetField
            actions += [
                ofpact_set_field(tun_ipv4_dst=peer_ip),
                ofpact_set_field(tunnel_id_nxm=segmentation_id),
                self.parser.OFPActionOutput(port=port_num)]
        return actions
| |
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
def load_svmlight_file(f, n_features=None, dtype=np.float64,
                       multilabel=False, zero_based="auto", query_id=False,
                       offset=0, length=-1):
    """Load a dataset in the svmlight / libsvm format into a sparse CSR matrix.

    The svmlight format is text based, with one sample per line.  Zero
    valued features are not stored, which makes it suitable for sparse
    datasets.  The first element of each line can store a target variable
    to predict.  This is the default format of both the svmlight and the
    libsvm command line programs.

    Parsing a text based source can be expensive.  When working repeatedly
    on the same dataset, it is recommended to wrap this loader with
    joblib.Memory.cache to store a memmapped backup of the CSR results of
    the first call and benefit from the near instantaneous loading of
    memmapped structures for the subsequent calls.

    Pairwise preference constraints ("qid" in the svmlight format) are
    ignored unless ``query_id`` is set to True; they can be used to
    restrict the combination of samples in pairwise loss functions (as in
    some learning to rank problems) so that only pairs with the same
    query_id value are considered.

    Parameters
    ----------
    f : {str, file-like, int}
        (Path to) a file to load.  Paths ending in ".gz" or ".bz2" are
        uncompressed on the fly.  An integer is treated as a file
        descriptor.  File-likes and descriptors are not closed by this
        function and must be opened in binary mode.
    n_features : int or None
        Number of features to use.  If None it is inferred.  Required
        (and useful for loading subsets of a sliced dataset) when
        ``offset`` or ``length`` have non-default values.
    multilabel : boolean, optional, default False
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
    zero_based : boolean or "auto", optional, default "auto"
        Whether column indices in f are zero-based (True) or one-based
        (False).  One-based indices are converted to zero-based to match
        Python/NumPy conventions.  "auto" applies a heuristic check, which
        is safe unless ``offset`` or ``length`` are passed, in which case
        it falls back to ``zero_based=True``.
    query_id : boolean, default False
        If True, also return the query_id array.
    dtype : numpy data type, default np.float64
        Data type of the output arrays ``X`` and ``y``.
    offset : integer, optional, default 0
        Ignore the offset first bytes by seeking forward, then discarding
        the following bytes up until the next new line character.
    length : integer, optional, default -1
        If strictly positive, stop reading once the position in the file
        has reached the (offset + length) bytes threshold.

    Returns
    -------
    X : scipy.sparse matrix of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,), or, in the multilabel case, a list
        of tuples of length n_samples.
    query_id : array of shape (n_samples,)
        Only returned when query_id is set to True.

    See also
    --------
    load_svmlight_files: similar function for loading multiple files in
        this format, enforcing the same number of features/columns.

    Examples
    --------
    To use joblib.Memory to cache the svmlight file::

        from sklearn.externals.joblib import Memory
        from sklearn.datasets import load_svmlight_file
        mem = Memory("./mycache")

        @mem.cache
        def get_data():
            data = load_svmlight_file("mysvmlightfile")
            return data[0], data[1]

        X, y = get_data()
    """
    # Delegate to the multi-file loader with a single-element file list.
    loaded = load_svmlight_files([f], n_features, dtype, multilabel,
                                 zero_based, query_id, offset, length)
    return tuple(loaded)
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id,
                   offset=0, length=-1):
    """Parse one svmlight source, opening it first if it is not file-like,
    and return (data, indices, indptr, labels, query) arrays."""
    if hasattr(f, "read"):
        parsed = _load_svmlight_file(f, dtype, multilabel, zero_based,
                                     query_id, offset, length)
    else:
        # XXX closing() can be dropped once Python 2.7+/3.1+ is required
        with closing(_gen_open(f)) as fh:
            parsed = _load_svmlight_file(fh, dtype, multilabel, zero_based,
                                         query_id, offset, length)
    actual_dtype, data, ind, indptr, labels, query = parsed

    # convert from array.array, giving each buffer its proper dtype
    if not multilabel:
        labels = np.frombuffer(labels, np.float64)
    data = np.frombuffer(data, actual_dtype)
    indices = np.frombuffer(ind, np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)  # never empty
    query = np.frombuffer(query, np.int64)

    data = np.asarray(data, dtype=dtype)  # no-op for float{32,64}
    return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
                        multilabel=False, zero_based="auto", query_id=False,
                        offset=0, length=-1):
    """Load a dataset from multiple files in SVMlight format.

    Equivalent to mapping load_svmlight_file over a list of files, except
    that the results are concatenated into a single, flat list and the
    sample vectors are constrained to all have the same number of features.

    Pairwise preference constraints ("qid" in the svmlight format) are
    ignored unless ``query_id`` is set to True; they can be used to
    restrict the combination of samples in pairwise loss functions (as in
    some learning to rank problems) so that only pairs with the same
    query_id value are considered.

    Parameters
    ----------
    files : iterable over {str, file-like, int}
        (Paths of) files to load.  Paths ending in ".gz" or ".bz2" are
        uncompressed on the fly.  An integer is treated as a file
        descriptor.  File-likes and descriptors are not closed by this
        function and must be opened in binary mode.
    n_features : int or None
        Number of features to use.  If None it is inferred from the
        maximum column index over all files.  May be set higher than the
        actual number of features in the input files, but a lower value
        raises an exception.
    multilabel : boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
    zero_based : boolean or "auto", optional
        Whether column indices in f are zero-based (True) or one-based
        (False).  One-based indices are converted to zero-based to match
        Python/NumPy conventions.  "auto" applies a heuristic check, which
        is safe unless offset or length are passed, in which case it falls
        back to zero_based=True.
    query_id : boolean, defaults to False
        If True, also return the query_id array for each file.
    dtype : numpy data type, default np.float64
        Data type of the output arrays ``X`` and ``y``.
    offset : integer, optional, default 0
        Ignore the offset first bytes by seeking forward, then discarding
        the following bytes up until the next new line character.
    length : integer, optional, default -1
        If strictly positive, stop reading once the position in the file
        has reached the (offset + length) bytes threshold.

    Returns
    -------
    [X1, y1, ..., Xn, yn]
    where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
    If query_id is set to True, this returns instead [X1, y1, q1,
    ..., Xn, yn, qn] where (Xi, yi, qi) is the result from
    load_svmlight_file(files[i]).

    Notes
    -----
    When fitting a model to a matrix X_train and evaluating it against a
    matrix X_test, it is essential that X_train and X_test have the same
    number of features (X_train.shape[1] == X_test.shape[1]).  This may
    not be the case if you load the files individually with
    load_svmlight_file.

    See also
    --------
    load_svmlight_file
    """
    if (offset != 0 or length > 0) and zero_based == "auto":
        # disable heuristic search to avoid getting inconsistent results on
        # different segments of the file
        zero_based = True

    if (offset != 0 or length > 0) and n_features is None:
        raise ValueError(
            "n_features is required when offset or length is specified.")

    parsed = [_open_and_load(f, dtype, multilabel, bool(zero_based),
                             bool(query_id), offset=offset, length=length)
              for f in files]

    # Shift the indices down when they are known (or detected) one-based.
    if zero_based is False:
        one_based = True
    else:
        one_based = (zero_based == "auto" and
                     all(len(chunk[1]) and np.min(chunk[1]) > 0
                         for chunk in parsed))
    if one_based:
        for _, indices, _, _, _ in parsed:
            indices -= 1

    # Feature count implied by the data: highest column index plus one.
    n_f = 1 + max(chunk[1].max() if len(chunk[1]) else 0 for chunk in parsed)
    if n_features is None:
        n_features = n_f
    elif n_features < n_f:
        raise ValueError("n_features was set to {},"
                         " but input file contains {} features"
                         .format(n_features, n_f))

    result = []
    for data, indices, indptr, y, query_values in parsed:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)
    return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
    """Write the dataset (X, y) to binary file object ``f`` in svmlight format.

    Internal helper for dump_svmlight_file: inputs are assumed to be already
    validated (matching shapes, sorted CSR indices).  ``u`` and ``b`` are the
    module's py2/py3 text/bytes shims — TODO confirm against their import site.
    """
    # Duck-type sparse matrices by the presence of .tocsr().
    X_is_sp = int(hasattr(X, "tocsr"))
    y_is_sp = int(hasattr(y, "tocsr"))
    # Integer dtypes print with %d; everything else uses %.16g, enough
    # digits to round-trip a double through text.
    if X.dtype.kind == 'i':
        value_pattern = u("%d:%d")
    else:
        value_pattern = u("%d:%.16g")
    if y.dtype.kind == 'i':
        label_pattern = u("%d")
    else:
        label_pattern = u("%.16g")
    # Line layout: "<label(s)> [qid:<q>] <idx:val> <idx:val> ...\n"
    line_pattern = u("%s")
    if query_id is not None:
        line_pattern += u(" qid:%d")
    line_pattern += u(" %s\n")
    if comment:
        # Header: generator tag, index base, separator, then the user comment.
        f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
                  % __version__))
        f.write(b("# Column indices are %s-based\n"
                  % ["zero", "one"][one_based]))
        f.write(b("#\n"))
        f.writelines(b("# %s\n" % line) for line in comment.splitlines())
    for i in range(X.shape[0]):
        if X_is_sp:
            # CSR row i: nonzero (column, value) pairs via the indptr span.
            span = slice(X.indptr[i], X.indptr[i + 1])
            row = zip(X.indices[span], X.data[span])
        else:
            # Dense row: emit only the nonzero entries.
            nz = X[i] != 0
            row = zip(np.where(nz)[0], X[i, nz])
        s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
        if multilabel:
            # Multilabel: comma-separated list of the nonzero label indices.
            if y_is_sp:
                nz_labels = y[i].nonzero()[1]
            else:
                nz_labels = np.where(y[i] != 0)[0]
            labels_str = ",".join(label_pattern % j for j in nz_labels)
        else:
            if y_is_sp:
                labels_str = label_pattern % y.data[i]
            else:
                labels_str = label_pattern % y[i]
        if query_id is not None:
            feat = (labels_str, query_id[i], s)
        else:
            feat = (labels_str, s)
        f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
                       multilabel=False):
    """Dump the dataset in svmlight / libsvm file format.

    The format is text-based, one sample per line; zero-valued features are
    omitted, which makes it a natural fit for sparse datasets.  The first
    token of each line carries the target value(s).

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, n_samples rows of n_features features each.
    y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
        Target values: integer or float labels, or array-like objects of
        integers/floats for multilabel classification.
    f : string or file-like in binary mode
        Output path, or a file object opened in binary mode.
    zero_based : boolean, optional
        Write column indices zero-based (True) or one-based (False).
    comment : string, optional
        Comment inserted at the top of the file: a Unicode string (encoded
        as UTF-8) or an ASCII byte string.  It is preceded by a line
        identifying the file as dumped by scikit-learn; note that not all
        tools grok comments in SVMlight files.
    query_id : array-like, shape = [n_samples]
        Pairwise preference constraints (qid in svmlight format).
    multilabel : boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

        .. versionadded:: 0.17
           parameter *multilabel* to support multilabel datasets.
    """
    if comment is not None:
        # Normalize the comment to UTF-8 bytes.  A byte string is passed
        # through an ASCII decode purely so that non-ASCII bytes raise here
        # instead of corrupting the header; fancy encodings must be decoded
        # by the caller.
        if isinstance(comment, bytes):
            comment.decode("ascii")  # just for the exception
        else:
            comment = comment.encode("utf-8")
        if six.b("\0") in comment:
            raise ValueError("comment string contains NUL byte")

    yval = check_array(y, accept_sparse='csr', ensure_2d=False)
    if sp.issparse(yval):
        if yval.shape[1] != 1 and not multilabel:
            raise ValueError("expected y of shape (n_samples, 1),"
                             " got %r" % (yval.shape,))
    else:
        if yval.ndim != 1 and not multilabel:
            raise ValueError("expected y of shape (n_samples,), got %r"
                             % (yval.shape,))

    Xval = check_array(X, accept_sparse='csr')
    if Xval.shape[0] != yval.shape[0]:
        raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
                         " %r and %r instead." % (Xval.shape[0], yval.shape[0]))

    # CSR matrices with unsorted indices caused trouble (e.g. #1501), so sort
    # them here without mutating the user's objects.
    # TODO We can do this cheaper; sorted_indices copies the whole matrix.
    def _with_sorted_indices(original, validated):
        # check_array returned the caller's own CSR object: take a sorted copy.
        if validated is original and hasattr(validated, "sorted_indices"):
            return validated.sorted_indices()
        # Otherwise validated is already ours; sort in place if sparse.
        if hasattr(validated, "sort_indices"):
            validated.sort_indices()
        return validated

    y = _with_sorted_indices(y, yval)
    X = _with_sorted_indices(X, Xval)

    if query_id is not None:
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError("expected query_id of shape (n_samples,), got %r"
                             % (query_id.shape,))

    one_based = not zero_based

    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
    else:
        with open(f, "wb") as fh:
            _dump_svmlight(X, y, fh, multilabel, one_based, comment, query_id)
| |
import sys, os
import visit_rcParams as vrc
import visit_colormaps as vc
import visit_time as vt
import visit as v
# Module-level rc defaults: line widths used for surface and snow plots.
rcParams = {
    'surface_linewidth': 3,
    'snow_linewidth': 4,
}
class Operator:
    """Lightweight record of a VisIt operator applied to a plot.

    Holds the bookkeeping name, the VisIt operator type string, and the
    attribute object that was passed to SetOperatorOptions.
    """
    def __init__(self, oname, otype, oatts):
        # Stored verbatim; callers scan .oname to detect operators that
        # were already applied to a plot.
        self.oname, self.otype, self.oatts = oname, otype, oatts
class Plot:
    """Wraps a single VisIt plot together with its legend annotation.

    Parameters
    ----------
    pname : VisIt plot name (as found in GetPlotList()).
    ptype : plot type string, e.g. 'Pseudocolor' or 'Contour'.
    patts : the VisIt attributes object configured for the plot.
    varname : optional display name; when given the builtin legend title
        is disabled and replaced by a Text2D annotation showing it.
    """
    def __init__(self, pname, ptype, patts, varname=None):
        """Plot class"""
        self.pname = pname
        self.varname = varname
        self.ptype = ptype
        self.patts = patts
        self.operators = []
        # Configure the legend annotation from the module rc defaults.
        self.annot = v.GetAnnotationObject(pname)
        self.annot.fontFamily = vrc.getDefaultFont()
        self.annot.fontHeight = vrc.rcParams['legend.fontheight']
        self.annot.managePosition = 0
        self.annot.xScale = vrc.rcParams['legend.scale'][0]
        self.annot.yScale = vrc.rcParams['legend.scale'][1]
        self.annot.position = vrc.rcParams['legend.position']
        self.annot.drawMinMax = vrc.rcParams['legend.minmax']
        if varname is not None:
            # Replace the builtin legend title with a custom Text2D label.
            self.annot.drawTitle = 0
            self.title = v.CreateAnnotationObject("Text2D")
            self.title.text = varname
            self.title.fontFamily = vrc.getDefaultFont()
            self.title.height = vrc.rcParams['legend.title.fontheight']
            self.title.position = vrc.rcParams['legend.title.position']

    def getLimits(self):
        """Return (min, max) limits set on a Pseudocolor plot.

        Either entry is None when the corresponding flag is unset.
        """
        assert self.ptype == 'Pseudocolor'
        # Locals renamed from min/max to avoid shadowing the builtins.
        lo = None
        hi = None
        if self.patts.minFlag:
            lo = self.patts.min
        if self.patts.maxFlag:
            # BUG FIX: this branch previously assigned the max value to the
            # *min* variable, so the max limit was never reported.
            hi = self.patts.max
        return lo, hi
class VisItWindow:
    """Class for a window.

    Wraps a single VisIt window: its plots, view, registered slice and
    vertical exaggeration.  Registered changes are pushed to VisIt in
    draw().
    """

    class Slice:
        """Helper class for slicing into 2D: a plane given by point+normal."""
        def __init__(self, point=None, normal=None):
            # Defaults: plane through the origin with normal -y.
            if point is None:
                point = (0, 0, 0)
            if normal is None:
                normal = (0, -1, 0)
            assert type(point) is tuple
            assert len(point) == 3
            assert type(normal) is tuple
            assert len(normal) == 3
            self.point = point
            self.normal = normal

        def toAttributes(self):
            """Convert this plane into VisIt SliceAttributes."""
            s = v.SliceAttributes()
            s.originType = s.Point
            s.originPoint = self.point
            s.normal = self.normal
            s.axisType = s.Arbitrary
            return s

    def __init__(self, index):
        self.i = index  # 1-based VisIt window index
        self.annot = vrc.getAnnotationAttributes()
        self.setDimension(3)
        self.plots = []
        self.nonplots = []  # other objects like meshes
        self._slice = None
        self.exaggeration = None

    def setDimension(self, dim):
        """Sets the dimension (2 or 3), which is used in controlling the view"""
        self.dim = dim
        if dim == 2:
            self.view = v.GetView2D()
        elif dim == 3:
            self.view = v.GetView3D()
        else:
            raise RuntimeError("Invalid dimension %s" % str(dim))

    def slice(self, point=None, normal=None):
        """Registers a slice -- this is not immediately added"""
        self._slice = self.Slice(point, normal)
        self.setDimension(2)

    def exaggerateVertical(self, factor):
        """Registers a vertical exaggeration -- this is not immediately added"""
        self.exaggeration = factor

    def _exaggerateVertical(self):
        # Apply the registered exaggeration: in 3D, scale the view's z axis;
        # in 2D, add a Transform operator to each plot that lacks one.
        if self.dim == 3:
            self.view.axis3DScaleFlag = 1
            self.view.axis3DScales = (self.view.axis3DScales[0],
                                      self.view.axis3DScales[1],
                                      self.exaggeration)
        else:
            for i, plot in enumerate(self.plots):
                done = False
                for op in plot.operators:
                    if "exaggerate_vertical" == op.oname:
                        done = True
                if not done:
                    print("transforming plot %d..." % i)
                    tr = v.TransformAttributes()
                    tr.doScale = 1
                    tr.scaleY = self.exaggeration
                    v.SetActivePlots(i)
                    v.AddOperator("Transform")
                    v.SetOperatorOptions(tr)
                    plot.operators.append(Operator("exaggerate_vertical", "Transform", tr))

    def createMesh(self, color='w', opacity=0.15, silo=False):
        """Adds a Mesh plot (tracked in nonplots) and returns its Plot."""
        _colors = dict(w=(255,255,255,255),
                       k=(0,0,0,255),
                       gray=(175,175,175),
                       )
        # NOTE(review): 'gray' has no alpha component unlike 'w'/'k' --
        # confirm VisIt accepts an RGB triple here.
        if silo:
            v.AddPlot('Mesh', "mesh")
        else:
            v.AddPlot('Mesh', "Mesh")
        ma = v.MeshAttributes()
        ma.legendFlag = 0
        ma.meshColor = _colors[color]
        ma.meshColorSource = ma.MeshCustom
        if (opacity < 1.):
            ma.opaqueMode = ma.On
            ma.opacity = opacity
        v.SetPlotOptions(ma)
        pname = v.GetPlotList().GetPlots(v.GetNumPlots()-1).plotName
        if silo:
            plot = Plot(pname, 'mesh', ma)
        else:
            plot = Plot(pname, 'Mesh', ma)
        self.nonplots.append(plot)
        return plot

    def createPseudocolor(self, varname, display_name=None, cmap=None,
                          limits=None, linewidth=None, legend=True, alpha=False):
        """Generic creation of pseudocolor"""
        if display_name is None:
            display_name = vrc.renameScalar(varname)
            if "temperature" in display_name:
                # Display temperatures in Celsius via a scalar expression.
                display_name = display_name.replace("[K]", "[C]")
                print("defining alias: %s = %s" % (display_name, varname))
                v.DefineScalarExpression(display_name, "<%s> - 273.15" % varname)
            elif display_name != varname:
                print("defining alias: %s = %s" % (display_name, varname))
                v.DefineScalarExpression(display_name, '<'+varname+'>')
        v.AddPlot('Pseudocolor', display_name)
        pa = v.PseudocolorAttributes()
        # limits (locals renamed from min/max to avoid shadowing builtins)
        if limits is None:
            limits = vrc.getLimits(varname)
        if limits is not None:
            lo = limits[0]
            hi = limits[1]
            if lo is not None:
                pa.minFlag = 1
                pa.min = lo
            if hi is not None:
                pa.maxFlag = 1
                pa.max = hi
        # opacity
        if alpha:
            pa.opacity = 0
            pa.opacityType = pa.ColorTable
        # colormap
        if cmap is not None:
            reverse = cmap.endswith("_r")
            if reverse:
                # BUG FIX: str.strip("_r") removes *any* leading/trailing
                # '_' or 'r' characters (e.g. "copper_r" -> "coppe",
                # "rainbow_r" -> "ainbow"); slice off the 2-char suffix.
                cmap = cmap[:-2]
                pa.invertColorTable = 1
            pa.colorTableName = cmap
        # linewidth for 2D
        if linewidth is None:
            linewidth = vrc.rcParams['pseudocolor.linewidth']
        pa.lineWidth = linewidth
        # turn off legend for 2D surf
        if not legend:
            pa.legendFlag = 0
        # NOTE(review): index offset presumably accounts for another object
        # in VisIt's plot list -- confirm vs. len(self.plots).
        v.SetActivePlots(len(self.plots)+1)
        v.SetPlotOptions(pa)
        pname = v.GetPlotList().GetPlots(v.GetNumPlots()-1).plotName
        if legend:
            plot = Plot(pname, 'Pseudocolor', pa, display_name)
        else:
            plot = Plot(pname, 'Pseudocolor', pa)
        self.plots.append(plot)
        return plot

    def createContour(self, varname, value, color=None, linewidth=None):
        """Generic creation of a single contour without a legend"""
        v.AddPlot('Contour', varname)
        ca = v.ContourAttributes()
        ca.contourMethod = ca.Value
        ca.contourValue = (value,)
        ca.colorType = ca.ColorBySingleColor
        if color is None:
            color = vrc.rcParams['contour.color']
        if type(color) is str:
            color = vc.common_colors[color]
        ca.singleColor = color
        if linewidth is None:
            linewidth = vrc.rcParams['contour.linewidth']
        ca.lineWidth = linewidth
        # turn off legend for 2D surf
        ca.legendFlag = 0
        v.SetPlotOptions(ca)
        pname = v.GetPlotList().GetPlots(v.GetNumPlots()-1).plotName
        plot = Plot(pname, 'Contour', ca)
        self.plots.append(plot)
        return plot

    def draw(self):
        """Pushes all registered state (slice, exaggeration, plot options,
        view) to VisIt and draws the plots."""
        print("drawing window %d of dimension %d" % (self.i, self.dim))
        v.SetActiveWindow(self.i)
        v.SetAnnotationAttributes(self.annot)
        if self.dim == 2:
            # add the slice to any plot that is not yet sliced
            assert self._slice is not None
            for i, plot in enumerate(self.plots):
                sliced = False
                for op in plot.operators:
                    if "slice" == op.oname:
                        sliced = True
                if not sliced:
                    print("slicing plot %d..." % i)
                    v.SetActivePlots(i)
                    v.AddOperator("Slice")
                    sa = self._slice.toAttributes()
                    v.SetOperatorOptions(sa)
                    plot.operators.append(Operator("slice", "Slice", sa))
        if self.exaggeration is not None:
            print("exaggerating...")
            self._exaggerateVertical()
        # set the plot options
        for i, plot in enumerate(self.plots):
            print("setting plot options for plot %i..." % i)
            v.SetActivePlots(i)
            v.SetPlotOptions(plot.patts)
        # set the view
        print("setting the view...")
        if self.dim == 2:
            v.SetView2D(self.view)
        else:
            v.SetView3D(self.view)
        print("drawing...")
        v.DrawPlots()
class Vis:
    """Container class for windows, also manages sources and correlations"""
    def __init__(self, directory, hostname="localhost", n_windows=1):
        self.directory = directory
        self.hostname = hostname
        self.windows = []
        self._active_window = 0  # 1-based; 0 means "none yet"
        for i in range(n_windows):
            self.addWindow()
        self.setActiveWindow(1)
        # NOTE(review): remote-engine startup kept for reference:
        # if self.hostname != "localhost":
        #     args = []
        #     v.OpenMDServer(self.hostname,args)
        #     v.OpenComputeEngine(self.hostname,args)

    def loadSources(self, prefix="visdump_data",
                    surface_prefix="visdump_surface_data", filetype="xdmf"):
        """Loads source files for subsurface and potentially surface.

        Either prefix may be None to skip that database.  filetype is
        'xdmf' or 'silo'; other values leave the source attribute unset.
        """
        if prefix is None:
            self.subsurface_src = None
        else:
            if filetype == "xdmf":
                self.subsurface_src = ":".join((self.hostname, os.path.join(self.directory, "%s.VisIt.xmf" % prefix)))
            elif filetype == "silo":
                self.subsurface_src = ":".join((self.hostname, os.path.join(self.directory, "%s.silo" % prefix)))
        if surface_prefix is None:
            self.surface_src = None
        else:
            if filetype == "xdmf":
                self.surface_src = ":".join((self.hostname, os.path.join(self.directory, "%s.VisIt.xmf" % surface_prefix)))
            elif filetype == "silo":
                self.surface_src = ":".join((self.hostname, os.path.join(self.directory, "%s.silo" % surface_prefix)))
        # open the subsurface database
        if self.subsurface_src is not None:
            v.OpenDatabase(self.subsurface_src)
        if surface_prefix is not None:
            # open the surface database
            v.OpenDatabase(self.surface_src)
            if prefix is not None:
                # Robustness: only correlate time sliders when *both*
                # databases were actually loaded.
                v.CreateDatabaseCorrelation("my_correlation", (self.subsurface_src, self.surface_src), 0)
                v.SetActiveTimeSlider("my_correlation")
        # create vector expressions for ponded depth and snow depth
        v.DefineVectorExpression("ponded_depth_displace", "{0,0,ponded_depth.cell.0}")
        v.DefineVectorExpression("snow_displace", "{0,0,snow_depth.cell.0+ponded_depth.cell.0}")

    def loadSourcesList(self, srclist):
        """A generic set of sources."""
        self.src = [":".join((self.hostname, os.path.join(self.directory, s))) for s in srclist]
        for s in self.src:
            v.OpenDatabase(s)

    def unloadSources(self):
        """Deletes all plots and closes any databases opened by loadSources()."""
        v.DeleteAllPlots()
        # getattr guards: this may be called before loadSources() ever ran.
        if getattr(self, 'subsurface_src', None) is not None:
            v.CloseDatabase(self.subsurface_src)
        if getattr(self, 'surface_src', None) is not None:
            v.CloseDatabase(self.surface_src)

    def addWindow(self):
        """Adds a window to VisIt and makes it active"""
        if len(self.windows) != 0:
            v.AddWindow()
        win = VisItWindow(len(self.windows)+1)
        self.windows.append(win)
        self.setActiveWindow(len(self.windows))
        v.ToggleLockTime()

    def getActiveWindow(self):
        """Returns the currently active VisItWindow object."""
        return self.windows[self._active_window-1]

    def setActiveWindow(self, i):
        """Makes window i (1-based) active, both here and in VisIt."""
        assert 0 < i <= len(self.windows)
        self._active_window = i
        v.SetActiveWindow(i)

    def activateSurface(self):
        v.ActivateDatabase(self.surface_src)

    def activateSubsurface(self):
        v.ActivateDatabase(self.subsurface_src)

    def activateSource(self, src):
        v.ActivateDatabase(src)
class ATSVis(Vis):
    """ATS-specific Vis: canned plot constructors for standard ATS
    variables (pressure, saturations, temperatures, depths) plus a
    simulation-time annotation."""

    def __init__(self, *args, **kwargs):
        Vis.__init__(self, *args, **kwargs)
        self.time_annot = None  # lazily created by writeTime()

    def createSubsurfacePseudocolor(self, varname, limits=None, cmap=None, window=None):
        """Simplified interface to create standard pseudocolors"""
        self.activateSubsurface()
        if window is not None:
            self.setActiveWindow(window)
        win = self.getActiveWindow()
        return win.createPseudocolor(varname, limits=limits, cmap=cmap,
                                     legend=True)

    def createSurfacePseudocolor(self, varname, limits=None, cmap=None, window=None,
                                 displace=True, alpha=False, legend=False):
        """Simplified interface to create standard pseudocolors on the surface."""
        self.activateSurface()
        if window is not None:
            self.setActiveWindow(window)
        win = self.getActiveWindow()
        pcolor = win.createPseudocolor(varname, limits=limits, cmap=cmap,
                                       legend=legend, alpha=alpha)
        if displace:
            # deform by surface vector: lift the surface by the ponded depth
            v.AddOperator("Displace")
            da = v.DisplaceAttributes()
            da.variable = "ponded_depth_displace"
            v.SetOperatorOptions(da)
            pcolor.operators.append(Operator("displace", "Displace", da))
        return pcolor

    def createSnowPseudocolor(self, varname, limits=None, cmap=None, window=None, legend=False):
        """Simplified interface to create standard pseudocolors on the snow surface."""
        self.activateSurface()
        if window is not None:
            self.setActiveWindow(window)
        win = self.getActiveWindow()
        if cmap is None:
            cmap = "hot"
        pcolor = win.createPseudocolor(varname, limits=limits, cmap=cmap,
                                       legend=legend)
        # deform by surface vector: snow sits atop the ponded water
        v.AddOperator("Displace")
        da = v.DisplaceAttributes()
        da.variable = "snow_displace"
        v.SetOperatorOptions(da)
        pcolor.operators.append(Operator("displace", "Displace", da))
        return pcolor

    def createSubsurfaceContour(self, varname, value, window=None, color=None,
                                linewidth=None):
        """Adds a single-valued contour of a subsurface variable."""
        self.activateSubsurface()
        if window is not None:
            self.setActiveWindow(window)
        win = self.getActiveWindow()
        return win.createContour(varname, value, color=color, linewidth=linewidth)

    def plotPressure(self, window=None, limits=None):
        """Adds a plot of subsurface pressure"""
        return self.createSubsurfacePseudocolor("pressure.cell.0", limits=limits, window=window)

    def plotSurfacePressure(self, window=None, limits=None):
        """Adds a plot of surface pressure"""
        return self.createSurfacePseudocolor("surface_pressure.cell.0", limits=limits, window=window)

    def plotLiquidSaturation(self, window=None, limits=None, cmap="saturation_liquid_r"):
        """Adds a plot of subsurface liquid saturation"""
        return self.createSubsurfacePseudocolor("saturation_liquid.cell.0", limits=limits,
                                                cmap=cmap, window=window)

    def plotGasSaturation(self, window=None, limits=None):
        """Adds a plot of subsurface gas saturation"""
        return self.createSubsurfacePseudocolor("saturation_gas.cell.0", limits=limits,
                                                window=window)

    def plotIceSaturation(self, window=None, limits=None, cmap="saturation_ice_r"):
        """Adds a plot of subsurface ice saturation"""
        return self.createSubsurfacePseudocolor("saturation_ice.cell.0", limits=limits,
                                                cmap=cmap, window=window)

    def plotTemperature(self, window=None, limits=None):
        """Adds a plot of subsurface temperature"""
        # create a dedicated colormap when both limits are known
        cmap = None
        if limits is None:
            limits = vrc.rcParams['var.limits']["temperature"]
        if limits is not None:
            # idiom: 'is not None' rather than '!= None'
            if limits[0] is not None and limits[1] is not None:
                vc.createTemperaturesColorMap(limits[0], limits[1], 0., "temperature")
                cmap = "temperature"
        return self.createSubsurfacePseudocolor("temperature.cell.0", limits=limits,
                                                cmap=cmap, window=window)

    def plotSurfaceTemperature(self, window=None, limits=None):
        """Adds a plot of surface temperature"""
        # create the colormap
        cmap = None
        if limits is None:
            limits = vrc.rcParams['var.limits']["temperature"]
        if limits is not None:
            if limits[0] is not None and limits[1] is not None:
                vc.createTemperaturesColorMap(limits[0], limits[1], 0., "surface_temperature")
                cmap = "surface_temperature"
        return self.createSurfacePseudocolor("surface_temperature.cell.0", limits=limits,
                                             cmap=cmap, window=window)

    def plotSnowTemperature(self, window=None, limits=None):
        """Adds a plot of snow temperature"""
        # create the colormap
        cmap = None
        if limits is None:
            limits = vrc.rcParams['var.limits']["temperature"]
        if limits is not None:
            if limits[0] is not None and limits[1] is not None:
                vc.createTemperaturesColorMap(limits[0], limits[1], 0., "snow_temperature")
                cmap = "snow_temperature"
        return self.createSnowPseudocolor("snow_temperature.cell.0", limits=limits,
                                          cmap=cmap, window=window)

    def plotPondedDepth(self, **kwargs):
        """Adds a plot of surface ponded depth"""
        # idiom: pop() both tests membership and removes the key
        domain = kwargs.pop('domain_name', None)
        if domain is not None:
            varname = domain + "-ponded_depth.cell.0"
        else:
            varname = "surface-ponded_depth.cell.0"
        return self.createSurfacePseudocolor(varname, **kwargs)

    def plotSnowDepth(self, **kwargs):
        """Adds a plot of snow depth"""
        return self.createSnowPseudocolor("snow_depth.cell.0", **kwargs)

    def _getIndexByTime(self, time):
        # unimplemented placeholder
        pass

    def plotALD(self, yr_start, yr_end):
        """Adds a plot of contours of ALD from year start to year end, inclusive"""
        # unimplemented placeholder:
        # find the time slice that starts the full period
        # for yr in range(yr_start, yr_end+1):
        #     # find the time slice that starts the year
        pass

    def writeTime(self, round=None):
        """Updates (creating on first call) a Text2D annotation showing the
        current simulation time.

        'round' (rounding passed to vt.visitTime) shadows the builtin but is
        kept for caller compatibility.
        """
        self.setActiveWindow(vrc.rcParams['time.window'])
        if self.time_annot is None:
            ta = v.CreateAnnotationObject("Text2D")
            ta.position = vrc.rcParams['time.location']
            ta.height = vrc.rcParams['time.fontheight']
            ta.fontFamily = vrc.getDefaultFont()
            self.time_annot = ta
        if round is None:
            round = vrc.rcParams['time.round']
        self.time_annot.text = vt.visitTime(round)

    def plotSurfaceMesh(self, color='w', opacity=.15):
        """Adds a mesh plot of the surface domain."""
        self.activateSurface()
        win = self.getActiveWindow()
        mesh = win.createMesh(color, opacity)
        return mesh

    def plotSubsurfaceMesh(self, color='w', opacity=.15):
        """Adds a mesh plot of the subsurface domain."""
        self.activateSubsurface()
        win = self.getActiveWindow()
        mesh = win.createMesh(color, opacity)
        return mesh

    def draw(self):
        """Draw the plots"""
        for win in self.windows:
            win.draw()
        # leave with 1 as the active window, which seems to be
        # required to get saving to work
        self.setActiveWindow(1)

    def update(self):
        """Any changes not made by SetTimeSliderState()"""
        if self.time_annot is not None:
            self.writeTime()
| |
import logging
import re
import simplejson as json
from fakturo.core import exceptions
from fakturo.core.provider import resource
LOG = logging.getLogger(__name__)
class Base(resource.BaseResource):
    """Base controller: URL construction plus thin CRUD helpers over the
    HTTP client."""
    parent = None             # parent controller class, for nested URLs
    resource_name = None      # plural URL segment, e.g. 'customers'
    resource_exclude = False  # drop the name segment from the topmost item URL
    resource_key = None       # explicit name override for get_name()
    url = None                # literal URL for controllers without a name

    @classmethod
    def _item_url(cls, resource_exclude=False):
        """
        Return the url part for a single resource of this class

        :return: URL For a Item
        :rtype: string
        """
        if cls.resource_name:
            # 'payment-methods' -> '%(payment_method_id)s' placeholder
            part_name = re.sub('-', '_', cls.resource_name)[0:-1]
            part_id = '/' + '%(' + part_name + '_id)s'
            return part_id if resource_exclude else '/' + \
                cls.resource_name + part_id
        else:
            return cls.url

    @classmethod
    def get_url(cls, item=False):
        """
        Get the URL for a collection / resource in this Controller

        :param item: Retrieve of URL for a specific Item or Collection
        :type item: bool
        """
        i = 0
        url = []
        current = cls
        while current:
            # local renamed from 'next' to avoid shadowing the builtin
            nxt = current.parent or None
            if current.resource_name:
                if not item and i == 0:
                    # innermost segment of a collection URL carries no id
                    part = current.resource_name
                else:
                    exclude = bool(not nxt and current.resource_exclude)
                    part = current._item_url(resource_exclude=exclude)
            else:
                part = current.url
            if part and not part.startswith('/'):
                part = '/' + part
            url.append(part)
            i += 1
            current = nxt
        url.reverse()
        return ''.join(url) if url else None

    @property
    def item_url(self):
        """
        Get the URL for a item in this Controller
        """
        return self.get_url(item=True)

    @property
    def collection_url(self):
        """
        Return the collection URL for this Controller
        """
        return self.get_url()

    @classmethod
    def get_name(cls):
        """
        Get the snake_case name of this Controller
        """
        name = cls.resource_key or cls.__name__
        # BUG FIX: the CamelCase->snake_case regex must run *before*
        # lowercasing; previously .lower() removed every uppercase letter so
        # the substitution never matched ('PaymentMethod' became
        # 'paymentmethod' instead of 'payment_method').
        return re.sub(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))', r'_\1',
                      name).lower()

    def _data(self, **kw):
        """Return kw with None-valued entries dropped."""
        data = {}
        for k, v in kw.items():
            if v is not None:
                data[k] = v
        return data

    def _format_url(self, f_url, **f_data):
        """Interpolate f_data into the %(...)s placeholders of f_url."""
        LOG.debug("Formatting URL %s with data %s", f_url, f_data)
        try:
            return f_url % f_data
        except KeyError:
            # BUG FIX: the format arguments were swapped, labelling the URL
            # as the data and the data as the format string.
            msg = "Data (%s) insufficient to format (%s)" % (f_data, f_url)
            raise exceptions.LocalError(msg)

    def wrap_request(self, func, url=None, f_url=None, f_data=None,
                     *args, **kw):
        """
        Constructs the URL from the given arguments if it has a url and
        f_data. If only url is given then it just uses that.

        :param func: A function to be invoked.
            Example: self.client.[get,list,update,delete,create]
        :type func: callable
        :param url: A URL or Format string
        :type url: string
        :param f_data: Data from which to construct the URL
        :type f_data: dict
        :param args: Arguments that's forwarded to the func
        :type args: list
        :param kw: Keywords to forward to func
        :type kw: dict / keywords
        :return: requests Response object
        :rtype: Response
        """
        if isinstance(f_data, dict):
            # Fall back to the client's account when the caller gave none.
            if not f_data.get('account_id') and self.client.account_id:
                f_data['account_id'] = self.client.account_id
        if f_data and 'account_id' in f_data:
            # The remote API addresses accounts as 'merchants'; mirror the
            # id under the key the URL templates expect.  (This also covers
            # the fallback case above, so no duplicate assignment needed.)
            f_data['merchant_id'] = f_data.get('account_id')
        # NOTE: URL takes precedense over f_url
        if not url and f_url:
            # NOTE: Can this be changed?
            url = self._format_url(f_url, **f_data)
        elif not url:
            msg = 'No URL or URL Format String was passed'
            raise exceptions.LocalError(msg)
        response = func(url, *args, **kw)
        return response

    def _create(self, values, status_code=202, *args, **kw):
        """
        Create a new Resource from values
        """
        f_url = kw.pop('f_url', self.collection_url)
        response = self.wrap_request(
            self.client.post, f_url=f_url, data=json.dumps(values),
            status_code=status_code, *args, **kw)
        return response

    def _list(self, *args, **kw):
        """
        List objects of this Resource
        """
        f_url = kw.pop('f_url', self.collection_url)
        response = self.wrap_request(self.client.get, f_url=f_url, *args, **kw)
        return response

    def _get(self, *args, **kw):
        """
        Get a object of this Resource
        """
        f_url = kw.pop('f_url', self.item_url)
        response = self.wrap_request(self.client.get, f_url=f_url, *args, **kw)
        return response

    def _update(self, values, *args, **kw):
        """
        Update a Resource
        """
        f_url = kw.pop('f_url', self.item_url)
        response = self.wrap_request(
            self.client.put, f_url=f_url, data=json.dumps(values),
            *args, **kw)
        return response

    def _delete(self, status_code=204, *args, **kw):
        """
        Delete a Resource
        """
        f_url = kw.pop('f_url', self.item_url)
        response = self.wrap_request(
            self.client.delete, f_url=f_url, status_code=status_code,
            *args, **kw)
        return response
class Account(Base):
    """CRUD controller for merchant accounts."""
    resource_key = 'account'
    resource_name = 'merchants'

    def create(self, values):
        return self._create(values, url=self.collection_url).json

    def list(self):
        return self._list(url=self.collection_url).json

    def get(self, account_id):
        return self._get(f_data=self._data(account_id=account_id)).json

    def update(self, values, account_id=None):
        # BUG FIX: update() previously issued a GET and silently discarded
        # ``values``; route it through _update (HTTP PUT) instead.
        return self._update(
            values, f_data=self._data(account_id=account_id)).json

    def delete(self, account_id):
        return self._delete(f_data=self._data(account_id=account_id))
class Customer(Base):
    """CRUD controller for customers, nested under an account."""
    parent = Account
    resource_name = 'customers'

    def create(self, values, account_id=None):
        f_data = self._data(account_id=account_id)
        return self._create(values, f_data=f_data).json

    def list(self, account_id=None):
        f_data = self._data(account_id=account_id)
        return self._list(f_data=f_data).json

    def get(self, customer_id, account_id=None):
        f_data = self._data(account_id=account_id, customer_id=customer_id)
        return self._get(f_data=f_data).json

    def update(self, customer_id, values, account_id=None):
        f_data = self._data(account_id=account_id, customer_id=customer_id)
        # BUG FIX: update() previously issued a GET and discarded ``values``.
        return self._update(values, f_data=f_data).json

    def delete(self, customer_id, account_id=None):
        f_data = self._data(account_id=account_id, customer_id=customer_id)
        return self._delete(f_data=f_data)
class PaymentMethod(Base):
    """CRUD controller for payment methods, nested under a customer."""
    parent = Customer
    resource_key = 'payment_method'
    resource_name = 'payment-methods'

    def create(self, customer_id, values, account_id=None):
        f_data = self._data(account_id=account_id, customer_id=customer_id)
        return self._create(values, f_data=f_data).json

    def list(self, customer_id, account_id=None):
        f_data = self._data(account_id=account_id, customer_id=customer_id)
        return self._list(f_data=f_data).json

    def get(self, customer_id, pm_id, account_id=None):
        f_data = self._data(
            account_id=account_id, customer_id=customer_id,
            payment_method_id=pm_id)
        return self._get(f_data=f_data).json

    def update(self, customer_id, pm_id, values, account_id=None):
        f_data = self._data(
            account_id=account_id, customer_id=customer_id,
            payment_method_id=pm_id)
        # BUG FIX: update() previously issued a GET and discarded ``values``.
        return self._update(values, f_data=f_data).json

    def delete(self, customer_id, pm_id, account_id=None):
        f_data = self._data(
            account_id=account_id, customer_id=customer_id,
            payment_method_id=pm_id)
        return self._delete(f_data=f_data)
class Product(Base):
    """CRUD controller for products, nested under an account."""
    parent = Account
    resource_name = 'products'

    def create(self, values, account_id=None):
        f_data = self._data(account_id=account_id)
        return self._create(values, f_data=f_data).json

    def list(self, account_id=None):
        f_data = self._data(account_id=account_id)
        return self._list(f_data=f_data).json

    def get(self, product_id, account_id=None):
        f_data = self._data(account_id=account_id, product_id=product_id)
        return self._get(f_data=f_data).json

    def update(self, product_id, values, account_id=None):
        f_data = self._data(account_id=account_id, product_id=product_id)
        # BUG FIX: update() previously issued a GET and discarded ``values``.
        return self._update(values, f_data=f_data).json

    def delete(self, product_id, account_id=None):
        f_data = self._data(account_id=account_id, product_id=product_id)
        # NOTE(review): delete returns .json here but the bare response in
        # Account/Customer -- confirm which convention is intended.
        return self._delete(f_data=f_data).json
class Plan(Base):
    """CRUD controller for billing plans, nested under an account."""
    parent = Account
    resource_name = 'plans'

    def create(self, values, account_id=None):
        """POST a new plan."""
        return self._create(
            values, f_data=self._data(account_id=account_id)).json

    def list(self, account_id=None):
        """GET all plans for the account."""
        return self._list(f_data=self._data(account_id=account_id)).json

    def get(self, plan_id, account_id=None):
        """GET a single plan by id."""
        return self._get(
            f_data=self._data(account_id=account_id, plan_id=plan_id)).json

    def update(self, plan_id, values, account_id=None):
        """PUT updated values onto a plan."""
        return self._update(
            values,
            f_data=self._data(account_id=account_id, plan_id=plan_id)).json

    def delete(self, plan_id, account_id=None):
        """DELETE a plan."""
        return self._delete(
            f_data=self._data(account_id=account_id, plan_id=plan_id)).json
class PlanItem(Base):
    """CRUD controller for the line items of a plan."""
    parent = Plan
    resource_name = 'items'

    def create(self, plan_id, values, account_id=None):
        """POST a new item onto a plan."""
        return self._create(
            values,
            f_data=self._data(account_id=account_id, plan_id=plan_id)).json

    def list(self, plan_id, account_id=None):
        """GET all items of a plan."""
        return self._list(
            f_data=self._data(account_id=account_id, plan_id=plan_id)).json

    def get(self, plan_id, item_id, account_id=None):
        """GET a single plan item."""
        return self._get(f_data=self._data(
            account_id=account_id, plan_id=plan_id, item_id=item_id)).json

    def update(self, plan_id, item_id, values, account_id=None):
        """PUT updated values onto a plan item."""
        return self._update(values, f_data=self._data(
            account_id=account_id, plan_id=plan_id, item_id=item_id)).json

    def delete(self, plan_id, item_id, account_id=None):
        """DELETE a plan item."""
        return self._delete(f_data=self._data(
            account_id=account_id, plan_id=plan_id, item_id=item_id)).json
class Subscription(Base):
    """CRUD controller for subscriptions, nested under an account."""
    parent = Account
    resource_name = 'subscriptions'

    def create(self, values, account_id=None):
        """POST a new subscription."""
        return self._create(
            values, f_data=self._data(account_id=account_id)).json

    def list(self, account_id=None):
        """GET all subscriptions for the account."""
        return self._list(f_data=self._data(account_id=account_id)).json

    def get(self, subscription_id, account_id=None):
        """GET a single subscription by id."""
        return self._get(f_data=self._data(
            account_id=account_id, subscription_id=subscription_id)).json

    def update(self, subscription_id, values, account_id=None):
        """PUT updated values onto a subscription."""
        return self._update(values, f_data=self._data(
            account_id=account_id, subscription_id=subscription_id)).json

    def delete(self, subscription_id, account_id=None):
        """DELETE a subscription."""
        return self._delete(f_data=self._data(
            account_id=account_id, subscription_id=subscription_id)).json
class Usage(Base):
    """CRUD controller for usage records, nested under an account.

    NOTE(review): the parameter names were copy-pasted from Subscription and
    are kept for backward compatibility, but the item URL built from
    resource_name='usages' is formatted with a ``usage_id`` placeholder.
    """
    parent = Account
    resource_name = 'usages'

    def create(self, values, account_id=None):
        f_data = self._data(account_id=account_id)
        return self._create(values, f_data=f_data).json

    def list(self, account_id=None):
        f_data = self._data(account_id=account_id)
        return self._list(f_data=f_data).json

    def get(self, subscription_id, account_id=None):
        # BUG FIX: the item URL template is .../usages/%(usage_id)s, but this
        # class supplied ``subscription_id`` so every item request failed URL
        # formatting; map the id onto the key the template expects.
        f_data = self._data(account_id=account_id,
                            usage_id=subscription_id)
        return self._get(f_data=f_data).json

    def update(self, subscription_id, values, account_id=None):
        f_data = self._data(account_id=account_id,
                            usage_id=subscription_id)
        return self._update(values, f_data=f_data).json

    def delete(self, subscription_id, account_id=None):
        f_data = self._data(account_id=account_id,
                            usage_id=subscription_id)
        return self._delete(f_data=f_data).json
# ``__all__`` must be a list of *strings* naming the public API; listing the
# class objects themselves makes ``from module import *`` fail with a
# TypeError ("Item in __all__ must be str").
__all__ = ['Account', 'Customer', 'PaymentMethod', 'Product', 'Plan',
           'PlanItem', 'Subscription', 'Usage']
| |
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This test verifies that PBBR sends BMLR.ntf correctly when multicast addresses are registered.
#
# Topology:
# ---- -(eth)-------
# | |
# PBBR----SBBR
# \ /
# Router1---Commissioner
#
import unittest
import thread_cert
from pktverify.packet_verifier import PacketVerifier
PBBR = 1
SBBR = 2
ROUTER1 = 3
COMMISSIONER = 4
REREG_DELAY = 4 # Seconds
MLR_TIMEOUT = 300 # Seconds
CUSTOM_MLR_TIMEOUT = 1000 # Seconds
MA1 = 'ff04::1'
MA2 = 'ff04::2'
MA3 = 'ff04::3'
MA4 = 'ff04::4'
MA5 = 'ff04::5'
class BBR_5_11_01(thread_cert.TestCase):
    """Verify that the Primary BBR (PBBR) sends BMLR.ntf (`/b/bmr`) on the
    Backbone link when multicast addresses are registered — by itself (MA1),
    by a router (MA2), and by the Commissioner with default/custom/zero
    timeouts (MA3-MA5) — and that the Secondary BBR never sends `/b/bmr`."""
    USE_MESSAGE_FACTORY = False
    TOPOLOGY = {
        PBBR: {
            'name': 'PBBR',
            'allowlist': [SBBR, ROUTER1],
            'is_otbr': True,
            'version': '1.2',
        },
        SBBR: {
            'name': 'SBBR',
            'allowlist': [PBBR, ROUTER1],
            'is_otbr': True,
            'version': '1.2',
        },
        ROUTER1: {
            'name': 'ROUTER1',
            'allowlist': [PBBR, SBBR, COMMISSIONER],
            'version': '1.2',
        },
        COMMISSIONER: {
            'name': 'COMMISSIONER',
            'allowlist': [ROUTER1],
            'version': '1.2',
        }
    }
    def test(self):
        """Bring up the topology, then register MA1-MA5 via different paths."""
        # PBBR forms the network and must become Primary Backbone Router.
        self.nodes[PBBR].start()
        self.wait_node_state(PBBR, 'leader', 5)
        self.nodes[PBBR].set_backbone_router(reg_delay=REREG_DELAY, mlr_timeout=MLR_TIMEOUT)
        self.nodes[PBBR].enable_backbone_router()
        self.wait_until(lambda: self.nodes[PBBR].is_primary_backbone_router, 5)
        # SBBR joins with BBR enabled but must remain Secondary.
        self.nodes[SBBR].start()
        self.wait_node_state(SBBR, 'router', 5)
        self.nodes[SBBR].set_backbone_router(reg_delay=REREG_DELAY, mlr_timeout=MLR_TIMEOUT)
        self.nodes[SBBR].enable_backbone_router()
        self.simulator.go(5)
        self.assertFalse(self.nodes[SBBR].is_primary_backbone_router)
        self.nodes[ROUTER1].start()
        self.wait_node_state(ROUTER1, 'router', 5)
        self.nodes[COMMISSIONER].start()
        self.wait_node_state(COMMISSIONER, 'router', 5)
        self.wait_route_established(COMMISSIONER, PBBR)
        self.nodes[COMMISSIONER].commissioner_start()
        self.simulator.go(10)
        self.assertEqual('active', self.nodes[COMMISSIONER].commissioner_state())
        # PBBR subscribes MA1 itself; ROUTER1 registers MA2 via MLR.
        self.nodes[PBBR].add_ipmaddr(MA1)
        self.simulator.go(REREG_DELAY)
        self.nodes[ROUTER1].add_ipmaddr(MA2)
        self.simulator.go(REREG_DELAY)
        # Commissioner registers MA3 with default timeout
        self.assertEqual((0, []), self.nodes[COMMISSIONER].register_multicast_listener(MA3, timeout=None))
        # Commissioner registers MA4 with a custom timeout
        self.assertEqual((0, []), self.nodes[COMMISSIONER].register_multicast_listener(MA4,
                                                                                      timeout=CUSTOM_MLR_TIMEOUT))
        # Commissioner unregisters MA5
        self.assertEqual((0, []), self.nodes[COMMISSIONER].register_multicast_listener(MA5, timeout=0))
        self.collect_ipaddrs()
        self.collect_rloc16s()
    def verify(self, pv: PacketVerifier):
        """Check the capture for the expected `/n/mr` and `/b/bmr` exchanges."""
        pkts = pv.pkts
        pv.add_common_vars()
        pv.summary.show()
        pv.verify_attached('ROUTER1')
        # Local bindings (shadow the module-level node IDs) for the pcap vars.
        ROUTER1 = pv.vars['ROUTER1']
        COMMISSIONER = pv.vars['COMMISSIONER']
        PBBR_ETH = pv.vars['PBBR_ETH']
        SBBR_ETH = pv.vars['SBBR_ETH']
        # Verify SBBR must not send `/b/bmr` during the test.
        pkts.filter_eth_src(SBBR_ETH).filter_coap_request('/b/bmr').must_not_next()
        # Verify PBBR sends `/b/bmr` on the Backbone link for MA1 with default timeout.
        pkts.filter_eth_src(PBBR_ETH).filter_coap_request('/b/bmr').must_next().must_verify(f"""
            thread_meshcop.tlv.ipv6_addr == ['{MA1}']
            and thread_bl.tlv.timeout == {MLR_TIMEOUT}
            and ipv6.src.is_link_local
            """)
        # Router registers MA2 with default timeout
        pkts.filter_wpan_src64(ROUTER1).filter_coap_request('/n/mr').must_next().must_verify(f"""
            thread_meshcop.tlv.ipv6_addr == ['{MA2}']
            and thread_bl.tlv.timeout is null
            """)
        # Verify PBBR sends `/b/bmr` on the Backbone link for MA2 with default timeout.
        pkts.filter_eth_src(PBBR_ETH).filter_coap_request('/b/bmr').must_next().must_verify(f"""
            thread_meshcop.tlv.ipv6_addr == ['{MA2}']
            and thread_bl.tlv.timeout == {MLR_TIMEOUT}
            and ipv6.src.is_link_local
            """)
        # Commissioner registers MA3 with default timeout
        pkts.filter_wpan_src64(COMMISSIONER).filter_coap_request('/n/mr').must_next().must_verify(f"""
            thread_meshcop.tlv.ipv6_addr == ['{MA3}']
            and thread_bl.tlv.timeout is null
            """)
        # Verify PBBR sends `/b/bmr` on the Backbone link for MA3 with default timeout.
        pkts.filter_eth_src(PBBR_ETH).filter_coap_request('/b/bmr').must_next().must_verify(f"""
            thread_meshcop.tlv.ipv6_addr == ['{MA3}']
            and thread_bl.tlv.timeout == {MLR_TIMEOUT}
            and ipv6.src.is_link_local
            """)
        # Commissioner registers MA4 with custom timeout
        pkts.filter_wpan_src64(COMMISSIONER).filter_coap_request('/n/mr').must_next().must_verify(f"""
            thread_meshcop.tlv.ipv6_addr == ['{MA4}']
            and thread_nm.tlv.timeout == {CUSTOM_MLR_TIMEOUT}
            """)
        # Verify PBBR sends `/b/bmr` on the Backbone link for MA4 with custom timeout.
        pkts.filter_eth_src(PBBR_ETH).filter_coap_request('/b/bmr').must_next().must_verify(f"""
            thread_meshcop.tlv.ipv6_addr == ['{MA4}']
            and thread_bl.tlv.timeout == {CUSTOM_MLR_TIMEOUT}
            and ipv6.src.is_link_local
            """)
        # Commissioner unregisters MA5
        pkts.filter_wpan_src64(COMMISSIONER).filter_coap_request('/n/mr').must_next().must_verify(f"""
            thread_meshcop.tlv.ipv6_addr == ['{MA5}']
            and thread_nm.tlv.timeout == 0
            """)
        # Verify PBBR sends `/b/bmr` on the Backbone link for MA5 with timeout equal to zero.
        pkts.filter_eth_src(PBBR_ETH).filter_coap_request('/b/bmr').must_next().must_verify(f"""
            thread_meshcop.tlv.ipv6_addr == ['{MA5}']
            and thread_bl.tlv.timeout == 0
            and ipv6.src.is_link_local
            """)
# Allow running this test case directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from tensorflow import keras
from ..utils.compute_overlap import compute_overlap
class AnchorParameters:
    """Configuration describing how anchors are generated.

    Args
        sizes   : List of sizes, one per feature level.
        strides : List of strides, one per feature level.
        ratios  : List of aspect ratios used at each feature-map location.
        scales  : List of scales used at each feature-map location.
    """

    def __init__(self, sizes, strides, ratios, scales):
        self.sizes = sizes
        self.strides = strides
        self.ratios = ratios
        self.scales = scales

    def num_anchors(self):
        """Return the anchor count per location (ratios x scales)."""
        return len(self.ratios) * len(self.scales)
"""
The default anchor parameters.
"""
AnchorParameters.default = AnchorParameters(
sizes = [32, 64, 128, 256, 512],
strides = [8, 16, 32, 64, 128],
ratios = np.array([0.5, 1, 2], keras.backend.floatx()),
scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)], keras.backend.floatx()),
)
def anchor_targets_bbox(
    anchors,
    image_group,
    annotations_group,
    num_classes,
    negative_overlap=0.4,
    positive_overlap=0.5
):
    """ Generate anchor targets for bbox detection.
    Args
        anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
        image_group: List of BGR images.
        annotations_group: List of annotation dictionaries with each annotation containing 'labels' and 'bboxes' of an image.
        num_classes: Number of classes to predict.
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).
    Returns
        labels_batch: batch that contains labels & anchor states (np.array of shape (batch_size, N, num_classes + 1),
                      where N is the number of anchors for an image and the last column defines the anchor state (-1 for ignore, 0 for bg, 1 for fg).
        regression_batch: batch that contains bounding-box regression targets for an image & anchor states (np.array of shape (batch_size, N, 4 + 1),
                      where N is the number of anchors for an image, the first 4 columns define regression targets for (x1, y1, x2, y2) and the
                      last column defines anchor states (-1 for ignore, 0 for bg, 1 for fg).
    """
    assert(len(image_group) == len(annotations_group)), "The length of the images and annotations need to be equal."
    assert(len(annotations_group) > 0), "No data received to compute anchor targets for."
    for annotations in annotations_group:
        assert('bboxes' in annotations), "Annotations should contain bboxes."
        assert('labels' in annotations), "Annotations should contain labels."
    batch_size = len(image_group)
    # Last column of both batches is the anchor state; zeros mean "background".
    regression_batch  = np.zeros((batch_size, anchors.shape[0], 4 + 1), dtype=keras.backend.floatx())
    labels_batch      = np.zeros((batch_size, anchors.shape[0], num_classes + 1), dtype=keras.backend.floatx())
    # compute labels and regression targets
    for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
        if annotations['bboxes'].shape[0]:
            # obtain indices of gt annotations with the greatest overlap
            positive_indices, ignore_indices, argmax_overlaps_inds = compute_gt_annotations(anchors, annotations['bboxes'], negative_overlap, positive_overlap)
            # Mark anchor states: -1 = ignore, 1 = positive (0 stays background).
            labels_batch[index, ignore_indices, -1]       = -1
            labels_batch[index, positive_indices, -1]     = 1
            regression_batch[index, ignore_indices, -1]   = -1
            regression_batch[index, positive_indices, -1] = 1
            # compute target class labels
            labels_batch[index, positive_indices, annotations['labels'][argmax_overlaps_inds[positive_indices]].astype(int)] = 1
            # Regression targets are computed for every anchor against its
            # best-overlapping annotation; only positive rows are trained on.
            regression_batch[index, :, :-1] = bbox_transform(anchors, annotations['bboxes'][argmax_overlaps_inds, :])
        # ignore annotations outside of image
        # NOTE(review): a non-empty shape tuple is always truthy, so this
        # guard effectively always runs for ndarray inputs — confirm intent.
        if image.shape:
            anchors_centers = np.vstack([(anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2]).T
            indices = np.logical_or(anchors_centers[:, 0] >= image.shape[1], anchors_centers[:, 1] >= image.shape[0])
            # Anchors whose centre falls outside the image are ignored.
            labels_batch[index, indices, -1]     = -1
            regression_batch[index, indices, -1] = -1
    return regression_batch, labels_batch
def compute_gt_annotations(
    anchors,
    annotations,
    negative_overlap=0.4,
    positive_overlap=0.5
):
    """ Obtain indices of gt annotations with the greatest overlap.
    Args
        anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
        annotations: np.array of shape (N, 5) for (x1, y1, x2, y2, label).
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).
    Returns
        positive_indices: indices of positive anchors
        ignore_indices: indices of ignored anchors
        argmax_overlaps_inds: ordered overlaps indices
    """
    # Pairwise overlap matrix (anchors x annotations); semantics come from
    # ..utils.compute_overlap (presumably IoU — not visible here).
    overlaps = compute_overlap(anchors.astype(np.float64), annotations.astype(np.float64))
    # For each anchor: index of its best annotation, and that best overlap.
    argmax_overlaps_inds = np.argmax(overlaps, axis=1)
    max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]
    # assign "dont care" labels
    positive_indices = max_overlaps >= positive_overlap
    # Anchors in the grey zone (neg < overlap < pos) are ignored during training.
    ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices
    return positive_indices, ignore_indices, argmax_overlaps_inds
def layer_shapes(image_shape, model):
    """Compute layer shapes given input image shape and the model.
    Args
        image_shape: The shape of the image.
        model: The model to use for computing how the image shape is transformed in the pyramid.
    Returns
        A dictionary mapping layer names to image shapes.
    """
    # Seed with the input layer; (None,) stands for the batch dimension.
    shape = {
        model.layers[0].name: (None,) + image_shape,
    }
    # Walk the graph in layer order, propagating shapes from inbound layers.
    # NOTE: `_inbound_nodes` is a private Keras attribute and may change
    # between Keras versions.
    for layer in model.layers[1:]:
        nodes = layer._inbound_nodes
        for node in nodes:
            if isinstance(node.inbound_layers, keras.layers.Layer):
                # Single inbound layer (not wrapped in a list).
                inputs = [shape[node.inbound_layers.name]]
            else:
                inputs = [shape[lr.name] for lr in node.inbound_layers]
            if not inputs:
                continue
            shape[layer.name] = layer.compute_output_shape(inputs[0] if len(inputs) == 1 else inputs)
    return shape
def make_shapes_callback(model):
    """ Make a function for getting the shape of the pyramid levels.
    The returned callable reads the exact output shapes of the model's
    P<level> layers (via `layer_shapes`) instead of guessing them.
    """
    def get_shapes(image_shape, pyramid_levels):
        shape = layer_shapes(image_shape, model)
        # Rows/cols of each pyramid output; index 0 is the batch dimension.
        image_shapes = [shape["P{}".format(level)][1:3] for level in pyramid_levels]
        return image_shapes
    return get_shapes
def guess_shapes(image_shape, pyramid_levels):
    """Guess shapes based on pyramid levels.
    Args
        image_shape: The shape of the image.
        pyramid_levels: A list of what pyramid levels are used.
    Returns
        A list of image shapes at each pyramid level.
    """
    base = np.array(image_shape[:2])
    # Ceiling division of (rows, cols) by the stride 2**level of each level.
    return [(base + 2 ** level - 1) // (2 ** level) for level in pyramid_levels]
def anchors_for_shape(
    image_shape,
    pyramid_levels=None,
    anchor_params=None,
    shapes_callback=None,
):
    """ Generators anchors for a given shape.
    Args
        image_shape: The shape of the image.
        pyramid_levels: List of ints representing which pyramids to use (defaults to [3, 4, 5, 6, 7]).
        anchor_params: Struct containing anchor parameters. If None, default values are used.
        shapes_callback: Function to call for getting the shape of the image at different pyramid levels.
    Returns
        np.array of shape (N, 4) containing the (x1, y1, x2, y2) coordinates for the anchors.
    """
    if pyramid_levels is None:
        pyramid_levels = [3, 4, 5, 6, 7]
    if anchor_params is None:
        anchor_params = AnchorParameters.default
    if shapes_callback is None:
        # Fall back to stride-based estimation when no model is available.
        shapes_callback = guess_shapes
    image_shapes = shapes_callback(image_shape, pyramid_levels)
    # compute anchors over all pyramid levels
    all_anchors = np.zeros((0, 4))
    for idx, p in enumerate(pyramid_levels):
        # Base anchors for this level, then replicated over every grid cell.
        anchors         = generate_anchors(
            base_size=anchor_params.sizes[idx],
            ratios=anchor_params.ratios,
            scales=anchor_params.scales
        )
        shifted_anchors = shift(image_shapes[idx], anchor_params.strides[idx], anchors)
        all_anchors     = np.append(all_anchors, shifted_anchors, axis=0)
    return all_anchors
def shift(shape, stride, anchors):
    """ Produce shifted anchors based on shape of the map and stride size.
    Args
        shape  : Shape to shift the anchors over.
        stride : Stride to shift the anchors with over the shape.
        anchors: The anchors to apply at each location.
    """
    # Cell centres, offset half a stride from the top-left corner.
    centres_x = (np.arange(0, shape[1]) + 0.5) * stride
    centres_y = (np.arange(0, shape[0]) + 0.5) * stride
    grid_x, grid_y = np.meshgrid(centres_x, centres_y)

    # One (x, y, x, y) offset row per grid cell, shape (K, 4).
    offsets = np.stack(
        (grid_x.ravel(), grid_y.ravel(), grid_x.ravel(), grid_y.ravel()),
        axis=1,
    )

    # Broadcast the A anchors over the K cells and flatten to (K*A, 4).
    shifted = anchors[np.newaxis, :, :] + offsets[:, np.newaxis, :]
    return shifted.reshape((-1, 4))
def generate_anchors(base_size=16, ratios=None, scales=None):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales w.r.t. a reference window.
    """
    if ratios is None:
        ratios = AnchorParameters.default.ratios
    if scales is None:
        scales = AnchorParameters.default.scales

    count = len(ratios) * len(scales)
    anchors = np.zeros((count, 4))

    # Start every box as a square of side base_size * scale
    # (scales cycle fastest, ratios slowest).
    anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T

    # Rescale width/height per ratio while preserving each box's area.
    areas = anchors[:, 2] * anchors[:, 3]
    ratio_per_row = np.repeat(ratios, len(scales))
    anchors[:, 2] = np.sqrt(areas / ratio_per_row)
    anchors[:, 3] = anchors[:, 2] * ratio_per_row

    # Centre on the origin: (0, 0, w, h) -> (-w/2, -h/2, w/2, h/2).
    anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
    anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
    return anchors
def bbox_transform(anchors, gt_boxes, mean=None, std=None):
    """Compute bounding-box regression targets for an image."""
    # Default mean/std were calculated from the COCO dataset; normalising the
    # targets follows the Fast R-CNN paper.
    # See https://github.com/fizyr/keras-retinanet/issues/1273#issuecomment-585828825 for more details
    mean = np.array([0, 0, 0, 0]) if mean is None else mean
    std = np.array([0.2, 0.2, 0.2, 0.2]) if std is None else std

    if isinstance(mean, (list, tuple)):
        mean = np.array(mean)
    elif not isinstance(mean, np.ndarray):
        raise ValueError('Expected mean to be a np.ndarray, list or tuple. Received: {}'.format(type(mean)))
    if isinstance(std, (list, tuple)):
        std = np.array(std)
    elif not isinstance(std, np.ndarray):
        raise ValueError('Expected std to be a np.ndarray, list or tuple. Received: {}'.format(type(std)))

    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]

    # Per-corner offsets relative to the anchor size; this parametrization
    # gave marginally better results according to a keras-retinanet author
    # (same issue link as above).
    deltas = np.stack(
        (
            (gt_boxes[:, 0] - anchors[:, 0]) / widths,
            (gt_boxes[:, 1] - anchors[:, 1]) / heights,
            (gt_boxes[:, 2] - anchors[:, 2]) / widths,
            (gt_boxes[:, 3] - anchors[:, 3]) / heights,
        ),
        axis=1,
    )
    return (deltas - mean) / std
| |
#! /usr/bin/env python
"""DBF accessing helpers.
FIXME: more documentation needed
Examples:
Create new table, setup structure, add records:
dbf = Dbf(filename, new=True)
dbf.addField(
("NAME", "C", 15),
("SURNAME", "C", 25),
("INITIALS", "C", 10),
("BIRTHDATE", "D"),
)
for (n, s, i, b) in (
("John", "Miller", "YC", (1980, 10, 11)),
("Andy", "Larkin", "", (1980, 4, 11)),
):
rec = dbf.newRecord()
rec["NAME"] = n
rec["SURNAME"] = s
rec["INITIALS"] = i
rec["BIRTHDATE"] = b
rec.store()
dbf.close()
Open existed dbf, read some data:
dbf = Dbf(filename, True)
for rec in dbf:
for fldName in dbf.fieldNames:
print '%s:\t %s (%s)' % (fldName, rec[fldName],
type(rec[fldName]))
print
dbf.close()
"""
"""History (most recent first):
11-feb-2007 [als] export INVALID_VALUE;
Dbf: added .ignoreErrors, .INVALID_VALUE
04-jul-2006 [als] added export declaration
20-dec-2005 [yc] removed fromStream and newDbf methods:
use argument of __init__ call must be used instead;
added class fields pointing to the header and
record classes.
17-dec-2005 [yc] split to several modules; reimplemented
13-dec-2005 [yc] adapted to the changes of the `strutil` module.
13-sep-2002 [als] support FoxPro Timestamp datatype
15-nov-1999 [jjk] documentation updates, add demo
24-aug-1998 [jjk] add some encodeValue methods (not tested), other tweaks
08-jun-1998 [jjk] fix problems, add more features
20-feb-1998 [jjk] fix problems, add more features
19-feb-1998 [jjk] add create/write capabilities
18-feb-1998 [jjk] from dbfload.py
"""
__version__ = "$Revision: 1.7 $"[11:-2]
__date__ = "$Date: 2007/02/11 09:23:13 $"[7:-2]
__author__ = "Jeff Kunce <kuncej@mail.conservation.state.mo.us>"
__all__ = ["Dbf"]
from . import header
from . import record
from .utils import INVALID_VALUE
class Dbf(object):
    """DBF accessor.
    FIXME:
        docs and examples needed (don't forget to tell
        about problems adding new fields on the fly)
    Implementation notes:
        ``_new`` field is used to indicate whether this is
        a new data table. `addField` could be used only for
        the new tables! If at least one record was appended
        to the table it's structure couldn't be changed.
    """
    __slots__ = ("name", "header", "stream",
                 "_changed", "_new", "_ignore_errors")
    # Pluggable header/record implementations (see `header` and `record` modules).
    HeaderClass = header.DbfHeader
    RecordClass = record.DbfRecord
    INVALID_VALUE = INVALID_VALUE
    ## initialization and creation helpers
    def __init__(self, f, readOnly=False, new=False, ignoreErrors=False):
        """Initialize instance.
        Arguments:
            f:
                Filename or file-like object.
            new:
                True if new data table must be created. Assume
                data table exists if this argument is False.
            readOnly:
                if ``f`` argument is a string file will
                be opened in read-only mode; in other cases
                this argument is ignored. This argument is ignored
                even if ``new`` argument is True.
            ignoreErrors:
                if set, failing field value conversion will return
                ``INVALID_VALUE`` instead of raising conversion error.
        """
        if isinstance(f, str):
            # a filename
            self.name = f
            if new:
                # new table (table file must be
                # created or opened and truncated)
                self.stream = open(f, "w+b")
            else:
                # table file must exist
                self.stream = open(f, ("r+b", "rb")[bool(readOnly)])
        else:
            # a stream
            self.name = getattr(f, "name", "")
            self.stream = f
        if new:
            # if this is a new table, header will be empty
            self.header = self.HeaderClass()
        else:
            # or instantiated using stream
            self.header = self.HeaderClass.fromStream(self.stream)
        self.ignoreErrors = ignoreErrors
        self._new = bool(new)
        self._changed = False
    ## properties
    closed = property(lambda self: self.stream.closed)
    recordCount = property(lambda self: self.header.recordCount)
    fieldNames = property(
        lambda self: [_fld.name for _fld in self.header.fields])
    fieldDefs = property(lambda self: self.header.fields)
    changed = property(lambda self: self._changed or self.header.changed)
    # The setter is defined as a plain function first so it can be handed to
    # ``property`` below; the subsequent assignment shadows the function name.
    def ignoreErrors(self, value):
        """Update `ignoreErrors` flag on the header object and self"""
        self.header.ignoreErrors = self._ignore_errors = bool(value)
    ignoreErrors = property(
        lambda self: self._ignore_errors,
        ignoreErrors,
        doc="""Error processing mode for DBF field value conversion
        if set, failing field value conversion will return
        ``INVALID_VALUE`` instead of raising conversion error.
        """)
    ## protected methods
    def _fixIndex(self, index):
        """Return fixed index.
        This method fails if index isn't a numeric object
        (long or int). Or index isn't in a valid range
        (less or equal to the number of records in the db).
        If ``index`` is a negative number, it will be
        treated as a negative indexes for list objects.
        Return:
            Return value is numeric object meaning valid index.
        """
        if not isinstance(index, int):
            raise TypeError("Index must be a numeric object")
        if index < 0:
            # index from the right side
            # fix it to the left-side index
            # NOTE(review): plain list semantics would be ``index += len(self)``;
            # the extra ``+ 1`` shifts negative indexes by one (``dbf[-1]``
            # raises IndexError, ``dbf[-2]`` addresses the last record).
            # Confirm whether this is intentional before changing it.
            index += len(self) + 1
        if index >= len(self):
            raise IndexError("Record index out of range")
        return index
    ## interface methods
    def close(self):
        """Flush pending changes and close the underlying stream."""
        self.flush()
        self.stream.close()
    def flush(self):
        """Flush data to the associated stream."""
        if self.changed:
            # Re-stamp the header's modification date before writing it back.
            self.header.setCurrentDate()
            self.header.write(self.stream)
        self.stream.flush()
        self._changed = False
    def indexOfFieldName(self, name):
        """Index of field named ``name``."""
        # FIXME: move this to header class
        names = [f.name for f in self.header.fields]
        return names.index(name.upper())
    def newRecord(self):
        """Return new record, which belong to this table."""
        return self.RecordClass(self)
    def append(self, record):
        """Append ``record`` to the database."""
        record.index = self.header.recordCount
        record._write()
        self.header.recordCount += 1
        self._changed = True
        # Once a record exists, the structure is frozen (see `addField`).
        self._new = False
    def addField(self, *defs):
        """Add field definitions.
        For more information see `header.DbfHeader.addField`.
        """
        if self._new:
            self.header.addField(*defs)
        else:
            raise TypeError("At least one record was added, "
                            "structure can't be changed")
    ## 'magic' methods (representation and sequence interface)
    def __repr__(self):
        return "Dbf stream '%s'\n" % self.stream + repr(self.header)
    def __len__(self):
        """Return number of records."""
        return self.recordCount
    def __getitem__(self, index):
        """Return `DbfRecord` instance."""
        return self.RecordClass.fromStream(self, self._fixIndex(index))
    def __setitem__(self, index, record):
        """Write `DbfRecord` instance to the stream."""
        record.index = self._fixIndex(index)
        record._write()
        self._changed = True
        self._new = False
    #def __del__(self):
    #    """Flush stream upon deletion of the object."""
    #    self.flush()
def demoRead(filename):
    """Open an existing table read-only and dump every record."""
    table = Dbf(filename, True)
    for rec in table:
        print()
        print(repr(rec))
    table.close()
def demoCreate(filename):
    """Create a demo table, fill it with four records and print its structure."""
    table = Dbf(filename, new=True)
    table.addField(
        ("NAME", "C", 15),
        ("SURNAME", "C", 25),
        ("INITIALS", "C", 10),
        ("BIRTHDATE", "D"),
    )
    sample_rows = (
        ("John", "Miller", "YC", (1981, 1, 2)),
        ("Andy", "Larkin", "AL", (1982, 3, 4)),
        ("Bill", "Clinth", "", (1983, 5, 6)),
        ("Bobb", "McNail", "", (1984, 7, 8)),
    )
    for name, surname, initials, birthdate in sample_rows:
        rec = table.newRecord()
        rec["NAME"] = name
        rec["SURNAME"] = surname
        rec["INITIALS"] = initials
        rec["BIRTHDATE"] = birthdate
        rec.store()
    print(repr(table))
    table.close()
if __name__ == '__main__':
    import sys

    # First CLI argument is the table name; default to "county.dbf".
    # A conditional expression replaces the old ``and/or`` trick, which
    # silently falls through to the default when the chosen value is falsy.
    _name = sys.argv[1] if len(sys.argv) > 1 else "county.dbf"
    demoCreate(_name)
    demoRead(_name)

# vim: set et sw=4 sts=4 :
| |
from __future__ import division, absolute_import, print_function
import sys
import time
from datetime import date
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_allclose, assert_raises,
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
has_nested_fields, easy_dtype, flatten_dtype
)
from numpy.compat import unicode
class TestLineSplitter(object):
    "Tests the LineSplitter class."
    def test_no_delimiter(self):
        "Test LineSplitter w/o delimiter"
        # Default/empty delimiter: split on runs of whitespace, comment
        # text (after '#') is stripped.
        strg = " 1 2 3 4 5 # test"
        test = LineSplitter()(strg)
        assert_equal(test, ['1', '2', '3', '4', '5'])
        test = LineSplitter('')(strg)
        assert_equal(test, ['1', '2', '3', '4', '5'])
    def test_space_delimiter(self):
        "Test space delimiter"
        # A single-space delimiter preserves empty fields; a double-space
        # delimiter groups runs separated by one space.
        strg = " 1 2 3 4 5 # test"
        test = LineSplitter(' ')(strg)
        assert_equal(test, ['1', '2', '3', '4', '', '5'])
        test = LineSplitter('  ')(strg)
        assert_equal(test, ['1 2 3 4', '5'])
    def test_tab_delimiter(self):
        "Test tab delimiter"
        strg = " 1\t 2\t 3\t 4\t 5 6"
        test = LineSplitter('\t')(strg)
        assert_equal(test, ['1', '2', '3', '4', '5 6'])
        strg = " 1 2\t 3 4\t 5 6"
        test = LineSplitter('\t')(strg)
        assert_equal(test, ['1 2', '3 4', '5 6'])
    def test_other_delimiter(self):
        "Test LineSplitter on delimiter"
        strg = "1,2,3,4,,5"
        test = LineSplitter(',')(strg)
        assert_equal(test, ['1', '2', '3', '4', '', '5'])
        #
        strg = " 1,2,3,4,,5 # test"
        test = LineSplitter(',')(strg)
        assert_equal(test, ['1', '2', '3', '4', '', '5'])
    def test_constant_fixed_width(self):
        "Test LineSplitter w/ fixed-width fields"
        # An int delimiter means constant-width fields of that many chars.
        strg = " 1 2 3 4 5 # test"
        test = LineSplitter(3)(strg)
        assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
        #
        strg = " 1 3 4 5 6# test"
        test = LineSplitter(20)(strg)
        assert_equal(test, ['1 3 4 5 6'])
        #
        strg = " 1 3 4 5 6# test"
        test = LineSplitter(30)(strg)
        assert_equal(test, ['1 3 4 5 6'])
    def test_variable_fixed_width(self):
        # A tuple delimiter gives per-field widths.
        strg = " 1 3 4 5 6# test"
        test = LineSplitter((3, 6, 6, 3))(strg)
        assert_equal(test, ['1', '3', '4 5', '6'])
        #
        strg = " 1 3 4 5 6# test"
        test = LineSplitter((6, 6, 9))(strg)
        assert_equal(test, ['1', '3 4', '5 6'])
# -----------------------------------------------------------------------------
class TestNameValidator(object):
    "Tests the NameValidator class."
    def test_case_sensitivity(self):
        "Test case sensitivity"
        names = ['A', 'a', 'b', 'c']
        # Case-sensitive (default): all four names are distinct.
        test = NameValidator().validate(names)
        assert_equal(test, ['A', 'a', 'b', 'c'])
        # Case-insensitive: 'A' and 'a' collide, the duplicate gets '_1'.
        test = NameValidator(case_sensitive=False).validate(names)
        assert_equal(test, ['A', 'A_1', 'B', 'C'])
        test = NameValidator(case_sensitive='upper').validate(names)
        assert_equal(test, ['A', 'A_1', 'B', 'C'])
        test = NameValidator(case_sensitive='lower').validate(names)
        assert_equal(test, ['a', 'a_1', 'b', 'c'])
        # check exceptions
        assert_raises(ValueError, NameValidator, case_sensitive='foobar')
    def test_excludelist(self):
        "Test excludelist"
        # Excluded (reserved) names get a trailing underscore appended.
        names = ['dates', 'data', 'Other Data', 'mask']
        validator = NameValidator(excludelist=['dates', 'data', 'mask'])
        test = validator.validate(names)
        assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
    def test_missing_names(self):
        "Test validate missing names"
        # Empty names are replaced with 'f<i>' defaults, avoiding collisions.
        namelist = ('a', 'b', 'c')
        validator = NameValidator()
        assert_equal(validator(namelist), ['a', 'b', 'c'])
        namelist = ('', 'b', 'c')
        assert_equal(validator(namelist), ['f0', 'b', 'c'])
        namelist = ('a', 'b', '')
        assert_equal(validator(namelist), ['a', 'b', 'f0'])
        namelist = ('', 'f0', '')
        assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
    def test_validate_nb_names(self):
        "Test validate nb names"
        # nbfields truncates or pads the name list to the requested length.
        namelist = ('a', 'b', 'c')
        validator = NameValidator()
        assert_equal(validator(namelist, nbfields=1), ('a',))
        assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
                     ['a', 'b', 'c', 'g0', 'g1'])
    def test_validate_wo_names(self):
        "Test validate no names"
        namelist = None
        validator = NameValidator()
        assert_(validator(namelist) is None)
        assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
# -----------------------------------------------------------------------------
def _bytes_to_date(s):
return date(*time.strptime(s, "%Y-%m-%d")[:3])
class TestStringConverter(object):
    "Test StringConverter"
    def test_creation(self):
        "Test creation of a StringConverter"
        converter = StringConverter(int, -99999)
        assert_equal(converter._status, 1)
        assert_equal(converter.default, -99999)
    def test_upgrade(self):
        "Tests the upgrade method."
        converter = StringConverter()
        assert_equal(converter._status, 0)
        # test int
        assert_equal(converter.upgrade('0'), 0)
        assert_equal(converter._status, 1)
        # On systems where long defaults to 32-bit, the statuses will be
        # offset by one, so we check for this here.
        import numpy.core.numeric as nx
        status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)
        # test int > 2**32
        assert_equal(converter.upgrade('17179869184'), 17179869184)
        assert_equal(converter._status, 1 + status_offset)
        # test float
        assert_allclose(converter.upgrade('0.'), 0.0)
        assert_equal(converter._status, 2 + status_offset)
        # test complex
        assert_equal(converter.upgrade('0j'), complex('0j'))
        assert_equal(converter._status, 3 + status_offset)
        # test str
        # note that the longdouble type has been skipped, so the
        # _status increases by 2. Everything should succeed with
        # unicode conversion (5).
        for s in ['a', u'a', b'a']:
            res = converter.upgrade(s)
            assert_(type(res) is unicode)
            assert_equal(res, u'a')
            assert_equal(converter._status, 5 + status_offset)
    def test_missing(self):
        "Tests the use of missing values."
        converter = StringConverter(missing_values=('missing',
                                                    'missed'))
        converter.upgrade('0')
        assert_equal(converter('0'), 0)
        assert_equal(converter(''), converter.default)
        assert_equal(converter('missing'), converter.default)
        assert_equal(converter('missed'), converter.default)
        # A non-missing, non-convertible value must raise ValueError.
        # (The previous try/except-pass form passed even when nothing
        # was raised, silently masking regressions.)
        assert_raises(ValueError, converter, 'miss')
    def test_upgrademapper(self):
        "Tests updatemapper"
        dateparser = _bytes_to_date
        # Register the date parser in the class-level mapper, then make sure
        # conversion and the missing-value default both work.
        StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
        convert = StringConverter(dateparser, date(2000, 1, 1))
        test = convert('2001-01-01')
        assert_equal(test, date(2001, 1, 1))
        test = convert('2009-01-01')
        assert_equal(test, date(2009, 1, 1))
        test = convert('')
        assert_equal(test, date(2000, 1, 1))
    def test_string_to_object(self):
        "Make sure that string-to-object functions are properly recognized"
        old_mapper = StringConverter._mapper[:]  # copy of list
        conv = StringConverter(_bytes_to_date)
        # The class-level mapper must not grow from instantiating a converter.
        assert_equal(conv._mapper, old_mapper)
        assert_(hasattr(conv, 'default'))
    def test_keep_default(self):
        "Make sure we don't lose an explicit default"
        converter = StringConverter(None, missing_values='',
                                    default=-999)
        converter.upgrade('3.14159265')
        assert_equal(converter.default, -999)
        assert_equal(converter.type, np.dtype(float))
        #
        converter = StringConverter(
            None, missing_values='', default=0)
        converter.upgrade('3.14159265')
        assert_equal(converter.default, 0)
        assert_equal(converter.type, np.dtype(float))
    def test_keep_default_zero(self):
        "Check that we don't lose a default of 0"
        converter = StringConverter(int, default=0,
                                    missing_values="N/A")
        assert_equal(converter.default, 0)
    def test_keep_missing_values(self):
        "Check that we're not losing missing values"
        converter = StringConverter(int, default=0,
                                    missing_values="N/A")
        assert_equal(
            converter.missing_values, set(['', 'N/A']))
    def test_int64_dtype(self):
        "Check that int64 integer types can be specified"
        converter = StringConverter(np.int64, default=0)
        val = "-9223372036854775807"
        assert_(converter(val) == -9223372036854775807)
        val = "9223372036854775807"
        assert_(converter(val) == 9223372036854775807)
    def test_uint64_dtype(self):
        "Check that uint64 integer types can be specified"
        converter = StringConverter(np.uint64, default=0)
        val = "9223372043271415339"
        assert_(converter(val) == 9223372043271415339)
class TestMiscFunctions(object):
    """Tests for the _iotools helpers has_nested_fields, easy_dtype and
    flatten_dtype."""
    def test_has_nested_dtype(self):
        "Test has_nested_dtype"
        ndtype = np.dtype(float)
        assert_equal(has_nested_fields(ndtype), False)
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        assert_equal(has_nested_fields(ndtype), False)
        # Only a field whose own dtype has fields counts as nested.
        ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        assert_equal(has_nested_fields(ndtype), True)
    def test_easy_dtype(self):
        "Test ndtype on dtypes"
        # Simple case
        ndtype = float
        assert_equal(easy_dtype(ndtype), np.dtype(float))
        # As string w/o names
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype),
                     np.dtype([('f0', "i4"), ('f1', "f8")]))
        # As string w/o names but different default format
        assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
                     np.dtype([('field_000', "i4"), ('field_001', "f8")]))
        # As string w/ names
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names="a, b"),
                     np.dtype([('a', "i4"), ('b', "f8")]))
        # As string w/ names (too many)
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([('a', "i4"), ('b', "f8")]))
        # As string w/ names (not enough)
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names=", b"),
                     np.dtype([('f0', "i4"), ('b', "f8")]))
        # ... (with different default format)
        assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
                     np.dtype([('a', "i4"), ('f00', "f8")]))
        # As list of tuples w/o names
        ndtype = [('A', int), ('B', float)]
        assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
        # As list of tuples w/ names
        assert_equal(easy_dtype(ndtype, names="a,b"),
                     np.dtype([('a', int), ('b', float)]))
        # As list of tuples w/ not enough names
        assert_equal(easy_dtype(ndtype, names="a"),
                     np.dtype([('a', int), ('f0', float)]))
        # As list of tuples w/ too many names
        assert_equal(easy_dtype(ndtype, names="a,b,c"),
                     np.dtype([('a', int), ('b', float)]))
        # As list of types w/o names
        ndtype = (int, float, float)
        assert_equal(easy_dtype(ndtype),
                     np.dtype([('f0', int), ('f1', float), ('f2', float)]))
        # As list of types w names
        ndtype = (int, float, float)
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([('a', int), ('b', float), ('c', float)]))
        # As simple dtype w/ names
        ndtype = np.dtype(float)
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
        # As simple dtype w/o names (but multiple fields)
        ndtype = np.dtype(float)
        assert_equal(
            easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
            np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
    def test_flatten_dtype(self):
        "Testing flatten_dtype"
        # Standard dtype
        dt = np.dtype([("a", "f8"), ("b", "f8")])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, float])
        # Recursive dtype
        dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
        # dtype with shaped fields
        dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, int])
        # flatten_base=True repeats each type once per element of its shape.
        dt_flat = flatten_dtype(dt, True)
        assert_equal(dt_flat, [float] * 2 + [int] * 3)
        # dtype w/ titles
        dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, float])
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import os
import pickle
import numpy as np
import warnings
from pymatgen import SETTINGS
import scipy.constants as const
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.vasp.inputs import Incar, Poscar, Kpoints, Potcar, \
PotcarSingle, VaspInput
from pymatgen import Composition, Structure
from pymatgen.electronic_structure.core import Magmom
from monty.io import zopen
"""
Created on Jul 16, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jul 16, 2012"
# Directory containing the shared VASP fixture files (POSCAR, INCAR, KPOINTS,
# POTCAR, ...) used throughout this test module.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
                        'test_files')
class PoscarTest(PymatgenTest):
    """Tests for reading, writing and manipulating POSCAR/CONTCAR files
    through the Poscar class."""
    def test_init(self):
        filepath = os.path.join(test_dir, 'POSCAR')
        poscar = Poscar.from_file(filepath)
        comp = poscar.structure.composition
        self.assertEqual(comp, Composition("Fe4P4O16"))
        # Vasp 4 type with symbols at the end.
        poscar_string = """Test1
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
        poscar = Poscar.from_string(poscar_string)
        self.assertEqual(poscar.structure.composition, Composition("SiF"))
        # An empty string is not a valid POSCAR.
        poscar_string = ""
        self.assertRaises(ValueError, Poscar.from_string, poscar_string)
        # Vasp 4 tyle file with default names, i.e. no element symbol found.
        poscar_string = """Test2
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000
0.750000 0.500000 0.750000
"""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            poscar = Poscar.from_string(poscar_string)
            # Without symbols, dummy species H, He, ... are assigned in order.
            self.assertEqual(poscar.structure.composition, Composition("HHe"))
        # Vasp 4 tyle file with default names, i.e. no element symbol found.
        poscar_string = """Test3
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
Selective dynamics
direct
0.000000 0.000000 0.000000 T T T Si
0.750000 0.500000 0.750000 F F F O
"""
        poscar = Poscar.from_string(poscar_string)
        # T/F flags map to per-coordinate booleans.
        self.assertEqual(poscar.selective_dynamics, [[True, True, True],
                                                     [False, False, False]])
        self.selective_poscar = poscar
    def test_from_file(self):
        filepath = os.path.join(test_dir, 'POSCAR.symbols_natoms_multilines')
        poscar = Poscar.from_file(filepath, check_for_POTCAR=False,
                                  read_velocities=False)
        ordered_expected_elements = ['Fe', 'Cr', 'Fe', 'Fe', 'Cr', 'Cr', 'Cr',
                                     'Cr',
                                     'Fe', 'Fe', 'Cr', 'Fe', 'Cr', 'Fe', 'Fe',
                                     'Cr',
                                     'Fe', 'Cr', 'Fe', 'Fe', 'Fe', 'Fe', 'Cr',
                                     'Fe',
                                     'Ni', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe', 'Cr',
                                     'Cr',
                                     'Cr', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe',
                                     'Cr',
                                     'Fe', 'Fe', 'Ni', 'Fe', 'Fe', 'Fe', 'Cr',
                                     'Cr',
                                     'Fe', 'Fe', 'Fe', 'Fe', 'Fe']
        self.assertEqual([site.specie.symbol for site in poscar.structure],
                         ordered_expected_elements)
    def test_to_from_dict(self):
        poscar_string = """Test3
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
Selective dynamics
direct
0.000000 0.000000 0.000000 T T T Si
0.750000 0.500000 0.750000 F F F O
"""
        poscar = Poscar.from_string(poscar_string)
        # Round-trip through the dict serialization.
        d = poscar.as_dict()
        poscar2 = Poscar.from_dict(d)
        self.assertEqual(poscar2.comment, "Test3")
        self.assertTrue(all(poscar2.selective_dynamics[0]))
        self.assertFalse(all(poscar2.selective_dynamics[1]))
    def test_cart_scale(self):
        poscar_string = """Test1
1.1
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
Si F
1 1
cart
0.000000 0.00000000 0.00000000
3.840198 1.50000000 2.35163175
"""
        p = Poscar.from_string(poscar_string)
        site = p.structure[1]
        # Cartesian coordinates are multiplied by the universal scale factor.
        self.assertArrayAlmostEqual(site.coords,
                                    np.array([3.840198, 1.5, 2.35163175]) * 1.1)
    def test_significant_figures(self):
        si = 14
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        # Silicon structure for testing.
        latt = [[3.8401979337, 0.00, 0.00],
                [1.9200989668, 3.3257101909, 0.00],
                [0.00, -2.2171384943, 3.1355090603]]
        struct = Structure(latt, [si, si], coords)
        poscar = Poscar(struct)
        expected_str = '''Si2
1.0
3.84 0.00 0.00
1.92 3.33 0.00
0.00 -2.22 3.14
Si
2
direct
0.00 0.00 0.00 Si
0.75 0.50 0.75 Si
'''
        actual_str = poscar.get_string(significant_figures=2)
        self.assertEqual(actual_str, expected_str, "Wrong POSCAR output!")
    def test_str(self):
        si = 14
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        # Silicon structure for testing.
        latt = [[3.8401979337, 0.00, 0.00],
                [1.9200989668, 3.3257101909, 0.00],
                [0.00, -2.2171384943, 3.1355090603]]
        struct = Structure(latt, [si, si], coords)
        poscar = Poscar(struct)
        expected_str = '''Si2
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
Si
2
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 Si
'''
        self.assertEqual(str(poscar), expected_str, "Wrong POSCAR output!")
        # Vasp 4 type with symbols at the end.
        poscar_string = """Test1
1.0
-3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
        # A negative scale factor flips the sign of every lattice vector.
        expected = """Test1
1.0
3.840198 -0.000000 -0.000000
-1.920099 -3.325710 -0.000000
-0.000000 2.217138 -3.135509
Si F
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
        poscar = Poscar.from_string(poscar_string)
        self.assertEqual(str(poscar), expected)
    def test_from_md_run(self):
        # Parsing from an MD type run with velocities and predictor corrector data
        p = Poscar.from_file(os.path.join(test_dir, "CONTCAR.MD"),
                             check_for_POTCAR=False)
        self.assertAlmostEqual(np.sum(np.array(p.velocities)), 0.0065417961324)
        self.assertEqual(p.predictor_corrector[0][0][0], 0.33387820E+00)
        self.assertEqual(p.predictor_corrector[0][1][1], -0.10583589E-02)
    def test_write_MD_poscar(self):
        # Parsing from an MD type run with velocities and predictor corrector data
        # And writing a new POSCAR from the new structure
        p = Poscar.from_file(os.path.join(test_dir, "CONTCAR.MD"),
                             check_for_POTCAR=False)
        tempfname = "POSCAR.testing.md"
        p.write_file(tempfname)
        p3 = Poscar.from_file(tempfname)
        self.assertArrayAlmostEqual(p.structure.lattice.abc,
                                    p3.structure.lattice.abc, 5)
        self.assertArrayAlmostEqual(p.velocities,
                                    p3.velocities, 5)
        self.assertArrayAlmostEqual(p.predictor_corrector,
                                    p3.predictor_corrector, 5)
        self.assertEqual(p.predictor_corrector_preamble,
                         p3.predictor_corrector_preamble)
        os.remove(tempfname)
    def test_setattr(self):
        filepath = os.path.join(test_dir, 'POSCAR')
        poscar = Poscar.from_file(filepath)
        # Assigning velocities with the wrong number of sites must fail.
        self.assertRaises(ValueError, setattr, poscar, 'velocities',
                          [[0, 0, 0]])
        poscar.selective_dynamics = np.array([[True, False, False]] * 24)
        ans = """
LiFePO4
1.0
10.411767 0.000000 0.000000
0.000000 6.067172 0.000000
0.000000 0.000000 4.759490
Fe P O
4 4 16
Selective dynamics
direct
0.218728 0.750000 0.474867 T F F Fe
0.281272 0.250000 0.974867 T F F Fe
0.718728 0.750000 0.025133 T F F Fe
0.781272 0.250000 0.525133 T F F Fe
0.094613 0.250000 0.418243 T F F P
0.405387 0.750000 0.918243 T F F P
0.594613 0.250000 0.081757 T F F P
0.905387 0.750000 0.581757 T F F P
0.043372 0.750000 0.707138 T F F O
0.096642 0.250000 0.741320 T F F O
0.165710 0.046072 0.285384 T F F O
0.165710 0.453928 0.285384 T F F O
0.334290 0.546072 0.785384 T F F O
0.334290 0.953928 0.785384 T F F O
0.403358 0.750000 0.241320 T F F O
0.456628 0.250000 0.207138 T F F O
0.543372 0.750000 0.792862 T F F O
0.596642 0.250000 0.758680 T F F O
0.665710 0.046072 0.214616 T F F O
0.665710 0.453928 0.214616 T F F O
0.834290 0.546072 0.714616 T F F O
0.834290 0.953928 0.714616 T F F O
0.903358 0.750000 0.258680 T F F O
0.956628 0.250000 0.292862 T F F O"""
        self.assertEqual(str(poscar).strip(), ans.strip())
    def test_velocities(self):
        si = 14
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        # Silicon structure for testing.
        latt = [[3.8401979337, 0.00, 0.00],
                [1.9200989668, 3.3257101909, 0.00],
                [0.00, -2.2171384943, 3.1355090603]]
        struct = Structure(latt, [si, si], coords)
        poscar = Poscar(struct)
        poscar.set_temperature(900)
        v = np.array(poscar.velocities)
        # Net momentum should vanish after velocity initialization.
        for x in np.sum(v, axis=0):
            self.assertAlmostEqual(x, 0, 7)
        # Back out the instantaneous temperature from the kinetic energy.
        temperature = struct[0].specie.atomic_mass.to("kg") * \
            np.sum(v ** 2) / (3 * const.k) * 1e10
        self.assertAlmostEqual(temperature, 900, 4,
                               'Temperature instantiated incorrectly')
        poscar.set_temperature(700)
        v = np.array(poscar.velocities)
        for x in np.sum(v, axis=0):
            self.assertAlmostEqual(
                x, 0, 7, 'Velocities initialized with a net momentum')
        temperature = struct[0].specie.atomic_mass.to("kg") * \
            np.sum(v ** 2) / (3 * const.k) * 1e10
        self.assertAlmostEqual(temperature, 700, 4,
                               'Temperature instantiated incorrectly')
    def test_write(self):
        filepath = os.path.join(test_dir, 'POSCAR')
        poscar = Poscar.from_file(filepath)
        tempfname = "POSCAR.testing"
        poscar.write_file(tempfname)
        p = Poscar.from_file(tempfname)
        self.assertArrayAlmostEqual(poscar.structure.lattice.abc,
                                    p.structure.lattice.abc, 5)
        os.remove(tempfname)
class IncarTest(unittest.TestCase):
    """Tests for parsing, diffing and serializing INCAR files."""
    def setUp(self):
        file_name = os.path.join(test_dir, 'INCAR')
        self.incar = Incar.from_file(file_name)
    def test_init(self):
        incar = self.incar
        incar["LDAU"] = "T"
        self.assertEqual(incar["ALGO"], "Damped", "Wrong Algo")
        self.assertEqual(float(incar["EDIFF"]), 1e-4, "Wrong EDIFF")
        self.assertEqual(type(incar["LORBIT"]), int)
    def test_diff(self):
        incar = self.incar
        filepath1 = os.path.join(test_dir, 'INCAR')
        incar1 = Incar.from_file(filepath1)
        filepath2 = os.path.join(test_dir, 'INCAR.2')
        incar2 = Incar.from_file(filepath2)
        filepath3 = os.path.join(test_dir, 'INCAR.3')
        # NOTE(review): this reads filepath2 again, so incar3 == incar2 and
        # the second diff assertion below merely duplicates the first.
        # Presumably Incar.from_file(filepath3) was intended -- confirm the
        # contents of INCAR.3 before changing, since the expected diff dict
        # would change with it.
        incar3 = Incar.from_file(filepath2)
        self.assertEqual(
            incar1.diff(incar2),
            {'Different': {
                'NELM': {'INCAR1': None, 'INCAR2': 100},
                'ISPIND': {'INCAR1': 2, 'INCAR2': None},
                'LWAVE': {'INCAR1': True, 'INCAR2': False},
                'LDAUPRINT': {'INCAR1': None, 'INCAR2': 1},
                'MAGMOM': {'INCAR1': [6, -6, -6, 6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6],
                           'INCAR2': None},
                'NELMIN': {'INCAR1': None, 'INCAR2': 3},
                'ENCUTFOCK': {'INCAR1': 0.0, 'INCAR2': None},
                'HFSCREEN': {'INCAR1': 0.207, 'INCAR2': None},
                'LSCALU': {'INCAR1': False, 'INCAR2': None},
                'ENCUT': {'INCAR1': 500, 'INCAR2': None},
                'NSIM': {'INCAR1': 1, 'INCAR2': None},
                'ICHARG': {'INCAR1': None, 'INCAR2': 1},
                'NSW': {'INCAR1': 99, 'INCAR2': 51},
                'NKRED': {'INCAR1': 2, 'INCAR2': None},
                'NUPDOWN': {'INCAR1': 0, 'INCAR2': None},
                'LCHARG': {'INCAR1': True, 'INCAR2': None},
                'LPLANE': {'INCAR1': True, 'INCAR2': None},
                'ISMEAR': {'INCAR1': 0, 'INCAR2': -5},
                'NPAR': {'INCAR1': 8, 'INCAR2': 1},
                'SYSTEM': {
                    'INCAR1': 'Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]',
                    'INCAR2': 'Id=[91090] dblock_code=[20070929235612linio-59.53134651-vasp] formula=[li3 ni3 o6] sg_name=[r-3m]'},
                'ALGO': {'INCAR1': 'Damped', 'INCAR2': 'Fast'},
                'LHFCALC': {'INCAR1': True, 'INCAR2': None},
                'TIME': {'INCAR1': 0.4, 'INCAR2': None}},
                'Same': {'IBRION': 2, 'PREC': 'Accurate', 'ISIF': 3,
                         'LMAXMIX': 4,
                         'LREAL': 'Auto', 'ISPIN': 2, 'EDIFF': 0.0001,
                         'LORBIT': 11, 'SIGMA': 0.05}})
        self.assertEqual(
            incar1.diff(incar3),
            {'Different': {
                'NELM': {'INCAR1': None, 'INCAR2': 100},
                'ISPIND': {'INCAR1': 2, 'INCAR2': None},
                'LWAVE': {'INCAR1': True, 'INCAR2': False},
                'LDAUPRINT': {'INCAR1': None, 'INCAR2': 1},
                'MAGMOM': {'INCAR1': [6, -6, -6, 6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6],
                           'INCAR2': None},
                'NELMIN': {'INCAR1': None, 'INCAR2': 3},
                'ENCUTFOCK': {'INCAR1': 0.0, 'INCAR2': None},
                'HFSCREEN': {'INCAR1': 0.207, 'INCAR2': None},
                'LSCALU': {'INCAR1': False, 'INCAR2': None},
                'ENCUT': {'INCAR1': 500, 'INCAR2': None},
                'NSIM': {'INCAR1': 1, 'INCAR2': None},
                'ICHARG': {'INCAR1': None, 'INCAR2': 1},
                'NSW': {'INCAR1': 99, 'INCAR2': 51},
                'NKRED': {'INCAR1': 2, 'INCAR2': None},
                'NUPDOWN': {'INCAR1': 0, 'INCAR2': None},
                'LCHARG': {'INCAR1': True, 'INCAR2': None},
                'LPLANE': {'INCAR1': True, 'INCAR2': None},
                'ISMEAR': {'INCAR1': 0, 'INCAR2': -5},
                'NPAR': {'INCAR1': 8, 'INCAR2': 1},
                'SYSTEM': {
                    'INCAR1': 'Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]',
                    'INCAR2': 'Id=[91090] dblock_code=[20070929235612linio-59.53134651-vasp] formula=[li3 ni3 o6] sg_name=[r-3m]'},
                'ALGO': {'INCAR1': 'Damped', 'INCAR2': 'Fast'},
                'LHFCALC': {'INCAR1': True, 'INCAR2': None},
                'TIME': {'INCAR1': 0.4, 'INCAR2': None}},
                'Same': {'IBRION': 2, 'PREC': 'Accurate', 'ISIF': 3,
                         'LMAXMIX': 4,
                         'LREAL': 'Auto', 'ISPIN': 2, 'EDIFF': 0.0001,
                         'LORBIT': 11, 'SIGMA': 0.05}})
    def test_as_dict_and_from_dict(self):
        d = self.incar.as_dict()
        incar2 = Incar.from_dict(d)
        self.assertEqual(self.incar, incar2)
        # Magmom objects must survive the dict round-trip.
        d["MAGMOM"] = [Magmom([1, 2, 3]).as_dict()]
        incar3 = Incar.from_dict(d)
        self.assertEqual(incar3["MAGMOM"], [Magmom([1, 2, 3])])
    def test_write(self):
        tempfname = "INCAR.testing"
        self.incar.write_file(tempfname)
        i = Incar.from_file(tempfname)
        self.assertEqual(i, self.incar)
        os.remove(tempfname)
    def test_get_string(self):
        s = self.incar.get_string(pretty=True, sort_keys=True)
        ans = """ALGO = Damped
EDIFF = 0.0001
ENCUT = 500
ENCUTFOCK = 0.0
HFSCREEN = 0.207
IBRION = 2
ISIF = 3
ISMEAR = 0
ISPIN = 2
ISPIND = 2
LCHARG = True
LHFCALC = True
LMAXMIX = 4
LORBIT = 11
LPLANE = True
LREAL = Auto
LSCALU = False
LWAVE = True
MAGMOM = 1*6.0 2*-6.0 1*6.0 20*0.6
NKRED = 2
NPAR = 8
NSIM = 1
NSW = 99
NUPDOWN = 0
PREC = Accurate
SIGMA = 0.05
SYSTEM = Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]
TIME = 0.4"""
        self.assertEqual(s, ans)
    def test_lsorbit_magmom(self):
        magmom1 = [[0.0, 0.0, 3.0], [0, 1, 0], [2, 1, 2]]
        magmom2 = [-1, -1, -1, 0, 0, 0, 0, 0]
        magmom4 = [Magmom([1.0, 2.0, 2.0])]
        ans_string1 = "LANGEVIN_GAMMA = 10 10 10\nLSORBIT = True\n" \
                      "MAGMOM = 0.0 0.0 3.0 0 1 0 2 1 2\n"
        ans_string2 = "LANGEVIN_GAMMA = 10\nLSORBIT = True\n" \
                      "MAGMOM = 3*3*-1 3*5*0\n"
        ans_string3 = "LSORBIT = False\nMAGMOM = 2*-1 2*9\n"
        ans_string4_nolsorbit = "LANGEVIN_GAMMA = 10\nLSORBIT = False\nMAGMOM = 1*3.0\n"
        ans_string4_lsorbit = "LANGEVIN_GAMMA = 10\nLSORBIT = True\nMAGMOM = 1.0 2.0 2.0\n"
        incar = Incar({})
        incar["MAGMOM"] = magmom1
        incar["LSORBIT"] = "T"
        incar["LANGEVIN_GAMMA"] = [10, 10, 10]
        self.assertEqual(ans_string1, str(incar))
        incar["MAGMOM"] = magmom2
        incar["LSORBIT"] = "T"
        incar["LANGEVIN_GAMMA"] = 10
        self.assertEqual(ans_string2, str(incar))
        incar["MAGMOM"] = magmom4
        incar["LSORBIT"] = "F"
        self.assertEqual(ans_string4_nolsorbit, str(incar))
        incar["LSORBIT"] = "T"
        self.assertEqual(ans_string4_lsorbit, str(incar))
        # Round-trip: MAGMOM strings parse back to vector form with LSORBIT.
        incar = Incar.from_string(ans_string1)
        self.assertEqual(incar["MAGMOM"],
                         [[0.0, 0.0, 3.0], [0, 1, 0], [2, 1, 2]])
        self.assertEqual(incar["LANGEVIN_GAMMA"], [10, 10, 10])
        incar = Incar.from_string(ans_string2)
        self.assertEqual(incar["MAGMOM"], [[-1, -1, -1], [-1, -1, -1],
                                           [-1, -1, -1], [0, 0, 0],
                                           [0, 0, 0], [0, 0, 0],
                                           [0, 0, 0], [0, 0, 0]])
        self.assertEqual(incar["LANGEVIN_GAMMA"], [10])
        incar = Incar.from_string(ans_string3)
        self.assertFalse(incar["LSORBIT"])
        self.assertEqual(incar["MAGMOM"], [-1, -1, 9, 9])
    def test_quad_efg(self):
        incar1 = Incar({})
        incar1["LEFG"] = True
        incar1["QUAD_EFG"] = [0.0, 146.6, -25.58]
        ans_string1 = "LEFG = True\nQUAD_EFG = 0.0 146.6 -25.58\n"
        self.assertEqual(ans_string1, str(incar1))
        incar2 = Incar.from_string(ans_string1)
        self.assertEqual(ans_string1, str(incar2))
    def test_types(self):
        incar_str = """ALGO = Fast
ECUT = 510
EDIFF = 1e-07
EINT = -0.85 0.85
IBRION = -1
ICHARG = 11
ISIF = 3
ISMEAR = 1
ISPIN = 1
LPARD = True
NBMOD = -3
PREC = Accurate
SIGMA = 0.1"""
        i = Incar.from_string(incar_str)
        # Multi-valued numeric tags parse to lists of floats.
        self.assertIsInstance(i["EINT"], list)
        self.assertEqual(i["EINT"][0], -0.85)
    def test_proc_types(self):
        # Unknown keys pass their values through unmodified.
        self.assertEqual(Incar.proc_val("HELLO", "-0.85 0.85"), "-0.85 0.85")
class KpointsTest(PymatgenTest):
    """Tests for parsing and generating KPOINTS files."""
    def test_init(self):
        filepath = os.path.join(test_dir, 'KPOINTS.auto')
        kpoints = Kpoints.from_file(filepath)
        self.assertEqual(kpoints.kpts, [[10]], "Wrong kpoint lattice read")
        filepath = os.path.join(test_dir, 'KPOINTS.cartesian')
        kpoints = Kpoints.from_file(filepath)
        self.assertEqual(kpoints.kpts,
                         [[0.25, 0, 0], [0, 0.25, 0], [0, 0, 0.25]],
                         "Wrong kpoint lattice read")
        self.assertEqual(kpoints.kpts_shift, [0.5, 0.5, 0.5],
                         "Wrong kpoint shift read")
        filepath = os.path.join(test_dir, 'KPOINTS')
        kpoints = Kpoints.from_file(filepath)
        # NOTE(review): this attribute is never read again; test instances
        # are discarded after each test, so the assignment looks dead.
        self.kpoints = kpoints
        self.assertEqual(kpoints.kpts, [[2, 4, 6]])
        filepath = os.path.join(test_dir, 'KPOINTS.band')
        kpoints = Kpoints.from_file(filepath)
        self.assertIsNotNone(kpoints.labels)
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Line_mode)
        kpoints_str = str(kpoints)
        self.assertEqual(kpoints_str.split("\n")[3], "Reciprocal")
        filepath = os.path.join(test_dir, 'KPOINTS.explicit')
        kpoints = Kpoints.from_file(filepath)
        self.assertIsNotNone(kpoints.kpts_weights)
        self.assertEqual(str(kpoints).strip(), """Example file
4
Cartesian
0.0 0.0 0.0 1 None
0.0 0.0 0.5 1 None
0.0 0.5 0.5 2 None
0.5 0.5 0.5 4 None""")
        filepath = os.path.join(test_dir, 'KPOINTS.explicit_tet')
        kpoints = Kpoints.from_file(filepath)
        self.assertEqual(kpoints.tet_connections, [(6, [1, 2, 3, 4])])
    def test_style_setter(self):
        filepath = os.path.join(test_dir, 'KPOINTS')
        kpoints = Kpoints.from_file(filepath)
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
        # Setting by single-letter string should resolve to the enum member.
        kpoints.style = "G"
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
    def test_static_constructors(self):
        kpoints = Kpoints.gamma_automatic([3, 3, 3], [0, 0, 0])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        self.assertEqual(kpoints.kpts, [[3, 3, 3]])
        kpoints = Kpoints.monkhorst_automatic([2, 2, 2], [0, 0, 0])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
        self.assertEqual(kpoints.kpts, [[2, 2, 2]])
        kpoints = Kpoints.automatic(100)
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic)
        self.assertEqual(kpoints.kpts, [[100]])
        filepath = os.path.join(test_dir, 'POSCAR')
        poscar = Poscar.from_file(filepath)
        kpoints = Kpoints.automatic_density(poscar.structure, 500)
        self.assertEqual(kpoints.kpts, [[1, 3, 3]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        kpoints = Kpoints.automatic_density(poscar.structure, 500, True)
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        kpoints = Kpoints.automatic_density_by_vol(poscar.structure, 1000)
        self.assertEqual(kpoints.kpts, [[6, 10, 13]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        # A 3x supercell needs proportionally fewer kpoints per axis.
        s = poscar.structure
        s.make_supercell(3)
        kpoints = Kpoints.automatic_density(s, 500)
        self.assertEqual(kpoints.kpts, [[1, 1, 1]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
    def test_as_dict_from_dict(self):
        k = Kpoints.monkhorst_automatic([2, 2, 2], [0, 0, 0])
        d = k.as_dict()
        k2 = Kpoints.from_dict(d)
        self.assertEqual(k.kpts, k2.kpts)
        self.assertEqual(k.style, k2.style)
        self.assertEqual(k.kpts_shift, k2.kpts_shift)
    def test_kpt_bands_as_dict_from_dict(self):
        file_name = os.path.join(test_dir, 'KPOINTS.band')
        k = Kpoints.from_file(file_name)
        d = k.as_dict()
        import json
        # The dict must be JSON-serializable.
        json.dumps(d)
        # This doesn't work
        k2 = Kpoints.from_dict(d)
        self.assertEqual(k.kpts, k2.kpts)
        self.assertEqual(k.style, k2.style)
        self.assertEqual(k.kpts_shift, k2.kpts_shift)
        self.assertEqual(k.num_kpts, k2.num_kpts)
    def test_pickle(self):
        k = Kpoints.gamma_automatic()
        pickle.dumps(k)
    def test_automatic_kpoint(self):
        # s = PymatgenTest.get_structure("Li2O")
        p = Poscar.from_string("""Al1
1.0
2.473329 0.000000 1.427977
0.824443 2.331877 1.427977
0.000000 0.000000 2.855955
Al
1
direct
0.000000 0.000000 0.000000 Al""")
        kpoints = Kpoints.automatic_density(p.structure, 1000)
        self.assertArrayAlmostEqual(kpoints.kpts[0], [10, 10, 10])
class PotcarSingleTest(unittest.TestCase):
    """Tests for a single POTCAR entry, using the gzipped Mn_pv PBE fixture."""
    def setUp(self):
        self.psingle = PotcarSingle.from_file(
            os.path.join(test_dir, "POT_GGA_PAW_PBE", "POTCAR.Mn_pv.gz"))
    def test_keywords(self):
        # Full header keyword block expected from the Mn_pv fixture.
        data = {'VRHFIN': 'Mn: 3p4s3d', 'LPAW': True, 'DEXC': -.003,
                'STEP': [20.000, 1.050],
                'RPACOR': 2.080, 'LEXCH': 'PE',
                'ENMAX': 269.865, 'QCUT': -4.454,
                'TITEL': 'PAW_PBE Mn_pv 07Sep2000',
                'LCOR': True, 'EAUG': 569.085,
                'RMAX': 2.807,
                'ZVAL': 13.000,
                'EATOM': 2024.8347, 'NDATA': 100,
                'LULTRA': False,
                'QGAM': 8.907,
                'ENMIN': 202.399,
                'RCLOC': 1.725,
                'RCORE': 2.300,
                'RDEP': 2.338,
                'IUNSCR': 1,
                'RAUG': 1.300,
                'POMASS': 54.938,
                'RWIGS': 1.323}
        self.assertEqual(self.psingle.keywords, data)
    def test_nelectrons(self):
        self.assertEqual(self.psingle.nelectrons, 13)
    def test_electron_config(self):
        config = self.psingle.electron_configuration
        self.assertEqual(config[-1], (3, "p", 6))
    def test_attributes(self):
        # Every numeric header keyword should be exposed as an attribute.
        for k in ['DEXC', 'RPACOR', 'ENMAX', 'QCUT', 'EAUG', 'RMAX',
                  'ZVAL', 'EATOM', 'NDATA', 'QGAM', 'ENMIN', 'RCLOC',
                  'RCORE', 'RDEP', 'RAUG', 'POMASS', 'RWIGS']:
            self.assertIsNotNone(getattr(self.psingle, k))
    def test_found_unknown_key(self):
        with self.assertRaises(KeyError):
            PotcarSingle.parse_functions['BAD_KEY']
    def test_bad_value(self):
        self.assertRaises(ValueError, PotcarSingle.parse_functions['ENMAX'],
                          "ThisShouldBeAFloat")
    def test_hash(self):
        self.assertEqual(self.psingle.get_potcar_hash(),
                         "fa52f891f234d49bb4cb5ea96aae8f98")
    def test_from_functional_and_symbols(self):
        test_potcar_dir = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         "..", "..", "..", "..", "test_files"))
        SETTINGS["PMG_VASP_PSP_DIR"] = test_potcar_dir
        p = PotcarSingle.from_symbol_and_functional("Li_sv", "PBE")
        self.assertEqual(p.enmax, 271.649)
    def test_functional_types(self):
        self.assertEqual(self.psingle.functional, 'PBE')
        self.assertEqual(self.psingle.functional_class, 'GGA')
        self.assertEqual(self.psingle.potential_type, 'PAW')
        psingle = PotcarSingle.from_file(os.path.join(test_dir, "POT_LDA_PAW",
                                                      "POTCAR.Fe.gz"))
        self.assertEqual(psingle.functional, 'Perdew-Zunger81')
        self.assertEqual(psingle.functional_class, 'LDA')
        self.assertEqual(psingle.potential_type, 'PAW')
    def test_default_functional(self):
        p = PotcarSingle.from_symbol_and_functional("Fe")
        self.assertEqual(p.functional_class, 'GGA')
        SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "LDA"
        p = PotcarSingle.from_symbol_and_functional("Fe")
        self.assertEqual(p.functional_class, 'LDA')
    def tearDown(self):
        # Restore the global default mutated by test_default_functional.
        SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "PBE"
class PotcarTest(unittest.TestCase):
    """Tests for multi-element POTCAR files."""
    def setUp(self):
        if "PMG_VASP_PSP_DIR" not in os.environ:
            test_potcar_dir = os.path.abspath(
                os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
                             "test_files"))
            os.environ["PMG_VASP_PSP_DIR"] = test_potcar_dir
        filepath = os.path.join(test_dir, 'POTCAR')
        self.potcar = Potcar.from_file(filepath)
    def test_init(self):
        self.assertEqual(self.potcar.symbols, ["Fe", "P", "O"],
                         "Wrong symbols read in for POTCAR")
        potcar = Potcar(["Fe_pv", "O"])
        self.assertEqual(potcar[0].enmax, 293.238)
    def test_potcar_map(self):
        fe_potcar = zopen(os.path.join(test_dir, "POT_GGA_PAW_PBE",
                                       "POTCAR.Fe_pv.gz")).read().decode(
            "utf-8")
        # specify V instead of Fe - this makes sure the test won't pass if the
        # code just grabs the POTCAR from the config file (the config file would
        # grab the V POTCAR)
        potcar = Potcar(["V"], sym_potcar_map={"V": fe_potcar})
        self.assertEqual(potcar.symbols, ["Fe_pv"], "Wrong symbols read in "
                                                    "for POTCAR")
    def test_to_from_dict(self):
        d = self.potcar.as_dict()
        potcar = Potcar.from_dict(d)
        self.assertEqual(potcar.symbols, ["Fe", "P", "O"])
    def test_write(self):
        tempfname = "POTCAR.testing"
        self.potcar.write_file(tempfname)
        p = Potcar.from_file(tempfname)
        self.assertEqual(p.symbols, self.potcar.symbols)
        os.remove(tempfname)
    def test_set_symbol(self):
        self.assertEqual(self.potcar.symbols, ["Fe", "P", "O"])
        self.assertEqual(self.potcar[0].nelectrons, 8)
        # Assigning symbols reloads the underlying potentials.
        self.potcar.symbols = ["Fe_pv", "O"]
        self.assertEqual(self.potcar.symbols, ["Fe_pv", "O"])
        self.assertEqual(self.potcar[0].nelectrons, 14)
    def test_default_functional(self):
        p = Potcar(["Fe", "P"])
        self.assertEqual(p[0].functional_class, 'GGA')
        self.assertEqual(p[1].functional_class, 'GGA')
        SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "LDA"
        p = Potcar(["Fe", "P"])
        self.assertEqual(p[0].functional_class, 'LDA')
        self.assertEqual(p[1].functional_class, 'LDA')
    def test_pickle(self):
        pickle.dumps(self.potcar)
    def tearDown(self):
        # Restore the global default mutated by test_default_functional.
        SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "PBE"
class VaspInputTest(unittest.TestCase):
    """Tests for the VaspInput container (INCAR + KPOINTS + POSCAR + POTCAR)."""
    def setUp(self):
        filepath = os.path.join(test_dir, 'INCAR')
        incar = Incar.from_file(filepath)
        filepath = os.path.join(test_dir, 'POSCAR')
        poscar = Poscar.from_file(filepath)
        if "PMG_VASP_PSP_DIR" not in os.environ:
            test_potcar_dir = os.path.abspath(
                os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
                             "test_files"))
            os.environ["PMG_VASP_PSP_DIR"] = test_potcar_dir
        filepath = os.path.join(test_dir, 'POTCAR')
        potcar = Potcar.from_file(filepath)
        filepath = os.path.join(test_dir, 'KPOINTS.auto')
        kpoints = Kpoints.from_file(filepath)
        self.vinput = VaspInput(incar, kpoints, poscar, potcar)
    def test_to_from_dict(self):
        d = self.vinput.as_dict()
        vinput = VaspInput.from_dict(d)
        comp = vinput["POSCAR"].structure.composition
        self.assertEqual(comp, Composition("Fe4P4O16"))
    def test_write(self):
        tmp_dir = "VaspInput.testing"
        self.vinput.write_input(tmp_dir)
        filepath = os.path.join(tmp_dir, "INCAR")
        incar = Incar.from_file(filepath)
        self.assertEqual(incar["NSW"], 99)
        # Clean up all four generated files and the directory.
        for name in ("INCAR", "POSCAR", "POTCAR", "KPOINTS"):
            os.remove(os.path.join(tmp_dir, name))
        os.rmdir(tmp_dir)
    def test_from_directory(self):
        vi = VaspInput.from_directory(test_dir,
                                      optional_files={"CONTCAR.Li2O": Poscar})
        self.assertEqual(vi["INCAR"]["ALGO"], "Damped")
        self.assertIn("CONTCAR.Li2O", vi)
        # Optional files must survive the dict round-trip too.
        d = vi.as_dict()
        vinput = VaspInput.from_dict(d)
        self.assertIn("CONTCAR.Li2O", vinput)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
import ROOT as r
import tutils as tu
import numpy as np
from cStringIO import StringIO
import dlist
import sys
import pyutils as ut
import IPython
from string import atof,atoi
def read_data(fn=None):
    """Read the whole input into an in-memory StringIO buffer.

    :param fn: path of the file to read; when None, reads from stdin.
    :returns: a StringIO positioned at the start of the data.
    """
    if fn is None:
        lines = sys.stdin.read()
    else:
        # Context manager guarantees the file is closed even if read() fails
        # (the original closed it manually, leaking on error).
        with open(fn) as f:
            lines = f.read()
    return StringIO(lines)
class DrawString(object):
    """One draw directive parsed from a 'fname : hname : dopt : opts' string.

    The four colon-separated fields are optional from the right: file name,
    histogram name, ROOT draw option and a comma-separated option list
    (title=, miny=, maxy=, xt=, yt=, zt=, logx, logy, logz, ...).
    """
    def __init__(self, s):
        self.s = s
        self.fname = ''
        self.hname = ''
        self.dopt = ''
        self.opts = ''
        self.process()
        # Single-argument print(...) prints identically under python 2.
        print('[i] fname={} hname={} dopt={} opts={}'.format(self.fname, self.hname, self.dopt, self.opts))

    def process(self):
        """Split self.s into its positional fields; missing trailing fields
        keep their '' defaults (replaces four bare try/except blocks that
        swallowed every exception)."""
        sargs = self.s.split(':')
        for attr, value in zip(('fname', 'hname', 'dopt', 'opts'), sargs):
            setattr(self, attr, value.strip())

    def get_arg(self, sarg):
        """Return the value following *sarg* in the option list, or None."""
        for xs in self.opts.split(','):
            if sarg in xs:
                return xs.replace(sarg, '')
        return None

    def is_arg(self, sarg):
        """True when *sarg* occurs in any comma-separated option."""
        for xs in self.opts.split(','):
            if sarg in xs:
                return True
        return False

    def title(self):
        """Plot title from 'title='; falls back to the file name."""
        st = self.get_arg('title=')
        if st is None:
            st = self.fname
        return st

    def miny(self):
        """Lower y-range as a float, or None when not given."""
        st = self.get_arg('miny=')
        if st is not None:
            st = atof(st)
        return st

    def maxy(self):
        """Upper y-range as a float, or None when not given."""
        st = self.get_arg('maxy=')
        if st is not None:
            st = atof(st)
        return st

    def xt(self):
        """X-axis title or None."""
        return self.get_arg('xt=')

    def yt(self):
        """Y-axis title or None."""
        return self.get_arg('yt=')

    def zt(self):
        """Z-axis title or None."""
        return self.get_arg('zt=')

    def logy(self):
        return self.is_arg('logy')

    def logx(self):
        return self.is_arg('logx')

    def logz(self):
        return self.is_arg('logz')
class Comment(object):
def __init__(self, s):
self.s = s
self.box = self.get_box()
self.text = self.get_text()
def get_box(self):
x1 = 0.1
x2 = 0.9
y1 = 0.1
y2 = 0.9
if self.s == None:
return [x1, y1, x2, y2]
try:
x1 = atof(self.s.split(',')[0])
y1 = atof(self.s.split(',')[1])
x2 = atof(self.s.split(',')[2])
y2 = atof(self.s.split(',')[3])
except:
print '[w] trouble with comment position? x1,y1,x2,y2',self.s
return [x1, y1, x2, y2]
def get_settings(self, sitem):
retval = []
splits = self.s.split(sitem)
for n in xrange(1,len(splits)):
retval.append(self.filter_known_settings(splits[n]))
if len(retval) <= 0:
retval.append(None)
return retval
def filter_known_settings(self, s):
known = ['tx_size=', 'color=', 'font=', 'alpha=']
for k in known:
if k in s:
s=s.split(k)[0]
return s
def get_setting(self, sitem):
setting = 0
if self.get_settings(sitem)[-1]:
setting = self.get_settings(sitem)[-1]
return setting
def get_text(self):
return self.get_settings('item=')
def get_text_size(self):
if self.get_setting('tx_size='):
return atof(self.get_setting('tx_size='))
return 0.025
def get_color(self):
if self.get_setting('color='):
return atoi(self.get_setting('color='))
return 1
def get_font(self):
if self.get_setting('font='):
return self.get_setting('font=')
return 42
def get_alpha(self):
if self.get_setting('alpha='):
return self.get_setting('alpha=')/100.
return 0
def legend(self):
tleg = None
if len(self.text) <=0:
print '[e] no text in comment tag'
else:
tleg = r.TLegend(self.box[0], self.box[1], self.box[2], self.box[3])
tleg.SetNColumns(1)
tleg.SetBorderSize(0)
tleg.SetFillColor(r.kWhite)
tleg.SetFillStyle(1001)
#tleg.SetFillColorAlpha(ROOT.kWhite, 0.9)
tleg.SetFillColorAlpha(r.kWhite, self.get_alpha())
tleg.SetTextAlign(12)
tleg.SetTextSize(self.get_text_size())
tleg.SetTextFont(self.get_font())
tleg.SetTextColor(self.get_color())
for s in self.text:
print s
tleg.AddEntry(0, s, '')
return tleg
def legend_position(sleg):
x1 = None
x2 = None
y1 = None
y2 = None
if sleg == None:
return x1, y1, x2, y2
try:
x1 = atof(sleg.split(',')[0])
y1 = atof(sleg.split(',')[1])
x2 = atof(sleg.split(',')[2])
y2 = atof(sleg.split(',')[3])
except:
print '[w] trouble with legend position? x1,y1,x2,y2',sleg
return x1, y1, x2, y2
def get_tag_from_file(tag, fname, default=None, split=None):
    """Return the value of the last '<tag> value' line in *fname*.

    Falls back to *default* when the tag is absent; when *split* is given
    the found value is split on it.
    """
    marker = tag + ' '
    retval = default
    for line in ut.load_file_to_strings(fname):
        # Only a marker at the very start of the line counts.
        if marker in line[:len(marker)]:
            retval = line.replace(marker, '')
    if split != None and retval != None:
        retval = retval.split(split)
    return retval
def axis_range(sleg):
x1 = None
x2 = None
if sleg == None:
return x1, x2
try:
x1 = atof(sleg.split(',')[0])
x2 = atof(sleg.split(',')[1])
except:
print '[w] trouble with x-range? x1,x2',sleg
return x1, x2
def main():
    """Draw histograms listed in a draw file (or stdin): one DrawString per
    line, with '#'-prefixed tags in the file and command-line options
    controlling axes, ranges, legend, geometry and comments."""
    tu.setup_basic_root()
    fname = ut.get_arg_with('-f')
    #vals = read_data(fname)
    vals = ut.load_file_to_strings(fname)
    if fname == None:
        hlname = 'stdin'
    else:
        hlname = fname
    hls = dlist.ListStorage(hlname+'storage')
    hl = hls.get_list(hlname)
    ds = None
    #for cline in vals.getvalue().split('\n'):
    for cline in vals:
        # Skip blank lines and '#' tag/comment lines.
        if len(cline) < 1:
            continue
        if cline[0] == '#':
            continue
        ds = DrawString(cline)
        hl.add_from_file(ds.hname, ds.fname, ds.title(), ds.dopt)
    if ds == None:
        print '[e] nothing to draw...'
        return
    hl.make_canvas()
    # Axis titles: per-line options first, then file-level tags override.
    hl.reset_axis_titles(ds.xt(), ds.yt(), ds.zt())
    xt = get_tag_from_file('#x', fname, None)
    yt = get_tag_from_file('#y', fname, None)
    zt = get_tag_from_file('#z', fname, None)
    hl.reset_axis_titles(xt, yt, zt)
    # '#rebin <n> [true|false]': rebin with optional renormalization.
    rebin = get_tag_from_file('#rebin', fname, None, ' ')
    if rebin != None:
        print atoi(rebin[0])
        if len(rebin) > 0:
            hl.rebin(atoi(rebin[0]))
        if len(rebin) > 1:
            if 'true' in rebin[1].lower():
                print '[i] rebin with renorm...'
                hl.rebin(atoi(rebin[0]), True)
            else:
                print '[i] rebin w/o renorm...'
                hl.rebin(atoi(rebin[0]), False)
    normalize = get_tag_from_file('#normalize', fname, None)
    if normalize == 'self':
        hl.normalize_self()
    # y-range: a file tag wins over the per-line miny=/maxy= options.
    miny = get_tag_from_file('#miny', fname, None)
    if miny == None:
        miny = ds.miny()
    else:
        miny = atof(miny)
    maxy = get_tag_from_file('#maxy', fname, None)
    if maxy == None:
        maxy = ds.maxy()
    else:
        maxy = atof(maxy)
    # Log-axis flags: '#<tag> true' enables; a missing tag falls back to the
    # last DrawString; any other value disables.
    # NOTE(review): the source indentation was lost; this nesting is the
    # reading consistent with that intent -- confirm against the original.
    logy = get_tag_from_file('#logy', fname, None)
    if logy == 'true':
        logy = True
    else:
        if logy == None:
            logy = ds.logy()
        else:
            logy = False
    logx = get_tag_from_file('#logx', fname, None)
    if logx == 'true':
        logx = True
    else:
        if logx == None:
            logx = ds.logx()
        else:
            logx = False
    logz = get_tag_from_file('#logz', fname, None)
    if logz == 'true':
        logz = True
    else:
        if logz == None:
            logz = ds.logz()
        else:
            logz = False
    print 'logy is',logy
    # Optional x-axis zoom from '#xrange x1,x2'.
    sxrange = get_tag_from_file('#xrange', fname, None)
    if sxrange != None:
        x1, x2 = axis_range(sxrange)
        hl.zoom_axis(0, x1, x2)
    hl.draw(miny=miny,maxy=maxy,logy=logy)
    if logy:
        hl.set_log_multipad('y')
    if logx:
        hl.set_log_multipad('x')
    if logz:
        hl.set_log_multipad('z')
    #legend
    stitle = ut.get_arg_with('--title')
    if stitle == None:
        stitle = get_tag_from_file('#title', fname, '')
    sleg = ut.get_arg_with('--leg')
    if sleg == None:
        sleg = get_tag_from_file('#legend',fname)
    x1,y1,x2,y2 = legend_position(sleg)
    tx_size = None
    try:
        tx_size = atof(sleg.split('tx_size=')[1])
    except:
        pass
    print tx_size
    _lopts = 'brNDC'
    try:
        _lopts = sleg.split('options=')[1]
    except:
        _lopts = ''
    hl.self_legend(title=stitle,x1=x1,x2=x2,y1=y1,y2=y2,tx_size=tx_size, option=_lopts)
    #size of the window
    x = 400
    y = 300
    # NOTE(review): other option lookups use ut.get_arg_with; confirm the
    # tutils module also provides get_arg_with, else this is a typo.
    gs = tu.get_arg_with('--geom')
    if gs == None:
        gs = get_tag_from_file('#geom', fname)
    if gs != None:
        try:
            # geometry is given as '<width>x<height>'
            x = atoi(gs.split('x')[0])
            y = atoi(gs.split('x')[1])
        except:
            print '[e] unable to understand the --geom argument',gs
    hl.resize_window(x,y)
    # Optional '#comment' tag rendered as a TLegend overlay.
    cs = get_tag_from_file('#comment', fname)
    if cs:
        tc = Comment(cs)
        leg = tc.legend()
        leg.Draw()
        tu.gList.append(leg)
    hl.update()
    if '--print' in sys.argv:
        hl.pdf()
if __name__ == '__main__':
main()
if not ut.is_arg_set('-b'):
IPython.embed()
| |
"""numerical differentiation function, gradient, Jacobian, and Hessian
Author : josef-pkt
License : BSD
"""
from __future__ import print_function
from statsmodels.compat.python import range
#These are simple forward differentiation, so that we have them available
#without dependencies.
#
#* Jacobian should be faster than numdifftools because it doesn't use loop over observations.
#* numerical precision will vary and depend on the choice of stepsizes
#
#Todo:
#* some cleanup
#* check numerical accuracy (and bugs) with numdifftools and analytical derivatives
# - linear least squares case: (hess - 2*X'X) is 1e-8 or so
# - gradient and Hessian agree with numdifftools when evaluated away from minimum
# - forward gradient, Jacobian evaluated at minimum is inaccurate, centered (+/- epsilon) is ok
#* dot product of Jacobian is different from Hessian, either wrong example or a bug (unlikely),
# or a real difference
#
#
#What are the conditions that Jacobian dotproduct and Hessian are the same?
#see also:
#BHHH: Greene p481 17.4.6, MLE Jacobian = d loglike / d beta , where loglike is vector for each observation
# see also example 17.4 when J'J is very different from Hessian
# also does it hold only at the minimum, what's relationship to covariance of Jacobian matrix
#http://projects.scipy.org/scipy/ticket/1157
#http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
# objective: sum((y-f(beta,x)**2), Jacobian = d f/d beta and not d objective/d beta as in MLE Greene
# similar: http://crsouza.blogspot.com/2009/11/neural-network-learning-by-levenberg_18.html#hessian
#
#in example: if J = d x*beta / d beta then J'J == X'X
# similar to http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
import numpy as np

#NOTE: we only do double precision internally so far
# np.MachAr was deprecated (NumPy 1.22) and removed (1.24); np.finfo
# exposes the same double-precision machine epsilon.
EPS = np.finfo(float).eps
# Shared %-template for the finite-difference Hessian docstrings below;
# each approx_hess* fills it in via `_hessian_docs % dict(...)`.
_hessian_docs = """
Calculate Hessian with finite difference derivative approximation
Parameters
----------
x : array_like
value at which function derivative is evaluated
f : function
function of one array f(x, `*args`, `**kwargs`)
epsilon : float or array-like, optional
Stepsize used, if None, then stepsize is automatically chosen
according to EPS**(1/%(scale)s)*x.
args : tuple
Arguments for function `f`.
kwargs : dict
Keyword arguments for function `f`.
%(extra_params)s
Returns
-------
hess : ndarray
array of partial second derivatives, Hessian
%(extra_returns)s
Notes
-----
Equation (%(equation_number)s) in Ridout. Computes the Hessian as::
%(equation)s
where e[j] is a vector with element j == 1 and the rest are zero and
d[i] is epsilon[i].
References
----------:
Ridout, M.S. (2009) Statistical applications of the complex-step method
of numerical differentiation. The American Statistician, 63, 66-74
"""
def _get_epsilon(x, s, epsilon, n):
if epsilon is None:
h = EPS**(1. / s) * np.maximum(np.abs(x), 0.1)
else:
if np.isscalar(epsilon):
h = np.empty(n)
h.fill(epsilon)
else: # pragma : no cover
h = np.asarray(epsilon)
if h.shape != x.shape:
raise ValueError("If h is not a scalar it must have the same"
" shape as x.")
return h
def approx_fprime(x, f, epsilon=None, args=(), kwargs={}, centered=False):
    '''
    Gradient of function, or Jacobian if function f returns 1d array

    Parameters
    ----------
    x : array
        parameters at which the derivative is evaluated
    f : function
        `f(*((x,)+args), **kwargs)` returning either one value or 1d array
    epsilon : float, optional
        Stepsize; if None an optimal stepsize is used (EPS**(1/2)*x for
        forward differences, EPS**(1/3)*x for centered ones).
    args : tuple
        Tuple of additional arguments for function `f`.
    kwargs : dict
        Dictionary of additional keyword arguments for function `f`.
    centered : bool
        Use central differences instead of forward differences.

    Returns
    -------
    grad : array
        gradient or Jacobian

    Notes
    -----
    If f returns a 1d array the result is a Jacobian; a 2d return (one
    value per observation) yields a 3d array whose [:, i, :] slice is the
    Jacobian of observation i.
    '''
    n = len(x)
    #TODO: add scaled stepsize
    f0 = f(*((x,) + args), **kwargs)
    dim = np.atleast_1d(f0).shape  # it could be a scalar
    grad = np.zeros((n,) + dim, float)
    step = np.zeros((n,), float)
    if centered:
        epsilon = _get_epsilon(x, 3, epsilon, n) / 2.
        for k in range(n):
            step[k] = epsilon[k]
            diff = (f(*((x + step,) + args), **kwargs)
                    - f(*((x - step,) + args), **kwargs))
            grad[k, :] = diff / (2 * epsilon[k])
            step[k] = 0.0
    else:
        epsilon = _get_epsilon(x, 2, epsilon, n)
        for k in range(n):
            step[k] = epsilon[k]
            grad[k, :] = (f(*((x + step,) + args), **kwargs) - f0) / epsilon[k]
            step[k] = 0.0
    return grad.squeeze().T
def approx_fprime_cs(x, f, epsilon=None, args=(), kwargs={}):
    '''
    Calculate gradient or Jacobian with complex step derivative approximation

    Parameters
    ----------
    x : array
        parameters at which the derivative is evaluated
    f : function
        `f(*((x,)+args), **kwargs)` returning either one value or 1d array
    epsilon : float, optional
        Stepsize; if None the optimal stepsize EPS*x is used. See note.
    args : tuple
        Tuple of additional arguments for function `f`.
    kwargs : dict
        Dictionary of additional keyword arguments for function `f`.

    Returns
    -------
    partials : ndarray
        array of partial derivatives, Gradient or Jacobian

    Notes
    -----
    The complex-step derivative has truncation error O(epsilon**2) and no
    subtractive cancellation, so epsilon can be chosen very small without
    round-off problems.
    '''
    #From Guilherme P. de Freitas, numpy mailing list
    #May 04 2010 thread "Improvement of performance"
    #http://mail.scipy.org/pipermail/numpy-discussion/2010-May/050250.html
    n = len(x)
    epsilon = _get_epsilon(x, 1, epsilon, n)
    steps = np.identity(n) * 1j * epsilon
    #TODO: see if this can be vectorized, but usually dim is small
    partials = []
    for i, ih in enumerate(steps):
        partials.append(f(x + ih, *args, **kwargs).imag / epsilon[i])
    return np.array(partials).T
def approx_hess_cs(x, f, epsilon=None, args=(), kwargs={}):
    '''Calculate Hessian with complex-step derivative approximation

    Parameters
    ----------
    x : array_like
        value at which function derivative is evaluated
    f : function
        function of one array f(x)
    epsilon : float
        stepsize, if None, then stepsize is automatically chosen

    Returns
    -------
    hess : ndarray
        array of partial second derivatives, Hessian

    Notes
    -----
    Based on equation 10 in M. S. Ridout: Statistical Applications of the
    Complex-step Method of Numerical Differentiation, University of Kent,
    Canterbury, Kent, U.K.  The same stepsize is used for the complex and
    the finite-difference part.
    '''
    #TODO: might want to consider lowering the step for pure derivatives
    n = len(x)
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)
    hess = np.outer(h, h)  # h[i]*h[j] denominators, overwritten in place
    for i in range(n):
        for j in range(i, n):
            diff = (f(*((x + 1j * ee[i, :] + ee[j, :],) + args), **kwargs)
                    - f(*((x + 1j * ee[i, :] - ee[j, :],) + args), **kwargs)).imag
            hess[i, j] = diff / 2. / hess[i, j]
            hess[j, i] = hess[i, j]
    return hess
# Build approx_hess_cs's docstring from the shared template, filling in the
# complex-step specifics (equation 10 in Ridout, 2009).
approx_hess_cs.__doc__ = "Calculate Hessian with complex-step derivative " +\
                         "approximation\n" +\
                         "\n".join(_hessian_docs.split("\n")[1:]) % dict(
                             scale="3", extra_params="",
                             extra_returns="", equation_number="10",
                             equation = """1/(2*d_j*d_k) * imag(f(x + i*d[j]*e[j] + d[k]*e[k]) -
f(x + i*d[j]*e[j] - d[k]*e[k]))
""")
def approx_hess1(x, f, epsilon=None, args=(), kwargs={}, return_grad=False):
    # Forward-difference Hessian (equation 7 in Ridout, 2009); the full
    # docstring is attached from the shared _hessian_docs template below.
    n = len(x)
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)
    f0 = f(*((x,) + args), **kwargs)
    # One forward step along each coordinate.
    g = np.zeros(n)
    for i in range(n):
        g[i] = f(*((x + ee[i, :],) + args), **kwargs)
    hess = np.outer(h, h)  # this is now epsilon**2
    # Forward steps along pairs of coordinates.
    for i in range(n):
        for j in range(i, n):
            fij = f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs)
            hess[i, j] = (fij - g[i] - g[j] + f0) / hess[i, j]
            hess[j, i] = hess[i, j]
    if return_grad:
        return hess, (g - f0) / h
    return hess
# Forward-difference Hessian docstring (equation 7 in Ridout, 2009).
approx_hess1.__doc__ = _hessian_docs % dict(scale="3",
    extra_params = """return_grad : bool
Whether or not to also return the gradient
""",
    extra_returns = """grad : nparray
Gradient if return_grad == True
""",
    equation_number = "7",
    equation = """1/(d_j*d_k) * ((f(x + d[j]*e[j] + d[k]*e[k]) - f(x + d[j]*e[j])))
""")
def approx_hess2(x, f, epsilon=None, args=(), kwargs={}, return_grad=False):
    # Symmetrized forward/backward-difference Hessian (equation 8 in
    # Ridout, 2009); the full docstring is attached from _hessian_docs.
    n = len(x)
    #NOTE: ridout suggesting using eps**(1/4)*theta
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)
    f0 = f(*((x,) + args), **kwargs)
    # Single forward and backward steps along each coordinate.
    g = np.zeros(n)
    gg = np.zeros(n)
    for i in range(n):
        g[i] = f(*((x + ee[i, :],) + args), **kwargs)
        gg[i] = f(*((x - ee[i, :],) + args), **kwargs)
    hess = np.outer(h, h)  # this is now epsilon**2
    # Average of the forward and backward double steps.
    for i in range(n):
        for j in range(i, n):
            fpp = f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs)
            fmm = f(*((x - ee[i, :] - ee[j, :],) + args), **kwargs)
            hess[i, j] = (fpp - g[i] - g[j] + f0 +
                          fmm - gg[i] - gg[j] + f0) / (2 * hess[i, j])
            hess[j, i] = hess[i, j]
    if return_grad:
        return hess, (g - f0) / h
    return hess
# Symmetrized-difference Hessian docstring (equation 8 in Ridout, 2009).
approx_hess2.__doc__ = _hessian_docs % dict(scale="3",
    extra_params = """return_grad : bool
Whether or not to also return the gradient
""",
    extra_returns = """grad : nparray
Gradient if return_grad == True
""",
    equation_number = "8",
    equation = """1/(2*d_j*d_k) * ((f(x + d[j]*e[j] + d[k]*e[k]) - f(x + d[j]*e[j])) -
(f(x + d[k]*e[k]) - f(x)) +
(f(x - d[j]*e[j] - d[k]*e[k]) - f(x + d[j]*e[j])) -
(f(x - d[k]*e[k]) - f(x)))
""")
def approx_hess3(x, f, epsilon=None, args=(), kwargs={}):
    # Central-difference Hessian (equation 9 in Ridout, 2009); the full
    # docstring is attached from the shared _hessian_docs template below.
    n = len(x)
    h = _get_epsilon(x, 4, epsilon, n)
    ee = np.diag(h)
    hess = np.outer(h, h)  # h[i]*h[j] denominators, overwritten in place
    for i in range(n):
        for j in range(i, n):
            # BUG FIX: a stray trailing comma previously turned the second
            # central difference into a 1-tuple, so `float - tuple` raised
            # TypeError on every call.
            hess[i, j] = ((f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs)
                           - f(*((x + ee[i, :] - ee[j, :],) + args), **kwargs))
                          - (f(*((x - ee[i, :] + ee[j, :],) + args), **kwargs)
                             - f(*((x - ee[i, :] - ee[j, :],) + args), **kwargs))
                          ) / (4. * hess[i, j])
            hess[j, i] = hess[i, j]
    return hess
# Central-difference Hessian docstring (equation 9 in Ridout, 2009).
approx_hess3.__doc__ = _hessian_docs % dict(scale="4", extra_params="",
    extra_returns="", equation_number = "9",
    equation = """1/(4*d_j*d_k) * ((f(x + d[j]*e[j] + d[k]*e[k]) - f(x + d[j]*e[j]
- d[k]*e[k])) -
(f(x - d[j]*e[j] + d[k]*e[k]) - f(x - d[j]*e[j]
- d[k]*e[k]))""")
# Default Hessian: the most accurate (central-difference) variant.
approx_hess = approx_hess3
approx_hess.__doc__ += "\n This is an alias for approx_hess3"
if __name__ == '__main__':  #pragma : no cover
    # Scratch/demo code comparing the numerical derivatives against
    # analytic results and numdifftools on Probit and linear models.
    import statsmodels.api as sm
    from scipy.optimize.optimize import approx_fhess_p
    import numpy as np
    data = sm.datasets.spector.load()
    data.exog = sm.add_constant(data.exog, prepend=False)
    mod = sm.Probit(data.endog, data.exog)
    res = mod.fit(method="newton")
    test_params = [1,0.25,1.4,-7]
    llf = mod.loglike
    score = mod.score
    hess = mod.hessian
    # below is Josef's scratch work
    def approx_hess_cs_old(x, func, args=(), h=1.0e-20, epsilon=1e-6):
        # Hessian as the symmetrized forward difference of the
        # complex-step gradient.
        def grad(x):
            return approx_fprime_cs(x, func, args=args, h=1.0e-20)
        #Hessian from gradient:
        return (approx_fprime(x, grad, epsilon)
                + approx_fprime(x, grad, -epsilon))/2.
    def fun(beta, x):
        return np.dot(x, beta).sum(0)
    def fun1(beta, y, x):
        # per-observation squared residuals of a linear model
        #print(beta.shape, x.shape)
        xb = np.dot(x, beta)
        return (y-xb)**2 #(xb-xb.mean(0))**2
    def fun2(beta, y, x):
        # sum of squared residuals (scalar objective)
        #print(beta.shape, x.shape)
        return fun1(beta, y, x).sum(0)
    nobs = 200
    x = np.arange(nobs*3).reshape(nobs,-1)
    x = np.random.randn(nobs,3)
    xk = np.array([1,2,3])
    xk = np.array([1.,1.,1.])
    #xk = np.zeros(3)
    beta = xk
    y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
    # least-squares solution as evaluation point
    xk = np.dot(np.linalg.pinv(x),y)
    epsilon = 1e-6
    args = (y,x)
    from scipy import optimize
    xfmin = optimize.fmin(fun2, (0,0,0), args)
    print(approx_fprime((1,2,3),fun,epsilon,x))
    jac = approx_fprime(xk,fun1,epsilon,args)
    jacmin = approx_fprime(xk,fun1,-epsilon,args)
    #print(jac)
    print(jac.sum(0))
    print('\nnp.dot(jac.T, jac)')
    print(np.dot(jac.T, jac))
    print('\n2*np.dot(x.T, x)')
    print(2*np.dot(x.T, x))
    jac2 = (jac+jacmin)/2.
    print(np.dot(jac2.T, jac2))
    #he = approx_hess(xk,fun2,epsilon,*args)
    # NOTE(review): 'approx_hess_old' is not defined in this module (only
    # approx_hess_cs_old above); these calls would raise NameError if this
    # demo block were executed -- confirm the intended target.
    print(approx_hess_old(xk,fun2,1e-3,args))
    he = approx_hess_old(xk,fun2,None,args)
    print('hessfd')
    print(he)
    print('epsilon =', None)
    print(he[0] - 2*np.dot(x.T, x))
    for eps in [1e-3,1e-4,1e-5,1e-6]:
        print('eps =', eps)
        print(approx_hess_old(xk,fun2,eps,args)[0] - 2*np.dot(x.T, x))
    hcs2 = approx_hess_cs(xk,fun2,args=args)
    print('hcs2')
    print(hcs2 - 2*np.dot(x.T, x))
    hfd3 = approx_hess(xk,fun2,args=args)
    print('hfd3')
    print(hfd3 - 2*np.dot(x.T, x))
    import numdifftools as nd
    hnd = nd.Hessian(lambda a: fun2(a, y, x))
    hessnd = hnd(xk)
    print('numdiff')
    print(hessnd - 2*np.dot(x.T, x))
    #assert_almost_equal(hessnd, he[0])
    gnd = nd.Gradient(lambda a: fun2(a, y, x))
    gradnd = gnd(xk)
| |
"""Copy number detection with CNVkit with specific support for targeted sequencing.
http://cnvkit.readthedocs.org
"""
import copy
import os
import sys
import tempfile
import pybedtools
import numpy as np
import toolz as tz
from bcbio import install, utils
from bcbio.bam import ref
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import chromhacks
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.variation import bedutils, effects, vcfutils
from bcbio.provenance import do
from bcbio.structural import annotate, shared, plot
def run(items, background=None):
    """Detect copy number variations from batched set of samples using CNVkit.
    """
    return _cnvkit_by_type(items, background if background else [])
def _sv_workdir(data):
    """Return (and create) the per-sample CNVkit structural work directory."""
    path = os.path.join(data["dirs"]["work"], "structural",
                        dd.get_sample_name(data), "cnvkit")
    return utils.safe_makedir(path)
def _cnvkit_by_type(items, background):
    """Dispatch to specific CNVkit functionality based on input type.
    """
    # Single sample, tumor/normal pair, or general population of samples.
    if len(items + background) == 1:
        return _run_cnvkit_single(items[0])
    if vcfutils.get_paired_phenotype(items[0]):
        return _run_cnvkit_cancer(items, background)
    return _run_cnvkit_population(items, background)
def _associate_cnvkit_out(ckouts, items):
    """Associate cnvkit output with individual items.

    Tags each output as coming from the 'cnvkit' caller, layers on derived
    outputs (seg export, gain/loss, segment metrics, ploidy calls,
    bedgraph/BED conversions, optional plots) and appends the result to
    each sample's 'sv' list.
    """
    assert len(ckouts) == len(items)
    out = []
    for ckout, data in zip(ckouts, items):
        # Deep copy so shared output dictionaries are not mutated.
        ckout = copy.deepcopy(ckout)
        ckout["variantcaller"] = "cnvkit"
        # Only post-process when segmentation actually produced output.
        if utils.file_exists(ckout["cns"]):
            ckout = _add_seg_to_output(ckout, data)
            ckout = _add_gainloss_to_output(ckout, data)
            ckout = _add_segmetrics_to_output(ckout, data)
            ckout = _add_variantcalls_to_output(ckout, data)
            # ckout = _add_coverage_bedgraph_to_output(ckout, data)
            ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data)
            if "svplots" in dd.get_tools_on(data):
                ckout = _add_plots_to_output(ckout, data)
        if "sv" not in data:
            data["sv"] = []
        data["sv"].append(ckout)
        out.append(data)
    return out
def _run_cnvkit_single(data, background=None):
    """Process a single input file with BAM or uniform background.
    """
    work_dir = _sv_workdir(data)
    if background:
        back_bams = [x["align_bam"] for x in background]
        back_name = os.path.splitext(os.path.basename(back_bams[0]))[0]
    else:
        back_bams = []
        back_name = None
    ckouts = _run_cnvkit_shared([data], [data["align_bam"]], back_bams, work_dir,
                                background_name=back_name)
    if not ckouts:
        return [data]
    assert len(ckouts) == 1
    return _associate_cnvkit_out(ckouts, [data])
def _run_cnvkit_cancer(items, background):
    """Run CNVkit on a tumor/normal pair.
    """
    paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
    work_dir = _sv_workdir(paired.tumor_data)
    ckouts = _run_cnvkit_shared([paired.tumor_data], [paired.tumor_bam],
                                [paired.normal_bam], work_dir,
                                background_name=paired.normal_name)
    if not ckouts:
        return items
    assert len(ckouts) == 1
    # Only the tumor sample carries the CNV calls; normals pass through.
    tumor_out = _associate_cnvkit_out(ckouts, [paired.tumor_data])
    normal_out = [x for x in items if dd.get_sample_name(x) != paired.tumor_name]
    return tumor_out + normal_out
def _run_cnvkit_population(items, background):
    """Run CNVkit on a population of samples.

    Tries to calculate background based on case/controls, otherwise uses
    a flat background for each sample and calls independently.
    """
    assert not background
    inputs, background = shared.find_case_control(items)
    work_dir = _sv_workdir(inputs[0])
    back_name = dd.get_sample_name(background[0]) if len(background) > 0 else None
    ckouts = _run_cnvkit_shared(inputs, [x["align_bam"] for x in inputs],
                                [x["align_bam"] for x in background], work_dir,
                                background_name=back_name)
    return _associate_cnvkit_out(ckouts, inputs) + background
def _get_cmd():
return os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
def _bam_to_outbase(bam_file, work_dir):
"""Convert an input BAM file into CNVkit expected output.
"""
out_base = os.path.splitext(os.path.basename(bam_file))[0].split(".")[0]
return os.path.join(work_dir, out_base)
def _run_cnvkit_shared(items, test_bams, background_bams, work_dir, background_name=None):
    """Shared functionality to run CNVkit, parallelizing over multiple BAM files.

    Returns one output dictionary per test BAM with the normalized ratio
    (.cnr), segmented (.cns) and background (.cnn) file locations; an empty
    dict when no target regions remain.
    """
    raw_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
    background_cnn = os.path.join(raw_work_dir,
                                  "%s_background.cnn" % (background_name if background_name else "flat"))
    ckouts = []
    for test_bam in test_bams:
        out_base = _bam_to_outbase(test_bam, raw_work_dir)
        # BUG FIX: "cnr" previously pointed at the .cns segment file, so the
        # freshness check below and all downstream consumers of the ratio
        # file (bedgraph/BED conversion, segmetrics) read the wrong input.
        ckouts.append({"cnr": "%s.cnr" % out_base,
                       "cns": "%s.cns" % out_base,
                       "back_cnn": background_cnn})
    if not utils.file_exists(ckouts[0]["cnr"]):
        data = items[0]
        cov_interval = dd.get_coverage_interval(data)
        raw_target_bed, access_bed = _get_target_access_files(cov_interval, data, work_dir)
        # bail out if we ended up with no regions
        if not utils.file_exists(raw_target_bed):
            return {}
        raw_target_bed = annotate.add_genes(raw_target_bed, data)
        parallel = {"type": "local", "cores": dd.get_cores(data), "progs": ["cnvkit"]}
        target_bed, antitarget_bed = _cnvkit_targets(raw_target_bed, access_bed, cov_interval,
                                                     raw_work_dir, data)

        def _bam_to_itype(bam):
            # Background BAMs build the reference; others get evaluated.
            return "background" if bam in background_bams else "evaluate"

        # Coverage on split BED sections for better multicore usage,
        # then merge back to per-BAM CNN files.
        split_cnns = run_multicore(_cnvkit_coverage,
                                   [(bam, bed, _bam_to_itype(bam), raw_work_dir, data)
                                    for bam in test_bams + background_bams
                                    for bed in _split_bed(target_bed, data) + _split_bed(antitarget_bed, data)],
                                   data["config"], parallel)
        coverage_cnns = _merge_coverage(split_cnns, data)
        background_cnn = _cnvkit_background([x["file"] for x in coverage_cnns if x["itype"] == "background"],
                                            background_cnn, target_bed, antitarget_bed, data)
        # Normalize each evaluated sample against the background, then segment.
        fixed_cnrs = run_multicore(_cnvkit_fix,
                                   [(cnns, background_cnn, data) for cnns in
                                    tz.groupby("bam", [x for x in coverage_cnns
                                                       if x["itype"] == "evaluate"]).values()],
                                   data["config"], parallel)
        called_segs = run_multicore(_cnvkit_segment,
                                    [(cnr, cov_interval, data) for cnr in fixed_cnrs],
                                    data["config"], parallel)
    return ckouts
@utils.map_wrap
@zeromq_aware_logging
def _cnvkit_segment(cnr_file, cov_interval, data):
    """Perform segmentation and copy number calling on normalized inputs,
    writing a .cns file next to the input .cnr.
    """
    out_file = "%s.cns" % os.path.splitext(cnr_file)[0]
    if not utils.file_uptodate(out_file, cnr_file):
        with file_transaction(data, out_file) as tx_out_file:
            # Point CNVkit at the bcbio-installed R site library.
            local_sitelib = os.path.join(install.get_defaults().get("tooldir", "/usr/local"),
                                         "lib", "R", "site-library")
            cmd = [_get_cmd(), "segment", "-o", tx_out_file, "--rlibpath", local_sitelib, cnr_file]
            if cov_interval == "genome":
                # Stricter significance threshold for whole-genome runs.
                cmd += ["--threshold", "0.00001"]
            # preferentially use conda installed Rscript
            export_cmd = "unset R_HOME && export PATH=%s:$PATH && " % os.path.dirname(utils.Rscript_cmd())
            do.run(export_cmd + " ".join(cmd), "CNVkit segment")
    return out_file
@utils.map_wrap
@zeromq_aware_logging
def _cnvkit_fix(cnns, background_cnn, data):
    """Normalize samples, correcting sources of bias.

    Takes one target and one antitarget coverage CNN for a sample and
    produces the normalized .cnr ratio file via 'cnvkit.py fix'.
    """
    assert len(cnns) == 2, "Expected target and antitarget CNNs: %s" % cnns
    target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
    antitarget_cnn = [x["file"] for x in cnns if x["cnntype"] == "antitarget"][0]
    # The two coverage files share a '<base>.' prefix, so appending 'cnr'
    # yields the '<base>.cnr' output name.
    out_file = "%scnr" % os.path.commonprefix([target_cnn, antitarget_cnn])
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cmd = [_get_cmd(), "fix", "-o", tx_out_file, target_cnn, antitarget_cnn, background_cnn]
            do.run(cmd, "CNVkit fix")
    return [out_file]
def _cnvkit_background(background_cnns, out_file, target_bed, antitarget_bed, data):
    """Calculate background reference, handling flat case with no normal sample.
    """
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(data), "-o", tx_out_file]
            if len(background_cnns) == 0:
                # No normals: build a flat reference from target/antitarget
                # regions alone.
                cmd += ["-t", target_bed, "-a", antitarget_bed]
            else:
                cmd += background_cnns
            do.run(cmd, "CNVkit background")
    return out_file
def _split_bed(bed_input, data):
    """Split BED file into sections for processing, allowing better multicore usage.

    Returns a list of dictionaries with the split file, its index 'i' and
    the original input; an empty input yields a single passthrough entry
    without an index.
    """
    split_lines = 100000
    split_info = []
    base, ext = os.path.splitext(bed_input)
    # Preserve double extensions such as '.target.bed'.
    base, ext2 = os.path.splitext(base)
    ext = ext2 + ext
    with open(bed_input) as in_handle:
        for cur_index, line_group in enumerate(tz.partition_all(split_lines, in_handle)):
            cur_file = "%s-%s%s" % (base, cur_index, ext)
            if not utils.file_uptodate(cur_file, bed_input):
                with file_transaction(data, cur_file) as tx_out_file:
                    with open(tx_out_file, "w") as out_handle:
                        for line in line_group:
                            out_handle.write(line)
            split_info.append({"i": cur_index, "orig": bed_input, "file": cur_file})
    if not split_info:  # empty input file
        split_info.append({"file": bed_input, "orig": bed_input})
    return split_info
def _merge_coverage(cnns, data):
    """Merge split CNN outputs into final consolidated output.

    Groups the per-section coverage files by their merged target and
    original BED, concatenates them in section order, and returns metadata
    describing each merged file.
    """
    out = []
    for (out_file, _), members in tz.groupby(lambda x: (x["final_out"], x["bed_orig"]), cnns).items():
        if not utils.file_exists(out_file):
            with file_transaction(data, out_file) as tx_out_file:
                with open(tx_out_file, "w") as out_handle:
                    # Concatenate splits in BED-section order, keeping only
                    # the first file's header line.
                    for i, in_file in enumerate([x["file"] for x in sorted(members, key=lambda x: x["bed_i"])]):
                        with open(in_file) as in_handle:
                            header = in_handle.readline()
                            if i == 0:
                                out_handle.write(header)
                            for line in in_handle:
                                out_handle.write(line)
        # Describe the merged file using one member's metadata, dropping
        # the split-specific keys.
        base = copy.deepcopy(members[0])
        base = tz.dissoc(base, "final_out", "bed_i", "bed_orig")
        base["file"] = out_file
        out.append(base)
    return out
@utils.map_wrap
@zeromq_aware_logging
def _cnvkit_coverage(bam_file, bed_info, input_type, work_dir, data):
    """Calculate coverage in a BED file for CNVkit.

    Returns metadata (CNN type, split index, merge target) used later by
    _merge_coverage and _cnvkit_fix.
    """
    bed_file = bed_info["file"]
    # Map the BED flavor to its CNN type and output extension.
    exts = {".target.bed": ("target", "targetcoverage.cnn"),
            ".antitarget.bed": ("antitarget", "antitargetcoverage.cnn")}
    assert bed_file.endswith(tuple(exts.keys())), "Unexpected BED file extension for coverage %s" % bed_file
    for orig, (cnntype, ext) in exts.items():
        if bed_file.endswith(orig):
            break
    base = _bam_to_outbase(bam_file, work_dir)
    merged_out_file = "%s.%s" % (base, ext)
    # Split inputs get an indexed name; they are merged later.
    out_file = "%s-%s.%s" % (base, bed_info["i"], ext) if "i" in bed_info else merged_out_file
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cmd = [_get_cmd(), "coverage", bam_file, bed_file, "-o", tx_out_file]
            do.run(cmd, "CNVkit coverage")
    return [{"itype": input_type, "file": out_file, "bam": bam_file, "cnntype": cnntype,
             "final_out": merged_out_file, "bed_i": bed_info.get("i"), "bed_orig": bed_info["orig"]}]
def _cnvkit_targets(raw_target_bed, access_bed, cov_interval, work_dir, data):
    """Create target and antitarget regions from target and access files.
    """
    target_bed = os.path.join(work_dir, "%s.target.bed" % os.path.splitext(os.path.basename(raw_target_bed))[0])
    if not utils.file_uptodate(target_bed, raw_target_bed):
        with file_transaction(data, target_bed) as tx_out_file:
            cmd = [_get_cmd(), "target", raw_target_bed, "--split", "-o", tx_out_file]
            if cov_interval == "genome":
                # Larger average bin size for whole-genome coverage.
                cmd += ["--avg-size", "500"]
            do.run(cmd, "CNVkit target")
    antitarget_bed = os.path.join(work_dir, "%s.antitarget.bed" % os.path.splitext(os.path.basename(raw_target_bed))[0])
    if not os.path.exists(antitarget_bed):
        with file_transaction(data, antitarget_bed) as tx_out_file:
            # Antitargets are accessible regions outside the targets.
            cmd = [_get_cmd(), "antitarget", "-g", access_bed, target_bed, "-o", tx_out_file]
            do.run(cmd, "CNVkit antitarget")
    return target_bed, antitarget_bed
def _get_target_access_files(cov_interval, data, work_dir):
    """Retrieve target and access files based on the type of data to process.

    pick targets, anti-targets and access files based on analysis type
    http://cnvkit.readthedocs.org/en/latest/nonhybrid.html
    """
    base_regions = shared.get_base_cnv_regions(data, work_dir)
    target_bed = bedutils.merge_overlaps(base_regions, data, out_dir=work_dir)
    if cov_interval in ("amplicon", "genome"):
        # Non-hybrid protocols use the targets themselves as the access file.
        return target_bed, target_bed
    access_file = _create_access_file(dd.get_ref_file(data), _sv_workdir(data), data)
    return target_bed, access_file
def _add_seg_to_output(out, data):
    """Export outputs to 'seg' format compatible with IGV and GenePattern.
    """
    out_file = "%s.seg" % os.path.splitext(out["cns"])[0]
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cnvkit_py = os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
            do.run([cnvkit_py, "export", "seg", "-o", tx_out_file, out["cns"]],
                   "CNVkit export seg")
    out["seg"] = out_file
    return out
def _add_cnr_bedgraph_and_bed_to_output(out, data):
    """Derive bedgraph and BED representations from the .cnr ratio file."""
    cnr_file = out["cnr"]
    conversions = [
        ("cnr_bedgraph", cnr_file + ".bedgraph",
         "sed 1d {cnr_file} | cut -f1,2,3,5 > {tx_out_file}",
         "Converting cnr to bedgraph format"),
        ("cnr_bed", cnr_file + ".bed",
         "sed 1d {cnr_file} | cut -f1,2,3,4,5 > {tx_out_file}",
         "Converting cnr to bed format"),
    ]
    for key, target, template, message in conversions:
        if not utils.file_exists(target):
            with file_transaction(data, target) as tx_out_file:
                # Drop the header line and keep the coordinate/ratio columns.
                do.run(template.format(cnr_file=cnr_file, tx_out_file=tx_out_file),
                       message)
        out[key] = target
    return out
def _add_variantcalls_to_output(out, data):
    """Call ploidy and convert into VCF and BED representations.

    Runs `cnvkit.py call` on the segmented .cns file, then exports the
    call file to BED and VCF; the VCF is annotated with snpeff effects.
    """
    # os.path.splitext returns (root, ext); insert "-call" before the extension.
    call_file = "%s-call%s" % os.path.splitext(out["cns"])
    gender = dd.get_gender(data)
    if not utils.file_exists(call_file):
        with file_transaction(data, call_file) as tx_call_file:
            # cnvkit.py resolved from the running interpreter's environment.
            cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call",
                   "--ploidy", str(dd.get_ploidy(data)),
                   "-o", tx_call_file, out["cns"]]
            if gender:
                cmd += ["--gender", gender]
                if gender.lower() == "male":
                    cmd += ["--male-reference"]
            do.run(cmd, "CNVkit call ploidy")
    calls = {}
    for outformat in ["bed", "vcf"]:
        out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat)
        calls[outformat] = out_file
        if not utils.file_exists(out_file):
            with file_transaction(data, out_file) as tx_out_file:
                cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
                       outformat, "--sample-id", dd.get_sample_name(data),
                       "--ploidy", str(dd.get_ploidy(data)),
                       "-o", tx_out_file, call_file]
                if gender and gender.lower() == "male":
                    cmd += ["--male-reference"]
                do.run(cmd, "CNVkit export %s" % outformat)
    out["call_file"] = call_file
    # Gene-annotated BED of the calls for downstream prioritization.
    out["vrn_bed"] = annotate.add_genes(calls["bed"], data)
    # Fall back to the raw exported VCF if snpeff annotation is unavailable.
    effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff")
    out["vrn_file"] = effects_vcf or calls["vcf"]
    return out
def _add_segmetrics_to_output(out, data):
    """Add metrics for measuring reliability of CNV estimates."""
    out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0]
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cnvkit_py = os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
            # Report interquartile range plus confidence/prediction intervals
            # per segment.
            do.run([cnvkit_py, "segmetrics", "--iqr", "--ci", "--pi",
                    "-s", out["cns"], "-o", tx_out_file, out["cnr"]],
                   "CNVkit segmetrics")
    out["segmetrics"] = out_file
    return out
def _add_gainloss_to_output(out, data):
    """Add gainloss based on genes, helpful for identifying changes in smaller genes."""
    out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0]
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cnvkit_py = os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
            do.run([cnvkit_py, "gainloss",
                    "-s", out["cns"], "-o", tx_out_file, out["cnr"]],
                   "CNVkit gainloss")
    out["gainloss"] = out_file
    return out
def _add_coverage_bedgraph_to_output(out, data):
    """Add BedGraph representation of coverage to the output.

    Builds a BED of segment intervals from the .cns file, restricts the
    alignment to those intervals with samtools, and reports coverage with
    bedtools genomecov.
    """
    out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0]
    if utils.file_exists(out_file):
        out["bedgraph"] = out_file
        return out
    bam_file = dd.get_align_bam(data)
    bedtools = config_utils.get_program("bedtools", data["config"])
    samtools = config_utils.get_program("samtools", data["config"])
    cns_file = out["cns"]
    bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name
    try:
        with file_transaction(data, out_file) as tx_out_file:
            # NOTE(review): bedtools genomecov -g normally expects a genome
            # (chromosome size) file, not a BED of intervals -- confirm.
            cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; "
                   "{samtools} view -b -L {bed_file} {bam_file} | "
                   "{bedtools} genomecov -bg -ibam - -g {bed_file} >"
                   "{tx_out_file}").format(**locals())
            do.run(cmd, "CNVkit bedGraph conversion")
    finally:
        # Previously the temporary BED leaked when the conversion failed;
        # always clean it up.
        if os.path.exists(bed_file):
            os.remove(bed_file)
    out["bedgraph"] = out_file
    return out
def _add_plots_to_output(out, data):
    """Add CNVkit plots summarizing called copy number values."""
    out["plot"] = {}
    # Each plot helper returns a file name, or a falsy value when the plot
    # cannot be produced; only successful plots are recorded.
    plotters = (("diagram", _add_diagram_plot),
                ("loh", _add_loh_plot),
                ("scatter", _add_scatter_plot),
                ("scatter_global", _add_global_scatter_plot))
    for plot_name, plot_fn in plotters:
        plot_file = plot_fn(out, data)
        if plot_file:
            out["plot"][plot_name] = plot_file
    return out
def _get_larger_chroms(ref_file):
    """Retrieve larger chromosomes, avoiding the smaller ones for plotting.

    Uses two rounds of 2-means clustering on sorted contig sizes: the first
    splits full chromosomes from the tail of alt/haplotype contigs, the
    second refines the small cluster so its maximum becomes the size cutoff.
    """
    from scipy.cluster.vq import kmeans, vq
    all_sizes = []
    for c in ref.file_contigs(ref_file):
        all_sizes.append(float(c.size))
    all_sizes.sort()
    # separate out smaller chromosomes and haplotypes with kmeans
    centroids, _ = kmeans(np.array(all_sizes), 2)
    idx, _ = vq(np.array(all_sizes), centroids)
    # sizes are sorted, so partitionby on cluster id yields contiguous runs;
    # the first run is the smallest-size cluster
    little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes)))
    little_sizes = [x[1] for x in little_sizes]
    # create one more cluster with the smaller, removing the haplotypes
    centroids2, _ = kmeans(np.array(little_sizes), 2)
    idx2, _ = vq(np.array(little_sizes), centroids2)
    little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes)))
    little_sizes2 = [x[1] for x in little_sizes2]
    # get any chromosomes not in haplotype/random bin
    thresh = max(little_sizes2)
    larger_chroms = []
    for c in ref.file_contigs(ref_file):
        if c.size > thresh:
            larger_chroms.append(c.name)
    return larger_chroms
def _remove_haplotype_chroms(in_file, data):
    """Remove shorter haplotype chromosomes from cns/cnr files for plotting."""
    keep_chroms = set(_get_larger_chroms(dd.get_ref_file(data)))
    out_file = "%s-chromfilter%s" % utils.splitext_plus(in_file)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            with open(in_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    # Keep the header plus rows on full-size chromosomes.
                    wanted = (line for line in in_handle
                              if line.startswith("chromosome")
                              or line.split()[0] in keep_chroms)
                    out_handle.writelines(wanted)
    return out_file
def _add_global_scatter_plot(out, data):
    """Whole-genome scatter plot of bin ratios with the segment overlay."""
    out_file = "%s-scatter_global.pdf" % os.path.splitext(out["cnr"])[0]
    if utils.file_exists(out_file):
        return out_file
    cnr_filtered = _remove_haplotype_chroms(out["cnr"], data)
    cns_filtered = _remove_haplotype_chroms(out["cns"], data)
    with file_transaction(data, out_file) as tx_out_file:
        do.run([_get_cmd(), "scatter", "-s", cns_filtered, "-o", tx_out_file,
                cnr_filtered], "CNVkit global scatter plot")
    return out_file
def _add_scatter_plot(out, data):
    """Scatter plot restricted to prioritized regions; None without a priority BED."""
    out_file = "%s-scatter.pdf" % os.path.splitext(out["cnr"])[0]
    priority_bed = dd.get_svprioritize(data)
    if not priority_bed:
        return None
    priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data,
                                                 os.path.dirname(out_file))
    if utils.file_exists(out_file):
        return out_file
    cnr_filtered = _remove_haplotype_chroms(out["cnr"], data)
    cns_filtered = _remove_haplotype_chroms(out["cns"], data)
    with file_transaction(data, out_file) as tx_out_file:
        do.run([_get_cmd(), "scatter", "-s", cns_filtered, "-o", tx_out_file,
                "-l", priority_bed, cnr_filtered], "CNVkit scatter plot")
    return out_file
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True
def _add_diagram_plot(out, data):
    """Chromosome diagram of copy number calls; None when inputs are header-only."""
    out_file = "%s-diagram.pdf" % os.path.splitext(out["cnr"])[0]
    cnr_filtered = _remove_haplotype_chroms(out["cnr"], data)
    cns_filtered = _remove_haplotype_chroms(out["cns"], data)
    if _cnx_is_empty(cnr_filtered) or _cnx_is_empty(cns_filtered):
        return None
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cmd = [_get_cmd(), "diagram", "-s", cns_filtered,
                   "-o", tx_out_file, cnr_filtered]
            gender = dd.get_gender(data)
            if gender and gender.lower() == "male":
                cmd.append("--male-reference")
            do.run(cmd, "CNVkit diagram plot")
    return out_file
def _add_loh_plot(out, data):
    """Plot loss of heterozygosity from variant calls, if any are available.

    Returns the plot file name, or None when no variant calls exist.
    """
    # Use list comprehensions rather than filter(): under Python 3 filter()
    # returns an iterator, so the original len()/indexing would raise.
    vrn_files = [x.get("vrn_file") for x in data.get("variants", [])]
    vrn_files = [f for f in vrn_files if f is not None]
    if len(vrn_files) > 0:
        out_file = "%s-loh.pdf" % os.path.splitext(out["cnr"])[0]
        cns = _remove_haplotype_chroms(out["cns"], data)
        if not utils.file_exists(out_file):
            with file_transaction(data, out_file) as tx_out_file:
                cmd = [_get_cmd(), "loh", "-t", "-s", cns,
                       "-o", tx_out_file, vrn_files[0]]
                # Fixed log label: this is the LOH plot, not the diagram plot.
                do.run(cmd, "CNVkit loh plot")
        return out_file
def _create_access_file(ref_file, out_dir, data):
    """Create genome access file for CNVlib to define available genomic regions.

    XXX Can move to installation/upgrade process if too slow here.
    """
    ref_base = os.path.splitext(os.path.basename(ref_file))[0]
    out_file = os.path.join(out_dir, "%s-access.bed" % ref_base)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            do.run([_get_cmd(), "access", ref_file, "-s", "10000",
                    "-o", tx_out_file], "Create CNVkit access file")
    return out_file
# ## Theta support
def export_theta(ckout, data):
    """Provide updated set of data with export information for TheTA2 input."""
    # Restrict both inputs to standard chromosomes before export.
    cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome")
    cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome")
    out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0]
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            theta_cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file,
                         "-o", tx_out_file]
            do.run(theta_cmd, "Export CNVkit calls as inputs for TheTA2")
    ckout["theta_input"] = out_file
    return ckout
| |
#!/usr/bin/python
# Copyright 2013 WibiData, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
kijistats aggregates the data collected by profiling KijiSchema and
KijiMR during a MapReduce job. The KijiMR-profiling jar generates
a file for every task attempt, whose format is as follows:
"Job Name, Job ID, Task Attempt, Function Signature,
Aggregate Time (nanoseconds), Number of Invocations,
Time per call (nanoseconds)\n"
A user may want to aggregate these numbers based on JobId to get a
picture at job-level instead of task attempt granularity.
The stats folder is under the job's working directory in HDFS. You will
need to copy this to your local filesystem before passing it as input
to this command.
This should be run as:
./kijistats.py [..arguments]
On MacOSX, you may need to run the script as
arch -i386 /usr/bin/python ./kijistats.py [..arguments]
Use the -h option to see the various ways in which the profiling stats
can be aggregated.
The output can be stored as a csv file or displayed as bar graphs of
aggregate times spent in functions, number of invocations of functions
and average time per function call for functions. This will be per task
attempt or per function based on context.
NOTE: you may need to install some python libraries if they do not exist
on your system. (You can use either pip or easy_install for this.)
pip install matplotlib
pip install numpy
'''
import argparse
from collections import defaultdict
import matplotlib.pyplot as plt
import os
import re
import sys
"""
This script parses comma delimited files. The various fields are defined positionally below.
"""
JOB_NAME = 0
JOB_ID = 1
TASK_ATTEMPT = 2
FUNC_SIG = 3
AGGREGATE_TIME = 4
INVOCATIONS = 5
PER_CALL_TIME = 6
"""
Graph colors
"""
# Color for aggregate time spent in function in the output graph
COLOR_AGGR = '#cc99ff'
# Color of number of invocations of function in the output graph
COLOR_INV = '#ff99ff'
# Color of average time spent per function call in the output graph
COLOR_PCT = '#cc0099'
def BuildFlagParser():
  """Flag parsers for the Kiji stats tool.

  Returns:
    Command-line flag parser.
  """
  flag_parser = argparse.ArgumentParser(
      description='Kiji Stats tool to analyze profiling data.'
  )
  flag_parser.add_argument(
      '--stats-dir',
      dest='stats_dir',
      type=str,
      default=os.getcwd(),
      help='Local directory where profiling data is stored. ' +
           'Usually called `kijistats` in the working directory of the MapReduce job. ' +
           'You will need to copy this directory on hdfs to your local filesystem and supply it.'
  )
  # A run may be filtered by at most one of: job id, job name, task attempt.
  job_group = flag_parser.add_mutually_exclusive_group(required=False)
  job_group.add_argument(
      '--by-job',
      dest='job',
      type=str,
      help='Job ID for which to collect stats.'
  )
  job_group.add_argument(
      '--by-jobname',
      dest='jobname',
      type=str,
      help='Name of the job for which to collect stats. This will be ignored if' +
           ' the job ID has been specified.' +
           ' Note that multiple jobs may have the same name. Example: MapFamilyGatherer.'
  )
  job_group.add_argument(
      '--by-task',
      dest='taskid',
      type=str,
      help='Task attempt ID for which to collect stats.'
  )
  flag_parser.add_argument(
      '--by-function',
      dest='function_name',
      type=str,
      help='Function for which to collect stats. Can be used to collect stats about' +
           ' this function across all task attempts or jobs or for a single task or job.'
  )
  # Output destination: exactly one of on-screen graphs or a CSV dump.
  output_group = flag_parser.add_mutually_exclusive_group(required=True)
  output_group.add_argument('--as-graph', action='store_true')
  output_group.add_argument('--to-file', type=str,
                            help='File name to store aggregated result.')
  return flag_parser
'''
This function filters the input files based on the options specified by the user.
The user may aggregate by jobID, job name, task ID, function name or a combination
where applicable. (Use kijistats -h for more information.). Before we aggregate the
numbers, we would like only the relevant data to be present.
The output is a list of select lines from the input file that match the filtering
criteria, but have now been split on ", "
E.g.
[
[job id, job name ...]
[job id, job name...]
]
You can use the above defined positional constants like JOB_NAME to index into each
line.
'''
def gatherData(flags):
  """Read profiling files and return rows matching the user's filters.

  Returns a list of parsed lines (each split on top-level commas); index
  into rows with the positional constants (JOB_NAME, JOB_ID, ...).
  """
  stats_dir = flags.stats_dir
  taskid = flags.taskid
  jobid = flags.job
  jobname = flags.jobname
  function_name = flags.function_name
  # This regular expression splits on comma only if the comma is not
  # present in the middle of parentheses (function signatures contain
  # commas). The regex matches:
  #   - non-comma, non-open-paren characters
  #   - strings that start with an open paren, contain 0 or more
  #     non-close-parens, and then a close paren
  # Compiled once here instead of once per input line.
  splitter = re.compile(r'(?:[^,(]|\([^)]*\))+')

  def parse_line(line):
    # We split before substring-matching so that we don't accidentally match
    # strings like "FooTestingGatherer" to "TestingGatherer" while filtering.
    splitline = [x.strip() for x in splitter.findall(line)]
    if len(splitline) > PER_CALL_TIME + 1:
      raise RuntimeError('Possible error in input format. More than 6 +'
          'elements found in line', line)
    return splitline

  def matches_function(splitline):
    return (function_name and function_name in splitline[FUNC_SIG]) or \
        not function_name

  profile_data = []
  if not taskid:
    # Aggregate all files under the stats directory
    for basename in os.listdir(stats_dir):
      # Skip hidden files, partial outputs and HDFS checksum files.
      # (Bug fix: these checks previously ran on the joined path, so the
      # "." and "_" prefix filters never matched the file name.)
      if basename.startswith('.') or basename.startswith('_') \
          or basename.endswith('crc'):
        continue
      filename = os.path.join(stats_dir, basename)
      if not os.path.isfile(filename):
        continue
      with open(filename) as f:
        for line in f:
          # disregard the header line
          if line.startswith('Job Name, Job ID,'):
            continue
          splitline = parse_line(line)
          if jobid:
            # Filtering on job id, optionally narrowed by function
            if jobid == splitline[JOB_ID] and matches_function(splitline):
              profile_data.append(splitline)
          elif jobname:
            # Filtering on job name (need not be perfect match)
            if jobname in splitline[JOB_NAME] and matches_function(splitline):
              profile_data.append(splitline)
          elif function_name and function_name in splitline[FUNC_SIG]:
            # Filtering on function name across all jobs
            profile_data.append(splitline)
  else:
    # We only need to read the file which represents this task attempt
    task_file = os.path.join(stats_dir, taskid)
    if os.path.exists(task_file):
      with open(task_file) as f:
        for line in f:
          # disregard the header line
          if line.startswith('Job Name, Job ID,'):
            continue
          splitline = parse_line(line)
          # Filtering on function within this task attempt
          if matches_function(splitline):
            profile_data.append(splitline)
  return profile_data
'''
This plots 3 graphs for: aggregate time in a function, number of invocations of a function and
average time per call spent in a function. All the times are in nanoseconds.
If we are aggregating by function name, the graph is displayed by task attempt id. In all other
cases, the numbers are displayed by function name.
'''
def plotgraph(data):
  """Draw three horizontal bar charts for the aggregated stats.

  One figure each for aggregate time, invocation count and per-call time
  (times in nanoseconds), keyed by function signature or task attempt id
  depending on how the data was aggregated.
  """
  labels = list(data.keys())
  aggr = [data[key][0] for key in labels]
  inv = [data[key][1] for key in labels]
  pct = [data[key][2] for key in labels]
  positions = range(len(labels))  # the y locations for the groups
  bar_height = 0.7  # the width of the bars
  charts = ((aggr, COLOR_AGGR, 'Aggregate time in nanoseconds'),
            (inv, COLOR_INV, 'Number of invocations'),
            (pct, COLOR_PCT, 'Per call time'))
  for values, color, title in charts:
    plt.figure()
    plt.barh(positions, values, align='center', height=bar_height, color=color)
    plt.yticks(positions, labels, horizontalalignment='left')
    plt.title(title)
  plt.show()
'''
Combine the data based on context. By this point, raw_data contains the profiling stats
either by function signature or by task attempt id, depending on what the user specified.
Add up the numbers to present an aggregate view.
The input raw_data is a list of select lines from the input file that match the filtering
criteria, but have now been split on ", "
E.g.
[
[job id, job name ...]
[job id, job name...]
]
You can use the above defined positional constants like JOB_NAME to index into each
line.
The output is a dictionary from (function name | attempt id) depending on context to a
tuple of (aggregate time, number of invocations, average time we call). Times are all
in nanoseconds.
'''
def aggregateData(flags, raw_data):
  """Combine filtered rows into per-key totals based on context.

  When a job, job name or task filter was used, totals are grouped by
  function signature; when only a function filter was used, they are
  grouped by task attempt. Returns a dict mapping the group key to a
  (aggregate time, invocations, summed per-call time) tuple; times are
  in nanoseconds. The two original branches were identical except for
  the grouping column, so the summing is factored into a helper.
  """
  if flags.job or flags.jobname or flags.taskid:
    # We have either accumulated one (or all) functions from task attempts;
    # combine these by function name.
    return _sumByColumn(raw_data, FUNC_SIG)
  elif flags.function_name:
    # Viewing one function across task attempts.
    return _sumByColumn(raw_data, TASK_ATTEMPT)
  return {}

def _sumByColumn(raw_data, key_column):
  # Sum aggregate time, invocation count and per-call time per group key.
  d_aggr = defaultdict(int)
  d_inv = defaultdict(int)
  d_pct = defaultdict(float)
  for row in raw_data:
    key = row[key_column]
    d_aggr[key] += int(row[AGGREGATE_TIME])
    d_inv[key] += int(row[INVOCATIONS])
    d_pct[key] += float(row[PER_CALL_TIME])
  return dict((key, (d_aggr[key], d_inv[key], d_pct[key])) for key in d_aggr)
'''
Either plot the graphs described above in plotgraph or serialize to a file. The format is
a comma separated list of (function signature | task attempt id), aggregate time spent in function
(nanoseconds), number of invocations, average time spent per function (nanoseconds)'\n'
'''
def displayData(flags, aggregated):
  """Write aggregated stats as comma-separated lines, or plot them.

  File format per line: key, aggregate time (ns), invocations,
  per-call time (ns).
  """
  if not flags.to_file:
    plotgraph(aggregated)
    return
  with open(flags.to_file, 'w') as out:
    for key, values in aggregated.items():
      out.write('%s, %s\n' % (key, ', '.join(str(v) for v in values)))
def main(args):
  """Entry point: parse flags, then gather, aggregate and display stats."""
  flags = BuildFlagParser().parse_args(args[1:])
  flags.stats_dir = os.path.abspath(os.path.expanduser(flags.stats_dir))
  # At least one selection criterion is required to do anything useful.
  if not any((flags.job, flags.jobname, flags.taskid, flags.function_name)):
    raise ValueError('Incorrect Arguments: You must specify either job id, job name, '
        'task attempt or function for which you wish to collect stats.')
  displayData(flags, aggregateData(flags, gatherData(flags)))
if __name__ == '__main__':
main(sys.argv)
| |
import string
import sys
from direct.showbase import DirectObject
from otp.otpbase import OTPGlobals
from direct.fsm import ClassicFSM
from direct.fsm import State
from otp.login import SecretFriendsInfoPanel
from otp.login import PrivacyPolicyPanel
from otp.otpbase import OTPLocalizer
from direct.directnotify import DirectNotifyGlobal
from otp.login import LeaveToPayDialog
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
# Messenger event names broadcast when chat messages are sent.
ChatEvent = 'ChatEvent'
NormalChatEvent = 'NormalChatEvent'
SCChatEvent = 'SCChatEvent'
SCCustomChatEvent = 'SCCustomChatEvent'
SCEmoteChatEvent = 'SCEmoteChatEvent'
# Chat display modes.
OnScreen = 0
OffScreen = 1
Thought = 2
# Messages starting with this prefix are treated as "thoughts".
ThoughtPrefix = '.'
def isThought(message):
    """Return 1 if message starts with the thought prefix ('.'), else 0.

    Integer return values (not bool) are kept for compatibility with
    existing callers.
    """
    # The original bounded str.find plus empty-string guard is exactly
    # startswith: an empty message can never start with the prefix.
    return 1 if message.startswith(ThoughtPrefix) else 0
def removeThoughtPrefix(message):
    """Strip the leading thought prefix from message, if present."""
    if not isThought(message):
        return message
    return message[len(ThoughtPrefix):]
class ChatManager(DirectObject.DirectObject):
    """FSM-driven manager for the local avatar's chat UI: open (typed) chat,
    speedchat, and whisper modes, plus the warning/activation dialogs around
    paid and parent-password-gated chat."""
    notify = DirectNotifyGlobal.directNotify.newCategory('ChatManager')
    # Config switch: whether typed chat may execute developer commands.
    execChat = config.GetBool('exec-chat', 0)
    def __init__(self, cr, localAvatar):
        """Set up chat state holders and the chat-mode FSM.

        Args:
            cr: the client repository.
            localAvatar: the avatar whose chat this manager drives.
        """
        self.cr = cr
        self.localAvatar = localAvatar
        # With custom controls, the chat entry must not grab keystrokes in
        # the background.
        self.wantBackgroundFocus = not base.wantCustomControls
        self.chatHotkey = base.CHAT_HOTKEY
        # Obscured flags: set when other UI covers the chat buttons.
        self.__scObscured = 0
        self.__normalObscured = 0
        # Dialog/GUI panels are created lazily; None until first needed.
        self.openChatWarning = None
        self.unpaidChatWarning = None
        self.teaser = None
        self.paidNoParentPassword = None
        self.noSecretChatAtAll = None
        self.noSecretChatAtAllAndNoWhitelist = None
        self.noSecretChatWarning = None
        self.activateChatGui = None
        self.chatMoreInfo = None
        self.chatPrivacyPolicy = None
        self.secretChatActivated = None
        self.problemActivatingChat = None
        self.leaveToPayDialog = None
        # One state per chat mode/dialog; enter/exit handlers live on self.
        self.fsm = ClassicFSM.ClassicFSM('chatManager', [State.State('off', self.enterOff, self.exitOff),
         State.State('mainMenu', self.enterMainMenu, self.exitMainMenu),
         State.State('speedChat', self.enterSpeedChat, self.exitSpeedChat),
         State.State('normalChat', self.enterNormalChat, self.exitNormalChat),
         State.State('whisper', self.enterWhisper, self.exitWhisper),
         State.State('whisperChat', self.enterWhisperChat, self.exitWhisperChat),
         State.State('whisperChatPlayer', self.enterWhisperChatPlayer, self.exitWhisperChatPlayer),
         State.State('whisperSpeedChat', self.enterWhisperSpeedChat, self.exitWhisperSpeedChat),
         State.State('whisperSpeedChatPlayer', self.enterWhisperSpeedChatPlayer, self.exitWhisperSpeedChatPlayer),
         State.State('openChatWarning', self.enterOpenChatWarning, self.exitOpenChatWarning),
         State.State('leaveToPayDialog', self.enterLeaveToPayDialog, self.exitLeaveToPayDialog),
         State.State('unpaidChatWarning', self.enterUnpaidChatWarning, self.exitUnpaidChatWarning),
         State.State('noSecretChatAtAll', self.enterNoSecretChatAtAll, self.exitNoSecretChatAtAll),
         State.State('noSecretChatAtAllAndNoWhitelist', self.enterNoSecretChatAtAllAndNoWhitelist, self.exitNoSecretChatAtAllAndNoWhitelist),
         State.State('noSecretChatWarning', self.enterNoSecretChatWarning, self.exitNoSecretChatWarning),
         State.State('noFriendsWarning', self.enterNoFriendsWarning, self.exitNoFriendsWarning),
         State.State('otherDialog', self.enterOtherDialog, self.exitOtherDialog),
         State.State('activateChat', self.enterActivateChat, self.exitActivateChat),
         State.State('chatMoreInfo', self.enterChatMoreInfo, self.exitChatMoreInfo),
         State.State('chatPrivacyPolicy', self.enterChatPrivacyPolicy, self.exitChatPrivacyPolicy),
         State.State('secretChatActivated', self.enterSecretChatActivated, self.exitSecretChatActivated),
         State.State('problemActivatingChat', self.enterProblemActivatingChat, self.exitProblemActivatingChat),
         State.State('whiteListOpenChat', self.enterWhiteListOpenChat, self.exitWhiteListOpenChat),
         State.State('whiteListAvatarChat', self.enterWhiteListAvatarChat, self.exitWhiteListAvatarChat),
         State.State('whiteListPlayerChat', self.enterWhiteListPlayerChat, self.exitWhiteListPlayerChat),
         State.State('trueFriendTeaserPanel', self.enterTrueFriendTeaserPanel, self.exitTrueFriendTeaserPanel)], 'off', 'off')
        self.fsm.enterInitialState()
        return
    def delete(self):
        """Tear down GUI elements and break references so GC can reclaim them."""
        self.ignoreAll()
        del self.fsm
        # chatInputNormal may not expose destroy(); guard before calling.
        if hasattr(self.chatInputNormal, 'destroy'):
            self.chatInputNormal.destroy()
        self.chatInputNormal.delete()
        del self.chatInputNormal
        self.chatInputSpeedChat.delete()
        del self.chatInputSpeedChat
        # Each lazily-created panel is destroyed and reset to None if present.
        if self.openChatWarning:
            self.openChatWarning.destroy()
            self.openChatWarning = None
        if self.unpaidChatWarning:
            self.payButton = None
            self.unpaidChatWarning.destroy()
            self.unpaidChatWarning = None
        if self.teaser:
            # Teaser panel uses cleanup/unload rather than destroy.
            self.teaser.cleanup()
            self.teaser.unload()
            self.teaser = None
        if self.noSecretChatAtAll:
            self.noSecretChatAtAll.destroy()
            self.noSecretChatAtAll = None
        if self.noSecretChatAtAllAndNoWhitelist:
            self.noSecretChatAtAllAndNoWhitelist.destroy()
            self.noSecretChatAtAllAndNoWhitelist = None
        if self.noSecretChatWarning:
            self.noSecretChatWarning.destroy()
            self.noSecretChatWarning = None
        if self.activateChatGui:
            self.activateChatGui.destroy()
            self.activateChatGui = None
        if self.chatMoreInfo:
            self.chatMoreInfo.destroy()
            self.chatMoreInfo = None
        if self.chatPrivacyPolicy:
            self.chatPrivacyPolicy.destroy()
            self.chatPrivacyPolicy = None
        if self.secretChatActivated:
            self.secretChatActivated.destroy()
            self.secretChatActivated = None
        if self.problemActivatingChat:
            self.problemActivatingChat.destroy()
            self.problemActivatingChat = None
        del self.localAvatar
        del self.cr
        return
    def obscure(self, normal, sc):
        """Record which chat buttons are obscured by other UI and hide them.

        Args:
            normal: truthy to obscure the open-chat button.
            sc: truthy to obscure the speedchat button.
        """
        self.__scObscured = sc
        if self.__scObscured:
            self.scButton.hide()
        self.__normalObscured = normal
        if self.__normalObscured:
            self.normalButton.hide()

    def isObscured(self):
        """Return the (normalObscured, scObscured) flag pair."""
        return (self.__normalObscured, self.__scObscured)
    def stop(self):
        """Disable chat: go to the 'off' state and drop all event hooks."""
        self.fsm.request('off')
        self.ignoreAll()

    def start(self):
        """Enable chat by entering the main menu state."""
        self.fsm.request('mainMenu')

    def announceChat(self):
        # Generic notification that some chat message was sent.
        messenger.send(ChatEvent)

    def announceSCChat(self):
        # Speedchat messages also count as generic chat.
        messenger.send(SCChatEvent)
        self.announceChat()
    def sendChatString(self, message):
        """Base handler for typed open chat; scrubs and announces the message.

        NOTE(review): chatFlags is computed but not used in this base
        implementation -- presumably consumed by subclass overrides; confirm
        before removing.
        """
        chatFlags = CFSpeech | CFTimeout
        if base.cr.wantSwitchboardHacks:
            # Optional profanity scrubbing via the switchboard module.
            from otp.switchboard import badwordpy
            badwordpy.init('', '')
            message = badwordpy.scrub(message)
        if isThought(message):
            message = removeThoughtPrefix(message)
            chatFlags = CFThought
        messenger.send(NormalChatEvent)
        self.announceChat()
    def sendWhisperString(self, message, whisperAvatarId):
        # Stub: whisper delivery is implemented by subclasses.
        pass

    def sendSCChatMessage(self, msgIndex):
        # Speedchat message type codes (from the calls below):
        # 1 = standard phrase, 2 = emote, 3 = custom phrase.
        base.talkAssistant.sendOpenSpeedChat(1, msgIndex)

    def sendSCWhisperMessage(self, msgIndex, whisperAvatarId, toPlayer):
        # toPlayer selects account-level (player) vs avatar whisper routing.
        if toPlayer:
            base.talkAssistant.sendPlayerWhisperSpeedChat(1, msgIndex, whisperAvatarId)
        else:
            base.talkAssistant.sendAvatarWhisperSpeedChat(1, msgIndex, whisperAvatarId)

    def sendSCCustomChatMessage(self, msgIndex):
        base.talkAssistant.sendOpenSpeedChat(3, msgIndex)

    def sendSCCustomWhisperMessage(self, msgIndex, whisperAvatarId, toPlayer):
        if toPlayer:
            base.talkAssistant.sendPlayerWhisperSpeedChat(3, msgIndex, whisperAvatarId)
        else:
            base.talkAssistant.sendAvatarWhisperSpeedChat(3, msgIndex, whisperAvatarId)

    def sendSCEmoteChatMessage(self, emoteId):
        base.talkAssistant.sendOpenSpeedChat(2, emoteId)

    def sendSCEmoteWhisperMessage(self, emoteId, whisperAvatarId, toPlayer):
        if toPlayer:
            base.talkAssistant.sendPlayerWhisperSpeedChat(2, emoteId, whisperAvatarId)
        else:
            base.talkAssistant.sendAvatarWhisperSpeedChat(2, emoteId, whisperAvatarId)
    def enterOff(self):
        """FSM state: chat fully disabled; hide buttons, drop listeners."""
        self.scButton.hide()
        self.normalButton.hide()
        self.ignoreAll()

    def exitOff(self):
        pass

    def enterMainMenu(self):
        """FSM state: idle chat UI; arm hooks that open the typed-chat entry."""
        self.checkObscurred()
        if self.localAvatar.canChat():
            if self.wantBackgroundFocus:
                # Background focus lets typing begin chat without clicking.
                self.chatInputNormal.chatEntry['backgroundFocus'] = 1
            self.acceptOnce('enterNormalChat', self.fsm.request, ['normalChat'])
            if not self.wantBackgroundFocus:
                # NOTE(review): hardcoded 't' rather than self.chatHotkey --
                # confirm whether the configured hotkey should be used here.
                self.accept('t', messenger.send, ['enterNormalChat'])
    def checkObscurred(self):
        """Show whichever chat buttons are not currently obscured.

        (sic: misspelled name kept since callers use this spelling.)
        """
        if not self.__scObscured:
            self.scButton.show()
        if not self.__normalObscured:
            self.normalButton.show()

    def exitMainMenu(self):
        self.scButton.hide()
        self.normalButton.hide()
        self.ignore('enterNormalChat')
        if self.wantBackgroundFocus:
            self.chatInputNormal.chatEntry['backgroundFocus'] = 0
    def whisperTo(self, avatarName, avatarId, playerId = None):
        """Open the whisper UI targeting the given avatar (or player account)."""
        self.fsm.request('whisper', [avatarName, avatarId, playerId])

    def noWhisper(self):
        """Dismiss the whisper UI and return to the main menu state."""
        self.fsm.request('mainMenu')

    def handleWhiteListSelect(self):
        # Whitelisted chat goes through the whitelist open-chat state.
        self.fsm.request('whiteListOpenChat')

    def enterWhiteListOpenChat(self):
        self.checkObscurred()
        if self.wantBackgroundFocus:
            self.chatInputNormal.chatEntry['backgroundFocus'] = 0
        base.localAvatar.chatMgr.chatInputWhiteList.activateByData()

    def exitWhiteListOpenChat(self):
        pass

    def enterWhiteListAvatarChat(self, receiverId):
        # Second activateByData argument: 0 = avatar target, 1 = player target.
        if self.wantBackgroundFocus:
            self.chatInputNormal.chatEntry['backgroundFocus'] = 0
        base.localAvatar.chatMgr.chatInputWhiteList.activateByData(receiverId, 0)

    def exitWhiteListAvatarChat(self):
        pass

    def enterWhiteListPlayerChat(self, receiverId):
        if self.wantBackgroundFocus:
            self.chatInputNormal.chatEntry['backgroundFocus'] = 0
        base.localAvatar.chatMgr.chatInputWhiteList.activateByData(receiverId, 1)

    def exitWhiteListPlayerChat(self):
        pass
    def enterWhisper(self, avatarName, avatarId, playerId = None):
        """Configure and show the whisper frame for a target avatar/player.

        Decides whether to whisper to the toon (avatar) or to the
        account-level player, then enables/disables the typed-chat and
        speedchat whisper buttons based on understandability and whether
        the target is online.
        """
        self.whisperScButton['extraArgs'] = [avatarName, avatarId, playerId]
        self.whisperButton['extraArgs'] = [avatarName, avatarId, playerId]
        playerName = None
        chatToToon = 1
        online = 0
        # NOTE: dict.has_key is Python 2 only.
        if self.cr.doId2do.has_key(avatarId):
            # Target is in the local zone, so definitely online.
            online = 1
        elif self.cr.isFriend(avatarId):
            online = self.cr.isFriendOnline(avatarId)
        hasManager = hasattr(base.cr, 'playerFriendsManager')
        if hasManager:
            if base.cr.playerFriendsManager.askAvatarOnline(avatarId):
                online = 1
        # Config overrides force whisper availability for testing.
        avatarUnderstandable = base.cr.config.GetBool('force-avatar-understandable', False)
        playerUnderstandable = base.cr.config.GetBool('force-player-understandable', False)
        av = None
        if avatarId:
            av = self.cr.identifyAvatar(avatarId)
            if av != None:
                avatarUnderstandable = av.isUnderstandable()
        if playerId:
            if base.cr.playerFriendsManager.playerId2Info.has_key(playerId):
                playerInfo = base.cr.playerFriendsManager.playerId2Info.get(playerId)
                playerName = playerInfo.playerName
                online = 1
                playerUnderstandable = playerInfo.understandableYesNo
                if playerUnderstandable or not avatarId:
                    # Prefer account-level chat when possible.
                    chatToToon = 0
        if chatToToon:
            chatName = avatarName
        else:
            chatName = playerName
        normalButtonObscured, scButtonObscured = self.isObscured()
        if (avatarUnderstandable or playerUnderstandable) and online and not normalButtonObscured:
            self.whisperButton['state'] = 'normal'
            self.enablewhisperButton()
        else:
            self.whisperButton['state'] = 'inactive'
            self.disablewhisperButton()
        if online:
            self.whisperScButton['state'] = 'normal'
            self.changeFrameText(OTPLocalizer.ChatManagerWhisperToName % chatName)
        else:
            self.whisperScButton['state'] = 'inactive'
            self.changeFrameText(OTPLocalizer.ChatManagerWhisperOffline % chatName)
        self.whisperFrame.show()
        self.refreshWhisperFrame()
        if avatarUnderstandable or playerUnderstandable:
            # Arm the typed-chat hook toward the player or avatar target.
            if playerId and not chatToToon:
                if self.wantBackgroundFocus:
                    self.chatInputNormal.chatEntry['backgroundFocus'] = 1
                self.acceptOnce('enterNormalChat', self.fsm.request, ['whisperChatPlayer', [avatarName, playerId]])
            elif online and chatToToon:
                if self.wantBackgroundFocus:
                    self.chatInputNormal.chatEntry['backgroundFocus'] = 1
                self.acceptOnce('enterNormalChat', self.fsm.request, ['whisperChat', [avatarName, avatarId]])
        if base.cr.config.GetBool('force-typed-whisper-enabled', 0):
            self.whisperButton['state'] = 'normal'
            self.enablewhisperButton()
        return
    def disablewhisperButton(self):
        # Hook for subclasses; base implementation does nothing.
        pass

    def enablewhisperButton(self):
        pass

    def refreshWhisperFrame(self):
        pass

    def changeFrameText(self, newText):
        """Set the whisper frame label, nudging position for long names."""
        self.whisperFrame['text'] = newText
        if len(newText) > 24:
            self.whisperFrame['text_pos'] = (0.18, 0.042)

    def exitWhisper(self):
        self.whisperFrame.hide()
        self.ignore('enterNormalChat')
        self.chatInputNormal.chatEntry['backgroundFocus'] = 0
def enterWhisperSpeedChat(self, avatarId):
self.whisperFrame.show()
if self.wantBackgroundFocus:
self.chatInputNormal.chatEntry['backgroundFocus'] = 0
self.chatInputSpeedChat.show(avatarId)
def exitWhisperSpeedChat(self):
self.whisperFrame.hide()
self.chatInputSpeedChat.hide()
def enterWhisperSpeedChatPlayer(self, playerId):
self.whisperFrame.show()
if self.wantBackgroundFocus:
self.chatInputNormal.chatEntry['backgroundFocus'] = 0
self.chatInputSpeedChat.show(playerId, 1)
    def exitWhisperSpeedChatPlayer(self):
        """Hide the whisper frame and the speedchat menu."""
        self.whisperFrame.hide()
        self.chatInputSpeedChat.hide()
def enterWhisperChat(self, avatarName, avatarId):
result = self.chatInputNormal.activateByData(avatarId)
return result
    def exitWhisperChat(self):
        """Deactivate the typed-chat entry."""
        self.chatInputNormal.deactivate()
    def enterWhisperChatPlayer(self, avatarName, playerId):
        """Activate the typed-chat entry aimed at a player account.

        NOTE(review): avatarName is overwritten from the friend info but never
        read afterwards in this method -- either dead code or a leftover from a
        version that displayed the name here; confirm before removing.
        """
        playerInfo = base.cr.playerFriendsManager.getFriendInfo(playerId)
        if playerInfo:
            avatarName = playerInfo.playerName
        result = self.chatInputNormal.activateByData(playerId, 1)
        return result
    def exitWhisperChatPlayer(self):
        """Deactivate the typed-chat entry."""
        self.chatInputNormal.deactivate()
def enterSpeedChat(self):
messenger.send('enterSpeedChat')
if not self.__scObscured:
self.scButton.show()
if not self.__normalObscured:
self.normalButton.show()
if self.wantBackgroundFocus:
self.chatInputNormal.chatEntry['backgroundFocus'] = 0
self.chatInputSpeedChat.show()
def exitSpeedChat(self):
self.scButton.hide()
self.normalButton.hide()
self.chatInputSpeedChat.hide()
def enterNormalChat(self):
if base.wantCustomControls:
base.localAvatar.controlManager.disableWASD()
result = self.chatInputNormal.activateByData()
return result
def exitNormalChat(self):
if base.wantCustomControls:
base.localAvatar.controlManager.enableWASD()
self.chatInputNormal.deactivate()
    def enterOpenChatWarning(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterOpenChatWarning() on parent class')
    def exitOpenChatWarning(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitOpenChatWarning() on parent class')
def enterLeaveToPayDialog(self):
if self.leaveToPayDialog == None:
self.leaveToPayDialog = LeaveToPayDialog.LeaveToPayDialog(self.paidNoParentPassword)
self.leaveToPayDialog.setCancel(self.__handleLeaveToPayCancel)
self.leaveToPayDialog.show()
return
def exitLeaveToPayDialog(self):
if self.leaveToPayDialog:
self.leaveToPayDialog.destroy()
self.leaveToPayDialog = None
return
    def enterUnpaidChatWarning(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterUnpaidChatWarning() on parent class')
    def exitUnpaidChatWarning(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitUnpaidChatWarning() on parent class')
    def enterNoSecretChatAtAll(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterNoSecretChatAtAll() on parent class')
    def exitNoSecretChatAtAll(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitNoSecretChatAtAll() on parent class')
    def enterNoSecretChatAtAllAndNoWhitelist(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterNoSecretChatAtAllAndNoWhitelist() on parent class')
    def exitNoSecretChatAtAllAndNoWhitelist(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitNoSecretChatAtAllAndNoWhitelist() on parent class')
    def enterNoSecretChatWarning(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterNoSecretChatWarning() on parent class')
    def exitNoSecretChatWarning(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitNoSecretChatWarning() on parent class')
    def enterNoFriendsWarning(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterNoFriendsWarning() on parent class')
    def exitNoFriendsWarning(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitNoFriendsWarning() on parent class')
    def enterActivateChat(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterActivateChat() on parent class')
    def exitActivateChat(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitActivateChat() on parent class')
    def enterOtherDialog(self):
        """No-op placeholder; subclasses may override."""
        pass
    def exitOtherDialog(self):
        """No-op placeholder; subclasses may override."""
        pass
def enterChatMoreInfo(self):
if self.chatMoreInfo == None:
self.chatMoreInfo = SecretFriendsInfoPanel.SecretFriendsInfoPanel('secretFriendsInfoDone')
self.chatMoreInfo.show()
self.accept('secretFriendsInfoDone', self.__secretFriendsInfoDone)
return
def exitChatMoreInfo(self):
self.chatMoreInfo.hide()
self.ignore('secretFriendsInfoDone')
def enterChatPrivacyPolicy(self):
if self.chatPrivacyPolicy == None:
self.chatPrivacyPolicy = PrivacyPolicyPanel.PrivacyPolicyPanel('privacyPolicyDone')
self.chatPrivacyPolicy.show()
self.accept('privacyPolicyDone', self.__privacyPolicyDone)
return
def exitChatPrivacyPolicy(self):
cleanupDialog('privacyPolicyDialog')
self.chatPrivacyPolicy = None
self.ignore('privacyPolicyDone')
return
    def enterSecretChatActivated(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterSecretChatActivated() on parent class')
    def exitSecretChatActivated(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitSecretChatActivated() on parent class')
    def enterProblemActivatingChat(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterProblemActivatingChat() on parent class')
    def exitProblemActivatingChat(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitProblemActivatingChat() on parent class')
    def enterTrueFriendTeaserPanel(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called enterTrueFriendTeaserPanel () on parent class')
    def exitTrueFriendTeaserPanel(self):
        """Abstract state handler; must be overridden by a subclass."""
        self.notify.error('called exitTrueFriendTeaserPanel () on parent class')
    def __handleLeaveToPayCancel(self):
        """Return to the main menu when the leave-to-pay dialog is cancelled."""
        self.fsm.request('mainMenu')
    def __secretFriendsInfoDone(self):
        """Return to the activate-chat state when the secret-friends panel closes."""
        self.fsm.request('activateChat')
    def __privacyPolicyDone(self):
        """Return to the activate-chat state when the privacy-policy panel closes."""
        self.fsm.request('activateChat')
    def reloadWASD(self):
        """Re-sync chat input focus with the custom-controls (WASD) setting.

        When custom controls are off, chat regains background keyboard focus;
        when they are on, a dedicated chat hotkey is used instead.

        NOTE(review): the old hotkey is ignored and self.chatHotkey is reassigned,
        but no corresponding accept() for the new hotkey appears here -- confirm
        a caller (or code outside this view) re-registers it.
        """
        self.wantBackgroundFocus = not base.wantCustomControls
        self.ignore(self.chatHotkey)
        if self.wantBackgroundFocus:
            self.chatInputNormal.chatEntry['backgroundFocus'] = 1
        else:
            self.chatHotkey = base.CHAT_HOTKEY
            self.chatInputNormal.chatEntry['backgroundFocus'] = 0
    def disableBackgroundFocus(self):
        """Release background keyboard focus from the chat entry."""
        self.chatInputNormal.chatEntry['backgroundFocus'] = 0
| |
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Quantum ESPRESSO, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
import shlex
from distutils.version import LooseVersion
from subprocess import Popen
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_QuantumESPRESSO(ConfigureMake):
    """Support for building and installing Quantum ESPRESSO."""

    @staticmethod
    def extra_options():
        """Custom easyconfig parameters for Quantum ESPRESSO."""
        extra_vars = {
            'hybrid': [False, "Enable hybrid build (with OpenMP)", CUSTOM],
            'with_scalapack': [True, "Enable ScaLAPACK support", CUSTOM],
        }
        return ConfigureMake.extra_options(extra_vars)

    def __init__(self, *args, **kwargs):
        """Add extra config options specific to Quantum ESPRESSO."""
        super(EB_QuantumESPRESSO, self).__init__(*args, **kwargs)
        self.build_in_installdir = True
        # source directory was renamed from espresso-* to qe-* as of version 6
        if LooseVersion(self.version) >= LooseVersion("6"):
            self.install_subdir = "qe-%s" % self.version
        else:
            self.install_subdir = "espresso-%s" % self.version

    def patch_step(self):
        """Patch files from build dir (not start dir)."""
        super(EB_QuantumESPRESSO, self).patch_step(beginpath=self.builddir)

    def configure_step(self):
        """Custom configuration procedure for Quantum ESPRESSO."""
        if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
            self.cfg.update('configopts', '--enable-openmp')
        if not self.toolchain.options.get('usempi', None):
            self.cfg.update('configopts', '--disable-parallel')
        if not self.cfg['with_scalapack']:
            self.cfg.update('configopts', '--without-scalapack')
        repls = []
        if self.toolchain.comp_family() in [toolchain.INTELCOMP]:
            # set preprocessor command (-E to stop after preprocessing, -C to preserve comments)
            cpp = "%s -E -C" % os.getenv('CC')
            repls.append(('CPP', cpp, False))
            env.setvar('CPP', cpp)
            # also define $FCCPP, but do *not* include -C (comments should not be preserved when preprocessing Fortran)
            env.setvar('FCCPP', "%s -E" % os.getenv('CC'))
        super(EB_QuantumESPRESSO, self).configure_step()
        # compose list of DFLAGS (flag, value, keep_stuff)
        # for guidelines, see include/defs.h.README in sources
        dflags = []
        comp_fam_dflags = {
            toolchain.INTELCOMP: '-D__INTEL',
            toolchain.GCC: '-D__GFORTRAN -D__STD_F95',
        }
        comp_fam = self.toolchain.comp_family()
        if comp_fam not in comp_fam_dflags:
            # fail with a clear message rather than a bare KeyError
            raise EasyBuildError("Compiler family %s not supported (no DFLAGS mapping defined)", comp_fam)
        dflags.append(comp_fam_dflags[comp_fam])
        if self.toolchain.options.get('openmp', False):
            libfft = os.getenv('LIBFFT_MT')
        else:
            libfft = os.getenv('LIBFFT')
        if libfft:
            if "fftw3" in libfft:
                dflags.append('-D__FFTW3')
            else:
                dflags.append('-D__FFTW')
            env.setvar('FFTW_LIBS', libfft)
        if get_software_root('ACML'):
            dflags.append('-D__ACML')
        if self.toolchain.options.get('usempi', None):
            dflags.append('-D__MPI -D__PARA')
        if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
            dflags.append(" -D__OPENMP")
        if self.cfg['with_scalapack']:
            dflags.append(" -D__SCALAPACK")
        # always include -w to suppress warnings
        dflags.append('-w')
        repls.append(('DFLAGS', ' '.join(dflags), False))
        # complete C/Fortran compiler and LD flags
        if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
            repls.append(('LDFLAGS', self.toolchain.get_flag('openmp'), True))
            repls.append(('(?:C|F90|F)FLAGS', self.toolchain.get_flag('openmp'), True))
        # obtain library settings
        libs = []
        for lib in ['BLAS', 'LAPACK', 'FFT', 'SCALAPACK']:
            if self.toolchain.options.get('openmp', False):
                val = os.getenv('LIB%s_MT' % lib)
            else:
                val = os.getenv('LIB%s' % lib)
            repls.append(('%s_LIBS' % lib, val, False))
            libs.append(val)
        libs = ' '.join(libs)
        repls.append(('BLAS_LIBS_SWITCH', 'external', False))
        repls.append(('LAPACK_LIBS_SWITCH', 'external', False))
        repls.append(('LD_LIBS', os.getenv('LIBS'), False))
        self.log.debug("List of replacements to perform: %s" % repls)
        # make.sys was renamed to make.inc as of version 6
        if LooseVersion(self.version) >= LooseVersion("6"):
            make_ext = '.inc'
        else:
            make_ext = '.sys'
        # patch make.sys file
        fn = os.path.join(self.cfg['start_dir'], 'make' + make_ext)
        try:
            for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
                for (k, v, keep) in repls:
                    # need to use [ \t]* instead of \s*, because vars may be undefined as empty,
                    # and we don't want to include newlines
                    if keep:
                        line = re.sub(r"^(%s\s*=[ \t]*)(.*)$" % k, r"\1\2 %s" % v, line)
                    else:
                        line = re.sub(r"^(%s\s*=[ \t]*).*$" % k, r"\1%s" % v, line)
                # fix preprocessing directives for .f90 files in make.sys if required
                if self.toolchain.comp_family() in [toolchain.GCC]:
                    line = re.sub(r"\$\(MPIF90\) \$\(F90FLAGS\) -c \$<",
                                  "$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
                                  "\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
                                  line)
                sys.stdout.write(line)
        except IOError as err:
            raise EasyBuildError("Failed to patch %s: %s", fn, err)
        # use a context manager so the file handle is not leaked
        with open(fn, "r") as patched:
            self.log.debug("Contents of patched %s: %s" % (fn, patched.read()))
        # patch default make.sys for wannier
        if LooseVersion(self.version) >= LooseVersion("5"):
            fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90' + make_ext)
        else:
            fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys')
        try:
            for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
                line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line)
                sys.stdout.write(line)
        except IOError as err:
            raise EasyBuildError("Failed to patch %s: %s", fn, err)
        with open(fn, "r") as patched:
            self.log.debug("Contents of patched %s: %s" % (fn, patched.read()))
        # patch Makefile of want plugin
        wantprefix = 'want-'
        wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)]
        if len(wantdirs) > 1:
            raise EasyBuildError("Found more than one directory with %s prefix, help!", wantprefix)
        if len(wantdirs) != 0:
            wantdir = os.path.join(self.builddir, wantdirs[0])
            make_sys_in_path = None
            cand_paths = [os.path.join('conf', 'make.sys.in'), os.path.join('config', 'make.sys.in')]
            for path in cand_paths:
                full_path = os.path.join(wantdir, path)
                if os.path.exists(full_path):
                    make_sys_in_path = full_path
                    break
            if make_sys_in_path is None:
                raise EasyBuildError("Failed to find make.sys.in in want directory %s, paths considered: %s",
                                     wantdir, ', '.join(cand_paths))
            try:
                for line in fileinput.input(make_sys_in_path, inplace=1, backup='.orig.eb'):
                    # fix preprocessing directives for .f90 files in make.sys if required
                    if self.toolchain.comp_family() in [toolchain.GCC]:
                        line = re.sub("@f90rule@",
                                      "$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
                                      "\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
                                      line)
                    sys.stdout.write(line)
            except IOError as err:
                raise EasyBuildError("Failed to patch %s: %s", fn, err)
        # move non-espresso directories to where they're expected and create symlinks
        try:
            dirnames = [d for d in os.listdir(self.builddir) if not d == self.install_subdir]
            targetdir = os.path.join(self.builddir, self.install_subdir)
            for dirname in dirnames:
                shutil.move(os.path.join(self.builddir, dirname), os.path.join(targetdir, dirname))
                self.log.info("Moved %s into %s" % (dirname, targetdir))
                dirname_head = dirname.split('-')[0]
                # handle the case where the directory is preceded by 'qe-'
                if dirname_head == 'qe':
                    dirname_head = dirname.split('-')[1]
                # map plugin directory names to the symlink names QE's build system expects
                linkname = None
                if dirname_head == 'sax':
                    linkname = 'SaX'
                elif dirname_head == 'wannier90':
                    linkname = 'W90'
                elif dirname_head in ['gipaw', 'plumed', 'want', 'yambo']:
                    linkname = dirname_head.upper()
                if linkname:
                    os.symlink(os.path.join(targetdir, dirname), os.path.join(targetdir, linkname))
        except OSError as err:
            raise EasyBuildError("Failed to move non-espresso directories: %s", err)

    def build_step(self):
        """Custom build procedure: fix up directory layout, then run make in the install subdir."""
        self.log.info("Started build step")
        self.log.info("Getting target directory")
        targetdir = os.path.join(self.builddir, self.install_subdir)
        # tarballs fetched from GitHub unpack into an extra 'q-e-qe-<version>' level;
        # flatten it so the expected layout is in place (generalized from a
        # previously hard-coded 'q-e-qe-6.2.1', and skipped when not present)
        self.log.info("fixing directory structure")
        wrongdir = os.path.join(targetdir, "q-e-qe-%s" % self.version)
        if os.path.isdir(wrongdir):
            for filename in os.listdir(wrongdir):
                shutil.move(os.path.join(wrongdir, filename), os.path.join(targetdir, filename))
            os.rmdir(wrongdir)
        self.log.info("Switching to target directory " + targetdir)
        os.chdir(targetdir)
        args = shlex.split("make " + self.cfg["buildopts"])
        sys.stdout.write("== running " + str(args) + " in directory " + targetdir + "\n")
        self.log.info("Calling make with options in config buildopts")
        p = Popen(args)
        p.communicate()
        # fail loudly on build errors instead of silently continuing to install
        if p.returncode != 0:
            raise EasyBuildError("Build (make) failed with exit code %s", p.returncode)

    def install_step(self):
        """Skip install step, since we're building in the install directory."""
        pass

    def sanity_check_step(self):
        """Custom sanity check for Quantum ESPRESSO."""
        # build list of expected binaries based on make targets
        bins = ["iotk", "iotk.x", "iotk_print_kinds.x"]
        if 'cp' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
            bins.extend(["cp.x", "cppp.x", "wfdd.x"])
        if 'gww' in self.cfg['buildopts']:  # only for v4.x, not in v5.0 anymore
            bins.extend(["gww_fit.x", "gww.x", "head.x", "pw4gww.x"])
        if 'ld1' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
            bins.extend(["ld1.x"])
        if 'gipaw' in self.cfg['buildopts']:
            bins.extend(["gipaw.x"])
        if 'neb' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
                'all' in self.cfg['buildopts']:
            if LooseVersion(self.version) > LooseVersion("5"):
                bins.extend(["neb.x", "path_interpolation.x"])
        if 'ph' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
            bins.extend(["dynmat.x", "lambda.x", "matdyn.x", "ph.x", "phcg.x", "q2r.x"])
            if LooseVersion(self.version) < LooseVersion("6"):
                bins.extend(["d3.x"])
            if LooseVersion(self.version) > LooseVersion("5"):
                bins.extend(["fqha.x", "q2qstar.x"])
        if 'pp' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
                'all' in self.cfg['buildopts']:
            bins.extend(["average.x", "bands.x", "dos.x", "epsilon.x", "initial_state.x",
                         "plan_avg.x", "plotband.x", "plotproj.x", "plotrho.x", "pmw.x", "pp.x",
                         "projwfc.x", "sumpdos.x", "pw2wannier90.x", "pw_export.x", "pw2gw.x",
                         "wannier_ham.x", "wannier_plot.x"])
            if LooseVersion(self.version) > LooseVersion("5"):
                bins.extend(["pw2bgw.x", "bgw2pw.x"])
            else:
                bins.extend(["pw2casino.x"])
        if 'pw' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
            bins.extend(["dist.x", "ev.x", "kpoints.x", "pw.x", "pwi2xsf.x"])
            if LooseVersion(self.version) > LooseVersion("5"):
                bins.extend(["generate_vdW_kernel_table.x"])
            else:
                bins.extend(["path_int.x"])
            if LooseVersion(self.version) < LooseVersion("5.3.0"):
                bins.extend(["band_plot.x", "bands_FS.x", "kvecs_FS.x"])
        if 'pwcond' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
                'all' in self.cfg['buildopts']:
            bins.extend(["pwcond.x"])
        if 'tddfpt' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
            if LooseVersion(self.version) > LooseVersion("5"):
                bins.extend(["turbo_lanczos.x", "turbo_spectrum.x"])
        upftools = []
        if 'upf' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
            upftools = ["casino2upf.x", "cpmd2upf.x", "fhi2upf.x", "fpmd2upf.x", "ncpp2upf.x",
                        "oldcp2upf.x", "read_upf_tofile.x", "rrkj2upf.x", "uspp2upf.x", "vdb2upf.x",
                        "virtual.x"]
            if LooseVersion(self.version) > LooseVersion("5"):
                upftools.extend(["interpolate.x", "upf2casino.x"])
        if 'vdw' in self.cfg['buildopts']:  # only for v4.x, not in v5.0 anymore
            bins.extend(["vdw.x"])
        if 'w90' in self.cfg['buildopts']:
            bins.extend(["wannier90.x"])
        want_bins = []
        if 'want' in self.cfg['buildopts']:
            want_bins = ["bands.x", "blc2wan.x", "conductor.x", "current.x", "disentangle.x",
                         "dos.x", "gcube2plt.x", "kgrid.x", "midpoint.x", "plot.x", "sumpdos",
                         "wannier.x", "wfk2etsf.x"]
            if LooseVersion(self.version) > LooseVersion("5"):
                want_bins.extend(["cmplx_bands.x", "decay.x", "sax2qexml.x", "sum_sgm.x"])
        if 'xspectra' in self.cfg['buildopts']:
            bins.extend(["xspectra.x"])
        yambo_bins = []
        if 'yambo' in self.cfg['buildopts']:
            yambo_bins = ["a2y", "p2y", "yambo", "ypp"]
        pref = self.install_subdir
        custom_paths = {
            'files': [os.path.join(pref, 'bin', x) for x in bins] +
                     [os.path.join(pref, 'upftools', x) for x in upftools] +
                     [os.path.join(pref, 'WANT', 'bin', x) for x in want_bins] +
                     [os.path.join(pref, 'YAMBO', 'bin', x) for x in yambo_bins],
            'dirs': [os.path.join(pref, 'include')]
        }
        super(EB_QuantumESPRESSO, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_req_guess(self):
        """Custom path suggestions for Quantum ESPRESSO."""
        guesses = super(EB_QuantumESPRESSO, self).make_module_req_guess()
        # order matters here, 'bin' should be *last* in this list to ensure it gets prepended to $PATH last,
        # so it gets preference over the others
        # this is important since some binaries are available in two places (e.g. dos.x in both bin and WANT/bin)
        bindirs = ['upftools', 'WANT/bin', 'YAMBO/bin', 'bin']
        guesses.update({
            'PATH': [os.path.join(self.install_subdir, bindir) for bindir in bindirs],
            'CPATH': [os.path.join(self.install_subdir, 'include')],
        })
        return guesses
| |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larsoner@uw.edu>
#
# License: BSD (3-clause)
import os
from os import path as op
import numpy as np
from ..io import read_info, _loc_to_coil_trans, _loc_to_eeg_loc, Info
from ..io.pick import _has_kit_refs, pick_types, pick_info
from ..io.constants import FIFF
from ..transforms import (_ensure_trans, transform_surface_to, apply_trans,
_get_trans, _print_coord_trans, _coord_frame_name,
Transform)
from ..utils import logger, verbose
from ..source_space import _ensure_src, _filter_source_spaces
from ..surface import _normalize_vectors
from ..bem import read_bem_solution, _bem_find_surface, ConductorModel
from ..externals.six import string_types
from .forward import Forward, write_forward_solution, _merge_meg_eeg_fwds
from ._compute_forward import _compute_forwards
# Map human-readable accuracy names to FIFF forward-coil accuracy constants.
_accuracy_dict = dict(normal=FIFF.FWD_COIL_ACCURACY_NORMAL,
                      accurate=FIFF.FWD_COIL_ACCURACY_ACCURATE)
@verbose
def _read_coil_defs(elekta_defs=False, verbose=None):
    """Read a coil definition file.

    Parameters
    ----------
    elekta_defs : bool
        If true, prepend Elekta's coil definitions for numerical
        integration (from Abramowitz and Stegun section 25.4.62).
        Note that this will likely cause duplicate coil definitions,
        so the first matching coil should be selected for optimal
        integration parameters.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to raw.verbose.

    Returns
    -------
    res : list of dict
        The coils. It is a dictionary with valid keys:
        'cosmag' | 'coil_class' | 'coord_frame' | 'rmag' | 'type' |
        'chname' | 'accuracy'.
        cosmag contains the direction of the coils and rmag contains the
        position vector.
    """
    coil_dir = op.join(op.split(__file__)[0], '..', 'data')
    # Elekta definitions (when requested) come first so they win the
    # first-match search done by callers
    fnames = []
    if elekta_defs:
        fnames.append('coil_def_Elekta.dat')
    fnames.append('coil_def.dat')
    coils = []
    for fname in fnames:
        coils.extend(_read_coil_def_file(op.join(coil_dir, fname)))
    return coils
def _read_coil_def_file(fname):
    """Helper to read a coil def file.

    Each non-comment header line describes one coil (class, type, accuracy,
    number of integration points, size, base) followed by a quoted
    description; the next ``npts`` non-comment lines give the integration
    points (weight, position, direction).
    """
    # sanity bound on integration-point distance from the coil origin
    # (presumably meters -- TODO confirm units against coil_def.dat)
    big_val = 0.5
    coils = list()
    with open(fname, 'r') as fid:
        lines = fid.readlines()
    # reverse so pop() walks the file top-to-bottom
    lines = lines[::-1]
    while len(lines) > 0:
        line = lines.pop()
        if line[0] != '#':
            vals = np.fromstring(line, sep=' ')
            assert len(vals) in (6, 7)  # newer numpy can truncate comment
            start = line.find('"')
            end = len(line.strip()) - 1
            assert line.strip()[end] == '"'
            # NOTE(review): this slice keeps the leading '"' in desc (start
            # indexes the opening quote); confirm downstream expects that
            desc = line[start:end]
            npts = int(vals[3])
            coil = dict(coil_type=vals[1], coil_class=vals[0], desc=desc,
                        accuracy=vals[2], size=vals[4], base=vals[5])
            # get parameters of each component
            rmag = list()
            cosmag = list()
            w = list()
            for p in range(npts):
                # get next non-comment line
                line = lines.pop()
                while(line[0] == '#'):
                    line = lines.pop()
                vals = np.fromstring(line, sep=' ')
                assert len(vals) == 7
                # Read and verify data for each integration point
                w.append(vals[0])
                rmag.append(vals[[1, 2, 3]])
                cosmag.append(vals[[4, 5, 6]])
            w = np.array(w)
            rmag = np.array(rmag)
            cosmag = np.array(cosmag)
            size = np.sqrt(np.sum(cosmag ** 2, axis=1))
            if np.any(np.sqrt(np.sum(rmag ** 2, axis=1)) > big_val):
                raise RuntimeError('Unreasonable integration point')
            if np.any(size <= 0):
                raise RuntimeError('Unreasonable normal')
            # normalize direction vectors to unit length
            cosmag /= size[:, np.newaxis]
            coil.update(dict(w=w, cosmag=cosmag, rmag=rmag))
            coils.append(coil)
    logger.info('%d coil definitions read', len(coils))
    return coils
def _create_meg_coil(coilset, ch, acc, t):
    """Create a coil definition using templates, transform if necessary.

    Looks up the first template in ``coilset`` matching the channel's coil
    type and the requested accuracy, then applies ``t`` (composed with the
    channel's location transform) to the integration points.
    """
    # Also change the coordinate frame if so desired
    if t is None:
        t = Transform('meg', 'meg', np.eye(4))  # identity, no change
    if ch['kind'] not in [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH]:
        raise RuntimeError('%s is not a MEG channel' % ch['ch_name'])
    # Simple linear search from the coil definitions
    for coil in coilset:
        # low 16 bits of ch['coil_type'] hold the template coil type
        if coil['coil_type'] == (ch['coil_type'] & 0xFFFF) and \
                coil['accuracy'] == acc:
            break
    else:
        raise RuntimeError('Desired coil definition not found '
                           '(type = %d acc = %d)' % (ch['coil_type'], acc))
    # Apply a coordinate transformation if so desired
    coil_trans = np.dot(t['trans'], _loc_to_coil_trans(ch['loc']))
    # Create the result
    res = dict(chname=ch['ch_name'], coil_class=coil['coil_class'],
               accuracy=coil['accuracy'], base=coil['base'], size=coil['size'],
               type=ch['coil_type'], w=coil['w'], desc=coil['desc'],
               coord_frame=t['to'], rmag=apply_trans(coil_trans, coil['rmag']),
               cosmag=apply_trans(coil_trans, coil['cosmag'], False))
    # transformed coil axes (ex/ey/ez) and origin (r0) for downstream use
    r0_exey = (np.dot(coil['rmag'][:, :2], coil_trans[:3, :2].T) +
               coil_trans[:3, 3])
    res.update(ex=coil_trans[:3, 0], ey=coil_trans[:3, 1],
               ez=coil_trans[:3, 2], r0=coil_trans[:3, 3], r0_exey=r0_exey)
    return res
def _create_eeg_el(ch, t=None):
    """Create an electrode definition, transform coords if necessary.

    ``t`` must be a head->* transform; identity is used when it is None.
    """
    if ch['kind'] != FIFF.FIFFV_EEG_CH:
        raise RuntimeError('%s is not an EEG channel. Cannot create an '
                           'electrode definition.' % ch['ch_name'])
    if t is None:
        t = Transform('head', 'head', np.eye(4))  # identity, no change
    if t.from_str != 'head':
        raise RuntimeError('Inappropriate coordinate transformation')
    r0ex = _loc_to_eeg_loc(ch['loc'])
    # weights: single location for an unreferenced electrode, +1/-1 pair
    # when a reference location is present
    if r0ex.shape[1] == 1:  # no reference
        w = np.array([1.])
    else:  # has reference
        w = np.array([1., -1.])
    # Optional coordinate transformation
    r0ex = apply_trans(t['trans'], r0ex.T)
    # The electrode location
    cosmag = r0ex.copy()
    _normalize_vectors(cosmag)
    res = dict(chname=ch['ch_name'], coil_class=FIFF.FWD_COILC_EEG, w=w,
               accuracy=_accuracy_dict['normal'], type=ch['coil_type'],
               coord_frame=t['to'], rmag=r0ex, cosmag=cosmag)
    return res
def _create_meg_coils(chs, acc=None, t=None, coilset=None):
    """Create a set of MEG coils in the head coordinate frame."""
    # accept an accuracy name ('normal'/'accurate') or a FIFF constant
    if isinstance(acc, string_types):
        acc = _accuracy_dict[acc]
    # load the default templates unless the caller supplied a coil set
    if coilset is None:
        coilset = _read_coil_defs(verbose=False)
    return [_create_meg_coil(coilset, ch, acc, t) for ch in chs]
def _create_eeg_els(chs):
    """Create a set of EEG electrodes in the head coordinate frame."""
    return list(map(_create_eeg_el, chs))
@verbose
def _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=None):
    """Set up a BEM (or sphere conductor model) for forward computation."""
    logger.info('')
    # a string is taken to be a filename of a saved BEM solution
    if isinstance(bem, string_types):
        logger.info('Setting up the BEM model using %s...\n' % bem_extra)
        bem = read_bem_solution(bem)
    if not isinstance(bem, ConductorModel):
        raise TypeError('bem must be a string or ConductorModel')
    if not bem['is_sphere']:
        # a homogeneous (single-surface) BEM cannot support EEG
        if neeg > 0 and len(bem['surfs']) == 1:
            raise RuntimeError('Cannot use a homogeneous model in EEG '
                               'calculations')
        logger.info('Employing the head->MRI coordinate transform with the '
                    'BEM model.')
        # fwd_bem_set_head_mri_t: Set the coordinate transformation
        bem['head_mri_t'] = _ensure_trans(mri_head_t, 'head', 'mri')
        logger.info('BEM model %s is now set up' % op.split(bem_extra)[1])
    else:
        logger.info('Using the sphere model.\n')
        if len(bem['layers']) == 0:
            raise RuntimeError('Spherical model has zero layers')
        if bem['coord_frame'] != FIFF.FIFFV_COORD_HEAD:
            raise RuntimeError('Spherical model is not in head coordinates')
    logger.info('')
    return bem
@verbose
def _prep_meg_channels(info, accurate=True, exclude=(), ignore_ref=False,
                       elekta_defs=False, head_frame=True, verbose=None):
    """Prepare MEG coil definitions for forward calculation

    Parameters
    ----------
    info : instance of Info
        The measurement information dictionary
    accurate : bool
        If true (default) then use `accurate` coil definitions (more
        integration points)
    exclude : list of str | str
        List of channels to exclude. If 'bads', exclude channels in
        info['bads']
    ignore_ref : bool
        If true, ignore compensation coils
    elekta_defs : bool
        If True, use Elekta's coil definitions, which use different integration
        point geometry. False by default.
    head_frame : bool
        If True (default), use head frame coords. Otherwise, use device frame.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to raw.verbose.

    Returns
    -------
    megcoils : list of dict
        Information for each prepped MEG coil
    compcoils : list of dict
        Information for each prepped MEG coil
    megnames : list of str
        Name of each prepped MEG coil
    meginfo : Info
        Information subselected for just the set of MEG coils
    """
    accuracy = 'accurate' if accurate else 'normal'
    info_extra = 'info'
    meg_info = None
    megnames, megcoils, compcoils = [], [], []
    # Find MEG channels
    picks = pick_types(info, meg=True, eeg=False, ref_meg=False,
                       exclude=exclude)
    # Make sure MEG coils exist
    nmeg = len(picks)
    if nmeg <= 0:
        raise RuntimeError('Could not find any MEG channels')
    # Get channel info and names for MEG channels
    megchs = pick_info(info, picks)['chs']
    megnames = [info['ch_names'][p] for p in picks]
    logger.info('Read %3d MEG channels from %s'
                % (len(picks), info_extra))
    # Get MEG compensation channels
    if not ignore_ref:
        picks = pick_types(info, meg=False, ref_meg=True, exclude=exclude)
        ncomp = len(picks)
        if (ncomp > 0):
            # NOTE: compchs is only bound on this path; the ncomp > 0 guard
            # below keeps its later use safe
            compchs = pick_info(info, picks)['chs']
            logger.info('Read %3d MEG compensation channels from %s'
                        % (ncomp, info_extra))
            # We need to check to make sure these are NOT KIT refs
            if _has_kit_refs(info, picks):
                raise NotImplementedError(
                    'Cannot create forward solution with KIT reference '
                    'channels. Consider using "ignore_ref=True" in '
                    'calculation')
    else:
        ncomp = 0
    # Make info structure to allow making compensator later
    ncomp_data = len(info['comps'])
    ref_meg = True if not ignore_ref else False
    picks = pick_types(info, meg=True, ref_meg=ref_meg, exclude=exclude)
    meg_info = pick_info(info, picks) if nmeg > 0 else None
    # Create coil descriptions with transformation to head or device frame
    templates = _read_coil_defs(elekta_defs=elekta_defs)
    if head_frame:
        _print_coord_trans(info['dev_head_t'])
        transform = info['dev_head_t']
    else:
        # None means the coils stay in the device frame
        transform = None
    megcoils = _create_meg_coils(megchs, accuracy, transform, templates)
    if ncomp > 0:
        logger.info('%d compensation data sets in %s' % (ncomp_data,
                                                         info_extra))
        compcoils = _create_meg_coils(compchs, 'normal', transform, templates)
    # Check that coordinate frame is correct and log it
    if head_frame:
        assert megcoils[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
        logger.info('MEG coil definitions created in head coordinates.')
    else:
        assert megcoils[0]['coord_frame'] == FIFF.FIFFV_COORD_DEVICE
        logger.info('MEG coil definitions created in device coordinate.')
    return megcoils, compcoils, megnames, meg_info
@verbose
def _prep_eeg_channels(info, exclude=(), verbose=None):
    """Prepare EEG electrode definitions for forward calculation.

    Parameters
    ----------
    info : instance of Info
        The measurement information dictionary
    exclude : list of str | str
        List of channels to exclude. If 'bads', exclude channels in
        info['bads']
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to raw.verbose.

    Returns
    -------
    eegels : list of dict
        Information for each prepped EEG electrode
    eegnames : list of str
        Name of each prepped EEG electrode
    """
    info_extra = 'info'
    # Find EEG electrodes and make sure at least one exists
    picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
                       exclude=exclude)
    if len(picks) <= 0:
        raise RuntimeError('Could not find any EEG channels')
    # Get channel info and names for EEG channels
    eegchs = pick_info(info, picks)['chs']
    eegnames = [info['ch_names'][p] for p in picks]
    logger.info('Read %3d EEG channels from %s' % (len(picks), info_extra))
    # Create EEG electrode descriptions
    eegels = _create_eeg_els(eegchs)
    logger.info('Head coordinate coil definitions created.')
    return eegels, eegnames
@verbose
def _prepare_for_forward(src, mri_head_t, info, bem, mindist, n_jobs,
                         bem_extra='', trans='', info_extra='',
                         meg=True, eeg=True, ignore_ref=False, fname=None,
                         overwrite=False, verbose=None):
    """Helper to prepare for forward computation

    Copies and validates the source spaces, rebuilds the measurement info
    with forward-specific bookkeeping fields, creates MEG coil and EEG
    electrode definitions, transforms the source spaces to head
    coordinates, and sets up the BEM. Returns everything
    make_forward_solution() needs, plus the kwargs used to update the
    resulting Forward object.
    """
    # Read the source locations
    logger.info('')
    # let's make a copy in case we modify something
    src = _ensure_src(src).copy()
    nsource = sum(s['nuse'] for s in src)
    if nsource == 0:
        raise RuntimeError('No sources are active in these source spaces. '
                           '"do_all" option should be used.')
    logger.info('Read %d source spaces a total of %d active source locations'
                % (len(src), nsource))
    # Delete some keys to clean up the source space:
    for key in ['working_dir', 'command_line']:
        if key in src.info:
            del src.info[key]
    # Read the MRI -> head coordinate transformation
    logger.info('')
    _print_coord_trans(mri_head_t)
    # make a new dict with the relevant information; `cmd` records the call
    # so the provenance is stored inside the resulting forward solution
    arg_list = [info_extra, trans, src, bem_extra, fname, meg, eeg,
                mindist, overwrite, n_jobs, verbose]
    cmd = 'make_forward_solution(%s)' % (', '.join([str(a) for a in arg_list]))
    mri_id = dict(machid=np.zeros(2, np.int32), version=0, secs=0, usecs=0)
    info = Info(nchan=info['nchan'], chs=info['chs'], comps=info['comps'],
                ch_names=info['ch_names'], dev_head_t=info['dev_head_t'],
                mri_file=trans, mri_id=mri_id, meas_file=info_extra,
                meas_id=None, working_dir=os.getcwd(),
                command_line=cmd, bads=info['bads'], mri_head_t=mri_head_t)
    logger.info('')
    megcoils, compcoils, megnames, meg_info = [], [], [], []
    eegels, eegnames = [], []
    # Only build coil/electrode definitions for modalities that are both
    # requested and present in the data
    if meg and len(pick_types(info, ref_meg=False, exclude=[])) > 0:
        megcoils, compcoils, megnames, meg_info = \
            _prep_meg_channels(info, ignore_ref=ignore_ref)
    if eeg and len(pick_types(info, meg=False, eeg=True, ref_meg=False,
                              exclude=[])) > 0:
        eegels, eegnames = _prep_eeg_channels(info)
    # Check that some channels were found
    if len(megcoils + eegels) == 0:
        raise RuntimeError('No MEG or EEG channels found.')
    # pick out final info
    info = pick_info(info, pick_types(info, meg=meg, eeg=eeg, ref_meg=False,
                                      exclude=[]))
    # Transform the source spaces into the appropriate coordinates
    # (will either be HEAD or MRI)
    for s in src:
        transform_surface_to(s, 'head', mri_head_t)
    # NOTE: uses the last `s` from the loop above for the log message
    logger.info('Source spaces are now in %s coordinates.'
                % _coord_frame_name(s['coord_frame']))
    # Prepare the BEM model
    bem = _setup_bem(bem, bem_extra, len(eegnames), mri_head_t)
    # Circumvent numerical problems by excluding points too close to the skull
    if not bem['is_sphere']:
        inner_skull = _bem_find_surface(bem, 'inner_skull')
        _filter_source_spaces(inner_skull, mindist, mri_head_t, src, n_jobs)
    logger.info('')
    # Stack the active source locations from all source spaces
    rr = np.concatenate([s['rr'][s['vertno']] for s in src])
    # deal with free orientations: identity basis (3 orientations per source)
    source_nn = np.tile(np.eye(3), (len(rr), 1))
    update_kwargs = dict(nchan=len(info['ch_names']), nsource=len(rr),
                         info=info, src=src, source_nn=source_nn,
                         source_rr=rr, surf_ori=False, mri_head_t=mri_head_t)
    return megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, \
        info, update_kwargs, bem
@verbose
def make_forward_solution(info, trans, src, bem, fname=None, meg=True,
                          eeg=True, mindist=0.0, ignore_ref=False,
                          overwrite=False, n_jobs=1, verbose=None):
    """Calculate a forward solution for a subject
    Parameters
    ----------
    info : instance of mne.io.meas_info.Info | str
        If str, then it should be a filename to a Raw, Epochs, or Evoked
        file with measurement information. If dict, should be an info
        dict (such as one from Raw, Epochs, or Evoked).
    trans : dict | str | None
        Either a transformation filename (usually made using mne_analyze)
        or an info dict (usually opened using read_trans()).
        If string, an ending of `.fif` or `.fif.gz` will be assumed to
        be in FIF format, any other ending will be assumed to be a text
        file with a 4x4 transformation matrix (like the `--trans` MNE-C
        option). Can be None to use the identity transform.
    src : str | instance of SourceSpaces
        If string, should be a source space filename. Can also be an
        instance of loaded or generated SourceSpaces.
    bem : dict | str
        Filename of the BEM (e.g., "sample-5120-5120-5120-bem-sol.fif") to
        use, or a loaded sphere model (dict).
    fname : str | None
        Destination forward solution filename. If None, the solution
        will not be saved.
    meg : bool
        If True (Default), include MEG computations.
    eeg : bool
        If True (Default), include EEG computations.
    mindist : float
        Minimum distance of sources from inner skull surface (in mm).
    ignore_ref : bool
        If True, do not include reference channels in compensation. This
        option should be True for KIT files, since forward computation
        with reference channels is not currently supported.
    overwrite : bool
        If True, the destination file (if it exists) will be overwritten.
        If False (default), an error will be raised if the file exists.
    n_jobs : int
        Number of jobs to run in parallel.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    fwd : instance of Forward
        The forward solution.
    See Also
    --------
    do_forward_solution
    Notes
    -----
    Some of the forward solution calculation options from the C code
    (e.g., `--grad`, `--fixed`) are not implemented here. For those,
    consider using the C command line tools or the Python wrapper
    `do_forward_solution`.
    """
    # Currently not (sup)ported:
    # 1. --grad option (gradients of the field, not used much)
    # 2. --fixed option (can be computed post-hoc)
    # 3. --mricoord option (probably not necessary)
    # read the transformation from MRI to HEAD coordinates
    # (could also be HEAD to MRI)
    mri_head_t, trans = _get_trans(trans)
    # Label used for log output when the BEM came in as a loaded dict
    bem_extra = 'dict' if isinstance(bem, dict) else bem
    # Fail early, before any expensive work, if the destination exists
    if fname is not None and op.isfile(fname) and not overwrite:
        raise IOError('file "%s" exists, consider using overwrite=True'
                      % fname)
    if not isinstance(info, (dict, string_types)):
        raise TypeError('info should be a dict or string')
    if isinstance(info, string_types):
        info_extra = op.split(info)[1]
        info = read_info(info, verbose=False)
    else:
        info_extra = 'info dict'
    # Report the setup
    logger.info('Source space : %s' % src)
    logger.info('MRI -> head transform source : %s' % trans)
    logger.info('Measurement data : %s' % info_extra)
    if isinstance(bem, dict) and bem['is_sphere']:
        logger.info('Sphere model : origin at %s mm'
                    % (bem['r0'],))
        logger.info('Standard field computations')
    else:
        logger.info('BEM model : %s' % bem_extra)
        logger.info('Accurate field computations')
    logger.info('Do computations in %s coordinates',
                _coord_frame_name(FIFF.FIFFV_COORD_HEAD))
    logger.info('Free source orientations')
    logger.info('Destination for the solution : %s' % fname)
    megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \
        update_kwargs, bem = _prepare_for_forward(
            src, mri_head_t, info, bem, mindist, n_jobs, bem_extra, trans,
            info_extra, meg, eeg, ignore_ref, fname, overwrite)
    # Drop names already consumed by _prepare_for_forward so they cannot be
    # accidentally reused below
    del (src, mri_head_t, trans, info_extra, bem_extra, mindist,
         meg, eeg, ignore_ref)
    # Time to do the heavy lifting: MEG first, then EEG
    coil_types = ['meg', 'eeg']
    coils = [megcoils, eegels]
    ccoils = [compcoils, None]
    infos = [meg_info, None]
    megfwd, eegfwd = _compute_forwards(rr, bem, coils, ccoils,
                                       infos, coil_types, n_jobs)
    # merge forwards
    fwd = _merge_meg_eeg_fwds(_to_forward_dict(megfwd, megnames),
                              _to_forward_dict(eegfwd, eegnames),
                              verbose=False)
    logger.info('')
    # Don't transform the source spaces back into MRI coordinates (which is
    # done in the C code) because mne-python assumes forward solution source
    # spaces are in head coords.
    fwd.update(**update_kwargs)
    if fname is not None:
        logger.info('writing %s...', fname)
        write_forward_solution(fname, fwd, overwrite, verbose=False)
    logger.info('Finished.')
    return fwd
def _to_forward_dict(fwd, names, fwd_grad=None,
                     coord_frame=FIFF.FIFFV_COORD_HEAD,
                     source_ori=FIFF.FIFFV_MNE_FREE_ORI):
    """Wrap a raw gain matrix (and optional gradient) into a Forward.

    Returns None when the gain matrix is empty (modality not computed).
    """
    assert names is not None
    if len(fwd) == 0:
        return None
    # Transpose so rows correspond to channels (named by `names`)
    gain = fwd.T
    sol = dict(data=gain, nrow=gain.shape[0], ncol=gain.shape[1],
               row_names=names, col_names=[])
    out = Forward(sol=sol, source_ori=source_ori, nsource=sol['ncol'],
                  coord_frame=coord_frame, sol_grad=None,
                  nchan=sol['nrow'], _orig_source_ori=source_ori,
                  _orig_sol=sol['data'].copy(), _orig_sol_grad=None)
    if fwd_grad is not None:
        # Same wrapping for the field-gradient solution, when provided
        grad = fwd_grad.T
        sol_grad = dict(data=grad, nrow=grad.shape[0], ncol=grad.shape[1],
                        row_names=names, col_names=[])
        out.update(dict(sol_grad=sol_grad),
                   _orig_sol_grad=sol_grad['data'].copy())
    return out
| |
"""
Django settings for scale_test project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import scale
import sys
import dj_database_url
def get_env_boolean(variable_name, default=False):
    """Read an environment variable as a boolean.

    Truthy values (case-insensitive): 'yes', 'true', 't', '1'. When the
    variable is unset, the stringified `default` is evaluated instead, so
    `default=True` yields True.
    """
    raw = os.getenv(variable_name, str(default))
    return raw.lower() in ('yes', 'true', 't', '1')
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Project version
VERSION = scale.__version__
DOCKER_VERSION = scale.__docker_version__
# Mesos connection information. Default for -m
# This can be something like "127.0.0.1:5050"
# or a zookeeper url like 'zk://host1:port1,host2:port2,.../path`
MESOS_MASTER = os.getenv('MESOS_MASTER', 'zk://leader.mesos:2181/mesos')
# By default, use the '*' role, meaning offers are received for all
# unreserved resources
MESOS_ROLE = os.getenv('MESOS_ROLE', '*')
# Used to set the user that Mesos tasks are launched by Docker. This should NEVER be set to root
# and must be a user name NOT a Linux UID. Mesos chokes on UIDs.
CONTAINER_PROCESS_OWNER = os.getenv('CONTAINER_PROCESS_OWNER', 'nobody')
# By default, the accepted resources match reservations to the MESOS_ROLE
ACCEPTED_RESOURCE_ROLE = os.getenv('ACCEPTED_RESOURCE_ROLE', MESOS_ROLE)
# By default, all API calls require authentication.
PUBLIC_READ_API = get_env_boolean('PUBLIC_READ_API')
# Placeholder for service secret that will be overridden in local_settings_docker
SERVICE_SECRET = None
# Zookeeper URL for scheduler leader election. If this is None, only a single scheduler is used.
SCHEDULER_ZK = None
# The full name for the Scale Docker image (without version tag)
SCALE_DOCKER_IMAGE = 'geoint/scale'
# The location of the config file containing Docker credentials
# The URI value should point to an externally hosted location such as a webserver or hosted S3 bucket.
# The value will be an http URL such as 'http://static.mysite.com/foo/.dockercfg'
CONFIG_URI = None
# Directory for rotating metrics storage
METRICS_DIR = None
# fluentd warning levels, or -1 to disable warnings
FLUENTD_BUFFER_WARN = int(os.environ.get('FLUENTD_BUFFER_WARN', -1))
FLUENTD_BUFFER_SIZE_WARN = int(os.environ.get('FLUENTD_BUFFER_SIZE_WARN', -1))
# URL for fluentd, or None to disable fluentd
LOGGING_ADDRESS = os.environ.get('LOGGING_ADDRESS')
LOGGING_HEALTH_ADDRESS = os.environ.get('LOGGING_HEALTH_ADDRESS')
# Base URL of elasticsearch nodes
ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL')
# Placeholder for elasticsearch version. Supplied in production by local_settings_docker.py
ELASTICSEARCH_VERSION = None
# Placeholder for Elasticsearch object. Needed for unit tests.
ELASTICSEARCH = None
# Database connection string; parsed by dj_database_url in DATABASES below
DATABASE_URL = os.getenv('DATABASE_URL')
# Root URL for the Scale installation
SCALE_VHOST = os.getenv('SCALE_VHOST', 'localhost:8000')
# Broker URL for connection to messaging backend
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
QUEUE_NAME = 'scale-command-messages'
# NOTE(review): "MESSSAGE" (triple S) is a long-standing typo; the name is
# kept because deployments set the matching environment variable.
MESSSAGE_QUEUE_DEPTH_WARN = int(os.environ.get('MESSSAGE_QUEUE_DEPTH_WARN', -1))
# Queue limit
SCHEDULER_QUEUE_LIMIT = int(os.environ.get('SCHEDULER_QUEUE_LIMIT', 500))
# The max number of times the scheduler will try to reconnect to
# mesos if disconnected.
SCHEDULER_MAX_RECONNECT = int(os.environ.get('SCHEDULER_MAX_RECONNECT', 3))
# Base URL of vault or DCOS secrets store, or None to disable secrets
SECRETS_URL = None
# Public token if DCOS secrets store, or privileged token for vault
SECRETS_TOKEN = None
# DCOS service account name, or None if not DCOS secrets store
DCOS_SERVICE_ACCOUNT = None
# Flag for raising SSL warnings associated with secrets transactions.
SECRETS_SSL_WARNINGS = True
# SECURITY WARNING: keep the secret key used in production secret!
INSECURE_DEFAULT_KEY = 'this-key-is-insecure-and-should-never-be-used-in-production'
SECRET_KEY = INSECURE_DEFAULT_KEY
# Used to write the superuser password
MESOS_SANDBOX = os.getenv('MESOS_SANDBOX')
# Security settings for production
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = get_env_boolean('SESSION_COOKIE_SECURE', True)
X_FRAME_OPTIONS = 'DENY'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# authentication toggle, to be used for testing
AUTHENTICATION_ENABLED = get_env_boolean('AUTHENTICATION_ENABLED', True)
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# used primarily by debug-toolbar to dictate what client url has access
if os.environ.get('INTERNAL_IP'):
    INTERNAL_IPS = [os.environ.get('INTERNAL_IP')]
# Application definition
# NOTE(review): debug_toolbar is installed unconditionally — confirm this is
# intended for production deployments.
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.gis',
    'rest_framework',
    'rest_framework.authtoken',
    'debug_toolbar',
    ###############
    # Social Auth #
    ###############
    'oauth2_provider',
    'social_django',
    'rest_framework_social_oauth2',
    # Scale apps
    'accounts',
    'batch',
    'cli',
    'data',
    'diagnostic',
    'error',
    'ingest',
    'job',
    'mesos_api',
    'messaging',
    'metrics',
    'node',
    'product',
    'queue',
    'recipe',
    'scheduler',
    'shared_resource',
    'source',
    'storage',
    'trigger',
    'util',
    'vault'
)
# Middleware stack; order matters (security and session middleware must run
# before auth, and the exception logger wraps everything below it)
MIDDLEWARE = [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'util.middleware.MultipleProxyMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'util.middleware.ExceptionLoggingMiddleware',
]
# Template engine configuration (app-dir templates only; no project DIRS)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': False,
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                ###############
                # Social Auth #
                ###############
                'social_django.context_processors.backends',
                'social_django.context_processors.login_redirect',
            ],
        },
    },
]
# Base auth backends; the GEOAxIS backend may be appended further below
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
]
# Django REST Framework configuration shared by all API versions
REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
    ),
    'DEFAULT_PAGINATION_CLASS': 'util.rest.DefaultPagination',
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework.renderers.AdminRenderer',
    ),
    'ALLOWED_VERSIONS': ('v6', 'v7'),
    'DEFAULT_VERSION': 'v6',
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
}
# Toggle DRF authentication/permissions via the AUTHENTICATION_ENABLED
# setting defined above (useful for testing)
if AUTHENTICATION_ENABLED:
    REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
        ###############
        # Social Auth #
        ###############
        'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
        'rest_framework_social_oauth2.authentication.SocialAuthentication',
    )
    REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = (
        'util.rest.ScaleAPIPermissions',
    )
else:
    # With auth disabled, every request is anonymous and unrestricted
    REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = ()
    REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = ()
    REST_FRAMEWORK['UNAUTHENTICATED_USER'] = None
ROOT_URLCONF = 'scale.urls'
WSGI_APPLICATION = 'scale.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# dj_database_url reads DATABASE_URL from the environment; falls back to a
# local SQLite file for development
DATABASES = {
    'default': dj_database_url.config(default='sqlite://%s' % os.path.join(BASE_DIR, 'db.sqlite3'))
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGIN_REDIRECT_URL = '/'
#############################
# GEOAxIS specific settings #
#############################
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/'
# Redirect after directly hitting login endpoint
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
# python-social-auth pipeline; associate_by_email links GEOAxIS logins to
# pre-existing local accounts with the same email address
DEFAULT_AUTH_PIPELINE = (
    'social_core.pipeline.social_auth.social_details',
    'social_core.pipeline.social_auth.social_uid',
    'social_core.pipeline.social_auth.auth_allowed',
    'social_core.pipeline.social_auth.social_user',
    'social_core.pipeline.user.get_username',
    'social_core.pipeline.mail.mail_validation',
    'social_core.pipeline.social_auth.associate_by_email',
    'social_core.pipeline.user.create_user',
    'social_core.pipeline.social_auth.associate_user',
    'social_core.pipeline.social_auth.load_extra_data',
    'social_core.pipeline.user.user_details'
)
SOCIAL_AUTH_GEOAXIS_KEY = os.getenv('GEOAXIS_KEY')
SOCIAL_AUTH_GEOAXIS_SECRET = os.getenv('GEOAXIS_SECRET')
SOCIAL_AUTH_GEOAXIS_HOST = os.getenv('GEOAXIS_HOST', 'geoaxis.gxaccess.com')
OAUTH_GEOAXIS_USER_FIELDS = os.getenv(
    'GEOAXIS_USER_FIELDS', 'username, email, last_name, first_name')
# NOTE(review): under Python 3, map() returns a one-shot lazy iterator that is
# exhausted after a single pass — confirm the target runtime, or wrap in
# list() when upgrading.
SOCIAL_AUTH_GEOAXIS_USER_FIELDS = map(
    str.strip, OAUTH_GEOAXIS_USER_FIELDS.split(','))
OAUTH_GEOAXIS_SCOPES = os.getenv('GEOAXIS_SCOPES', 'UserProfile.me')
SOCIAL_AUTH_GEOAXIS_SCOPE = map(str.strip, OAUTH_GEOAXIS_SCOPES.split(','))
# GeoAxisOAuth2 will cause all login attempts to fail if
# SOCIAL_AUTH_GEOAXIS_HOST is None
GEOAXIS_ENABLED = False
if SOCIAL_AUTH_GEOAXIS_KEY and len(SOCIAL_AUTH_GEOAXIS_KEY) > 0:
    GEOAXIS_ENABLED = True
    AUTHENTICATION_BACKENDS += (
        'django_geoaxis.backends.geoaxis.GeoAxisOAuth2',
    )
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Logging configuration
# Ensure the log directory exists at import time (deliberate side effect)
LOG_DIR = os.path.join(BASE_DIR, 'logs')
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
LOG_NAME = 'scale'
# Formatters shared by the LOG_CONSOLE_* configurations
LOG_FORMATTERS = {
    'standard': {
        'format': ('%(asctime)s %(levelname)s ' +
                   '[%(name)s(%(lineno)s)] %(message)s'),
        'datefmt': '%Y-%m-%d %H:%M:%S',
    },
    'db-standard': {
        'format': ('[%(name)s(%(lineno)s)] %(message)s'),
    }
}
LOG_FILTERS = {
    'require_debug_false': {
        '()': 'django.utils.log.RequireDebugFalse'
    },
    # NOTE(review): presumably passes only DEBUG/INFO records so warnings and
    # errors can be routed to stderr — confirm in scale.custom_logging
    'debug_info_only':{
        '()':'scale.custom_logging.UserFilter',
    }
}
LOG_HANDLERS = {
    'null': {
        'level': 'DEBUG',
        'class': 'logging.NullHandler',
    },
    'mesoshttp' : {
        'level': 'DEBUG',
        'class': 'logging.StreamHandler',
        'formatter': 'standard',
        'stream': sys.stdout
    },
    'console': {
        'level': 'DEBUG',
        'class': 'logging.StreamHandler',
        'formatter': 'standard',
        'stream': sys.stdout
    },
    'console-stderr': {
        'level': 'WARNING',
        'class': 'logging.StreamHandler',
        'formatter': 'standard',
        'stream': sys.stderr
    },
    'console-stdout': {
        'level': 'DEBUG',
        'class': 'logging.StreamHandler',
        'formatter': 'standard',
        'stream': sys.stdout,
        'filters':['debug_info_only']
    },
    # Persists WARNING+ records to the database via the error app
    'log-db': {
        'level': 'WARNING',
        'class': 'error.handlers.DatabaseLogHandler',
        'formatter': 'db-standard',
        'model': 'error.models.LogEntry',
    },
}
def _console_logging(level, handlers):
    """Build a root-logger console logging config at the given level.

    Shares the LOG_FORMATTERS/LOG_FILTERS/LOG_HANDLERS dicts defined above
    so the five LOG_CONSOLE_* presets differ only in root level and the
    handlers attached to the root logger.
    """
    return {
        'version': 1,
        'formatters': LOG_FORMATTERS,
        'filters': LOG_FILTERS,
        'handlers': LOG_HANDLERS,
        'loggers': {
            '': {
                'handlers': list(handlers),
                'level': level,
            },
        },
    }

# Preset root-logger configurations; one is selected via LOGGING below.
# DEBUG/INFO split output across stdout (via the debug_info_only filter) and
# stderr; the higher levels write to stderr only.
LOG_CONSOLE_DEBUG = _console_logging('DEBUG', ['console-stdout', 'console-stderr'])
LOG_CONSOLE_INFO = _console_logging('INFO', ['console-stdout', 'console-stderr'])
LOG_CONSOLE_ERROR = _console_logging('ERROR', ['console-stderr'])
LOG_CONSOLE_WARNING = _console_logging('WARNING', ['console-stderr'])
LOG_CONSOLE_CRITICAL = _console_logging('CRITICAL', ['console-stderr'])
LOGGING = LOG_CONSOLE_DEBUG
# Hack to fix ISO8601 for datetime filters.
# This should be taken care of by a future django fix. And might even be handled
# by a newer version of django-rest-framework. Unfortunately, both of these solutions
# will accept datetimes without timezone information which we do not want to allow
# see https://code.djangoproject.com/ticket/23448
# Solution modified from http://akinfold.blogspot.com/2012/12/datetimefield-doesnt-accept-iso-8601.html
from django.forms import fields
from util.parse import parse_datetime
# Monkey-patch: route all form DateTimeField parsing through the project's
# parse_datetime helper, ignoring Django's per-format string entirely.
fields.DateTimeField.strptime = lambda _self, datetime_string, _format: parse_datetime(datetime_string)
| |
"""
A module for generic classification purpose.
Functionality includes:
normalize_l2norm: Normalize each row has unit l_2 norm.
normalize_mean0std1: Normalize each feature to have mean 0 and std 1.
balance_sample_size: Balance sample size of a data set among classes.
change_class_labels: Change class labels to {0,1,2,3,...,C-1}.
change_class_labels_to_given: Change original class labels to a given labels.
merge_class_labels: Merge class labels into several super groups/classes.
take_some_classes: Only take several classes, and remove the rest.
partition_train_valid_test: Partition the whole data into training, validation, and test sets.
reduce_sample_size: Reduce the sample size to 1/times of the original.
perform: Compute the classification performance.
write_feature_weight: Write the input layer weights to a file.
Only applicable to deep feature selection.
write_feature_weight2: Write the input layer weights and other information to a file.
Only applicable to deep feature selection.
Yifeng Li
CMMT, UBC, Vancouver
Sep 23, 2014
Contact: yifeng.li.cn@gmail.com
"""
from __future__ import division
import numpy as np
from sklearn import cross_validation
import math
def normalize_l2norm(data):
    """
    Normalize each row to have unit l_2 norm.
    INPUTS:
    data: numpy 2d array or matrix, each row should be a sample.
    OUTPUTS:
    data: numpy 2d array or matrix, normalized data. All-zero rows are
          returned as all-zero rows.
    Example:
    data=[[3,5,7,9],[3.0,2,1.1,8.4],[5.9,9,8,10]]
    data=np.array(data)
    data_normalized=normalize_l2norm(data)
    print data_normalized
    """
    # Row-wise l2 norms, shaped (n, 1) so the division broadcasts per row
    row_norms = np.sqrt(np.square(data).sum(axis=1))
    row_norms.shape = (row_norms.shape[0], 1)
    # Guard against division by zero for all-zero rows. The tolerance goes
    # in the denominator only; the previous version also added it to the
    # numerator, which biased every output element by ~2**-30.
    tol = 2**-30
    return data / (row_norms + tol)
def normalize_mean0std1(data,data_mean=None,data_std=None):
    """
    Normalize each feature to have mean 0 and std 1.
    INPUTS:
    data: numpy 2d array or matrix, each row should be a sample.
    data_mean: numpy 1d array or vector, the given means of samples, useful for normalize test data.
    data_std: numpy 1d array or vector, the given standard deviation of samples, useful for normalize test data.
    OUTPUTS:
    data: numpy 2d array or matrix, normalized data.
    data_mean: numpy 1d array or vector, the means actually used (computed here if not given).
    data_std: numpy 1d array or vector, the standard deviations actually used (computed here if not given).
    """
    if data_mean is None:
        data_mean=np.mean(data,axis=0)
    if data_std is None:
        data_std=np.std(data,axis=0)
    # The previous version called data_mean.reshape(...)/data_std.reshape(...)
    # and discarded the results (no-ops); 1-d arrays broadcast along axis 0
    # correctly without reshaping, so the dead calls were removed.
    tol=1e-16  # avoids division by zero for constant features
    return (data-data_mean)/(data_std+tol),data_mean,data_std
def balance_sample_size(data,classes,others=None,min_size_given=None,rng=np.random.RandomState(100)):
    """
    Balance sample size of a data set among classes by random subsampling.
    INPUTS:
    data: numpy 2d array or matrix, each row should be a sample.
    classes: numpy 1d array or vector, class labels.
    others: numpy 2d array or matrix, extra information of samples if available,
            each row should associated to a row of data.
    min_size_given: int, the size of each class wanted.
    rng: numpy random state. NOTE: the default is a module-level RandomState
         shared across calls (evaluated once at definition time); pass a
         fresh RandomState for independent, reproducible draws.
    OUTPUTS:
    data: numpy 2d array or matrix, each row should be a sample, balanced data.
    classes: numpy 1d array or vector, balanced class labels.
    others: numpy 2d array or matrix, balanced other information (or None).
    Example:
    data=[[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5],[6,6,6],[7,7,7]]
    data=np.array(data)
    classes=np.array(['zz','xx','xx','yy','zz','yy','xx'])
    balance_sample_size(data,classes)
    """
    u, indices = np.unique(classes,return_inverse=True)
    indices=np.asarray(indices)
    num_u=len(u)
    # per-class sample counts (range instead of Py2-only xrange)
    sample_sizes=[np.sum(indices==i) for i in range(num_u)]
    size_min=np.amin(sample_sizes) # smallest sample size
    if min_size_given and size_min>min_size_given:
        size_min=min_size_given
    indices_range=np.arange(len(indices))
    keep=[]
    for i in range(num_u):
        ind_this_num=indices_range[indices==i]
        # draw size_min samples without replacement from this class
        keep.append(ind_this_num[rng.choice(len(ind_this_num),size=size_min,replace=False)])
    indices_all=np.concatenate(keep)
    # reduce the data
    data=data[indices_all]
    classes=classes[indices_all]
    # BUGFIX: `if others:` raised "truth value of an array is ambiguous"
    # whenever `others` was a numpy array; test identity against None instead
    if others is not None:
        others=others[indices_all]
    return data,classes,others
def change_class_labels(classes):
    """
    Map arbitrary class labels onto {0,1,2,...,C-1}.
    INPUTS:
    classes: numpy 1d array or vector, the original class labels.
    OUTPUTS:
    u: numpy 1d array or vector, the sorted unique labels found in `classes`.
    indices: numpy 1d array or vector, the new labels, where indices[i] is
             the position of classes[i] within u.
    Example:
    classes=['c2','c3','c2','c1','c2','c1','c3','c2']
    change_class_labels(classes)
    Yifeng Li, in UBC
    Aug 22, 2014.
    """
    # np.unique does both jobs at once: the sorted label set and, via
    # return_inverse, the integer recoding of every element
    unique_labels, recoded = np.unique(classes, return_inverse=True)
    return unique_labels, recoded
def change_class_labels_to_given(classes,given):
    """
    Replace class labels according to a given mapping.
    INPUTS:
    classes: numpy 1 d array or vector, the original class labels.
    given: dic, pairs of old and new labels.
    OUTPUTS:
    classes_new: numpy 1 d array or vector, changed class labels.
    Example:
    classes=[1,2,0,0,2,1,1,2]
    given={1:"class1", 2:"class2", 0:"class0"}
    change_class_labels_to_given(classes,given)
    """
    classes = np.asarray(classes)
    relabeled = np.zeros(classes.shape, dtype=object)
    # For each (old, new) pair, overwrite every matching position at once
    for old_label, new_label in given.items():
        relabeled[classes == old_label] = new_label
    return relabeled
def merge_class_labels(classes,group):
    """
    Merge class labels into several super groups/classes.
    INPUTS:
    classes: numpy 1 d array or vector, the original class labels.
    group: tuple of tuples or lists,
           group[i] indicates which original classes to be merged to the i-th super class.
    OUTPUTS:
    classes_merged: numpy 1 d array or vector, the merged class labels.
    If original labels are strings, they are concatenated by "+".
    If original labels are numbers, they are renumbered starting from 0.
    Example
    classes=[0,3,4,2,1,3,3,2,4,1,1,0,0,1,2,3,4,1]
    group=([0],[1,2],[3,4])
    merge_class_labels(classes,group)
    classes=['c2','c1','c0','c0','c1','c2','c1']
    group=(['c0'],['c1','c2'])
    merge_class_labels(classes,group)
    """
    classes = np.asarray(classes)
    # Integer labels get renumbered 0..len(group)-1; anything else (strings,
    # objects) gets the member names joined with '+'
    is_int = any(classes.dtype == t for t in (int, 'int64', 'int32'))
    if is_int:
        merged = np.zeros(classes.shape, dtype=int)
        for new_label, subgroup in enumerate(group):
            for member in subgroup:
                merged[classes == member] = new_label
    else:
        merged = np.zeros(classes.shape, dtype=object)
        for subgroup in group:
            joined = '+'.join(subgroup)
            for member in subgroup:
                merged[classes == member] = joined
    return merged
def take_some_classes(data,classes,given):
    """
    Keep only the samples whose class appears in `given`; drop the rest.
    INPUTS:
    data: numpy 2d array or matrix, each row is a sample, the original data.
    classes: numpy 1d array or vector, class labels, the original labels.
    given: numpy 1d array or vector, indicates which classes to be taken.
    OUTPUTS:
    data: numpy 2d array or matrix, each row is a sample, the taken data.
    classes: numpy 1d array or vector, class labels, the taken labels.
    """
    classes = np.asarray(classes)
    # Build a boolean row mask that is True wherever the label is wanted
    mask = np.zeros(classes.shape, dtype=bool)
    for wanted in given:
        mask[classes == wanted] = True
    return data[mask], classes[mask]
def partition_train_valid_test(data,classes,ratio=(1,1,1)):
    """
    Partition the whole data into training, validation, and test sets.
    INPUTS:
    data: numpy 2d array or matrix, each row is a sample, the original data.
    classes: numpy 1d array or vector, class labels, the original labels.
    ratio, int tuple or list of length 3, (ratio_of_train_set,ratio_of_valid_set,ratio_test_set).
    OUTPUTS:
    train_set_x: data of training set.
    train_set_y: class labels of training set.
    valid_set_x: data of validation set.
    valid_set_y: class labels of validation set.
    test_set_x: data of test set.
    test_set_y: class labels of test set.
    Example:
    data=np.random.random((20,3))
    classes=np.array([0,2,2,2,0,0,1,1,0,0,0,2,2,2,0,0,1,1,0,0],dtype=int)
    train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y \
    =partition_train_valid_test(data,classes,ratio=(2,1,1))
    Yifeng Li, in UBC.
    August 22, 2014.
    """
    k=sum(ratio) # ratio must be a vector of integers
    # NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20,
    # and StratifiedKFold(y, n_folds=k) is the pre-0.18 API — this function
    # requires an old scikit-learn to run.
    skf = cross_validation.StratifiedKFold(classes, n_folds=k)
    train_ind=np.array([],dtype=int)
    valid_ind=np.array([],dtype=int)
    test_ind=np.array([],dtype=int)
    count=0
    # Assign the k stratified folds to train/valid/test in proportion to
    # `ratio`: the first ratio[0] folds go to train, the next ratio[1] to
    # validation, and the remainder to test (np.append flattens, so wrapping
    # te in [] below is equivalent to appending te directly)
    for (tr,te) in skf:
        if count<ratio[0]:
            train_ind=np.append(train_ind,te)
            count=count+1
            continue
        if count>=ratio[0] and count <ratio[0]+ratio[1]:
            valid_ind=np.append(valid_ind,[te])
            count=count+1
            continue
        if count>=ratio[0]+ratio[1]:
            test_ind=np.append(test_ind,[te])
            count=count+1
            continue
    train_set_x=data[train_ind]
    train_set_y=classes[train_ind]
    valid_set_x=data[valid_ind]
    valid_set_y=classes[valid_ind]
    test_set_x=data[test_ind]
    test_set_y=classes[test_ind]
    return train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y
def perform(y,y_predicted,unique_classes):
    """
    Compute the classification performance.
    INPUTS:
    y: numpy 1d array or vector, the actual class labels (assumed to be
       integers in range(len(unique_classes))).
    y_predicted: numpy 1d array or vector, the predicted class labels.
    unique_classes: numpy 1d array or vector of length C (# classes), all unique actual class labels.
    OUTPUTS:
    perf: numpy 1d array or vector of length C+2,
          [acc_0, acc_1, acc_{C-1}, accuracy, balanced accuracy].
    confusion_matrix: numpy 2d array of size C X C, confusion matrix.
    Example:
    y=[0,0,1,1,1,2,2,2,2]
    y_predicted=[0,1,1,1,2,2,2,0,1]
    perform(y,y_predicted,[0,1,2])
    Yifeng Li, in UBC.
    August 23, 2014.
    """
    y=np.asarray(y,dtype=int)
    y_predicted=np.asarray(y_predicted,dtype=int)
    numcl=len(unique_classes)
    confusion_matrix=np.zeros((numcl,numcl),dtype=float)
    # Vectorized tally: np.add.at accumulates correctly for repeated
    # (true, predicted) pairs. Replaces a Python-level xrange loop, which was
    # also Python-2-only.
    np.add.at(confusion_matrix,(y,y_predicted),1)
    perf=np.zeros((numcl+2,)) # acc_0,acc_1,...,acc_C-1, acc, BACC
    row_sums=confusion_matrix.sum(axis=1)  # per-class sample counts
    perf[0:numcl]=confusion_matrix.diagonal()/row_sums
    perf[numcl]=confusion_matrix.diagonal().sum()/row_sums.sum()
    perf[numcl+1]=np.mean(perf[0:numcl])  # balanced accuracy
    return perf,confusion_matrix
def change_max_num_epoch_change_learning_rate(max_num_epoch_change_learning_rate,max_num_epoch_change_rate):
    """Scale the epoch count by the given rate, rounding up to an int."""
    scaled = max_num_epoch_change_rate * max_num_epoch_change_learning_rate
    return int(math.ceil(scaled))
def drange(start, stop, step):
    """Return a list like range() but allowing float steps.

    Inclusive of `stop` when the accumulated value lands on it exactly.
    Values are produced by repeated addition (kept deliberately, so the
    float results match previous behavior exactly).
    """
    out = []
    current = start
    while current <= stop:
        out.append(current)
        current += step
    return out
def write_feature_weight(weights,features,lambda1s,filename):
    """
    Write the input layer weights to a tab-separated text file.
    Only applicable to deep feature selection.
    INPUTS:
    weights: numpy 2d array or matrix,
             rows corresponding to values of lambda1,
             columns corresponding to features.
    features: numpy 1d array or vector, name of features.
    lambda1s: numpy 1d array or vector, values of lambda1.
    filename: string, file name to be written.
    OUTPUTS:
    None. The file has a header row ['lambda', feature names...] followed by
    one row per lambda1 value: [lambda1, weights...].
    """
    # example:
    #weights=np.asarray([[1.1,2.2,3.4],[5.5,6.6,7.7]])
    #features=np.asarray(['f1','f2','f3'],dtype=object)
    #lambda1s=np.asarray([1.0,2.0])
    #write_feature_weight(weights,features,lambda1s,filename='test.txt')
    # header row: 'lambda' followed by the feature names, shaped (1, F+1)
    header = np.insert(features, 0, 'lambda')
    header.resize((1, header.shape[0]))
    # body: one row per lambda1 value, with lambda1 as the first column
    weight_rows = np.asarray(weights, dtype=object)
    lambda_col = np.asanyarray(lambda1s, dtype=object)
    lambda_col.resize((lambda_col.shape[0], 1))
    body = np.hstack((lambda_col, weight_rows))
    table = np.vstack((header, body))
    np.savetxt(filename, table, fmt='%s', delimiter='\t')
def write_feature_weight2(weights=None, features=None, lambda1s=None, accuracy=None, uniqueness=False, tol=1e-4, filename='selected_features.txt'):
    """
    Write the input layer weights and other information to a file.
    Only applicable to deep feature selection.
    INPUTS:
    weights: numpy 2d array or matrix,
        rows corresponding to values of lambda1,
        columns corresponding to features.
    features: numpy 1d array or vector, name of features.
    lambda1s: numpy 1d array or vector, values of lambda1.
    accuracy: numpy 1d array or vector, accuracy corresponding to each lambda1.
        This parameter is optional.
    uniqueness: bool, indicates if only writing unique sizes of feature subsets.
    tol: threshold, weights below tol*w_max are considered to be zeros.
    filename: string, file name to be written.
    OUTPUTS:
    None (returns None early when no feature subset is selected).
    The output file is arranged as
    [lambda,accuracy,num_selected,feature_subset,weights_of_feature_subset].
    """
    weights=np.asarray(weights,dtype=float)
    lambda1s=np.asarray(lambda1s,dtype=float)
    num_selected=np.zeros(len(lambda1s),dtype=int) # for each lambda, save the number of selected features
    features_selected=np.zeros(len(lambda1s),dtype=object)
    # Get the numbers of selected features for each lambda.
    for i in range(len(lambda1s)):
        w=weights[i]
        w_max=np.max(abs(w))
        w_min=np.min(abs(w))
        # No element is much larger than the rest: either none selected, or all.
        if tol*w_max<=w_min:
            continue
        selected=(abs(w)>tol*w_max)
        num_selected[i]=selected.sum()
        feat_selected=features[selected]
        w_selected=w[selected]
        # Sort the selected features by decreasing absolute weight.
        ind=np.argsort(abs(w_selected))[::-1]
        features_selected[i]=','.join(feat_selected[ind])
    # Optionally keep only the first occurrence of each distinct subset size.
    if uniqueness:
        take=take_first(num_selected)
    else:
        take=np.ones(len(num_selected),dtype=bool)
    weights_take=weights[take]
    lambda1s_take=lambda1s[take]
    lambda1s_take.resize((lambda1s_take.shape[0],1))
    # BUG FIX: ndarray.round() is NOT in-place; it returns a rounded copy.
    # The original code discarded the result, so values were written unrounded.
    lambda1s_take=lambda1s_take.round(decimals=6)
    features_take=features_selected[take]
    features_take.resize((features_take.shape[0],1))
    num_take=num_selected[take]
    # If no subset is selected at all, there is nothing to write.
    if num_take.shape[0]==0:
        return None
    # If the last count is zero, it means that all features are selected.
    if num_take.shape[0]>1 and num_take[-1]==0 and num_take[-2]>0:
        num_take[-1]=len(features)
        features_take[-1]=','.join(features)
    num_take.resize((num_take.shape[0],1))
    if accuracy is not None:
        accuracy=np.asarray(accuracy,dtype=float)
        accuracy_take=accuracy[take]
        accuracy_take.resize((accuracy_take.shape[0],1))
        # BUG FIX: same as above — keep the rounded copy.
        accuracy_take=accuracy_take.round(decimals=4)
        features=np.insert(features,0,['lambda','accuracy','num_selected','feature_subset'])
        features.resize((1,features.shape[0]))
        data=np.hstack((lambda1s_take,accuracy_take,num_take,features_take,weights_take))
        data=np.vstack((features,data))
    else:
        features=np.insert(features,0,['lambda','num_selected','feature_subset'])
        features.resize((1,features.shape[0]))
        data=np.hstack((lambda1s_take,num_take,features_take,weights_take))
        data=np.vstack((features,data))
    np.savetxt(filename,data,fmt='%s',delimiter='\t')
def take_first(nums):
    """
    Return a boolean mask selecting the first index of each distinct nonzero run.
    Yifeng Li in UBC.
    Aug 30, 2014.
    Example:
    nums=[0,0,0,1,2,2,2,3,4,4,5,5,5,5,6,7,7,8]
    take_first(nums)

    NOTE: matching the original behavior, the element AFTER a nonzero value is
    also marked when it differs — even when that element is zero.
    """
    n = len(nums)
    flags = np.zeros(n, dtype=bool)
    # Single-element input: take it iff it is nonzero.
    if n == 1:
        if nums[0] != 0:
            flags[0] = True
        return flags
    for idx in range(n - 1):
        # Zeros never start a run.
        if nums[idx] == 0:
            continue
        if idx == 0:
            # A nonzero first element always starts a run.
            flags[0] = True
        elif nums[idx - 1] == 0:
            # First nonzero after a block of zeros starts a run.
            flags[idx] = True
        # A change of value marks the next position as a (potential) run start.
        if nums[idx] != nums[idx + 1]:
            flags[idx + 1] = True
    return flags
def reduce_sample_size(data,classes,times=2):
    """Subsample the data by keeping every `times`-th row.

    INPUTS:
    data: numpy 2d array or matrix, each row is a sample, the original data.
    classes: numpy 1d array or vector, class labels, the original labels.
    times: int, keep one sample out of every `times`.
    OUTPUTS:
    data: the reduced data (a copy via fancy indexing).
    classes: the reduced classes (a copy via fancy indexing).
    """
    keep_rows = np.arange(0, data.shape[0], times)
    keep_labels = np.arange(0, classes.shape[0], times)
    return data[keep_rows], classes[keep_labels]
# ---- boundary: the code below belongs to a separate module (Simian GAE models) ----
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""App Engine Models for Simian web application."""
import datetime
import difflib
import gc
import logging
import re
from google.appengine import runtime
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.runtime import apiproxy_errors
from simian.mac.common import ipcalc
from simian.mac import common
from simian.mac.common import gae_util
from simian.mac.common import util
from simian.mac.models import constants
from simian.mac.models import properties
from simian.mac.munki import plist as plist_lib
class Error(Exception):
  """Class for domain exceptions.

  Root of this module's exception hierarchy; callers may catch Error to
  handle any module-specific failure.
  """


class InvalidArgumentsError(Error):
  """Invalid arguments were passed."""


# The number of days a client is silent before being considered inactive.
COMPUTER_ACTIVE_DAYS = 30

# Default memcache seconds for memcache-backed datastore entities
MEMCACHE_SECS = 300
class BaseModel(db.Model):
  """Abstract base model with useful generic methods.

  Adds memcache-wrapped get/set/delete helpers around the Datastore, plus a
  deferred "auto update" hook that refreshes registered cache entries after
  every put().
  """

  @classmethod
  def MemcacheAddAutoUpdateTask(cls, func, *args, **kwargs):
    """Sets a memcache auto update task.

    Args:
      func: str, like "MemcacheWrappedSet"
      args: list, optional, arguments to function
      kwargs: dict, optional, keyword arguments to function
    Raises:
      ValueError: func is not a function of this class or not callable
    """
    if not hasattr(cls, func) or not callable(getattr(cls, func)):
      raise ValueError(func)
    # Registered tasks are stored on the class, not per-instance.
    if not hasattr(cls, '_memcache_auto_update_tasks'):
      cls._memcache_auto_update_tasks = []
    cls._memcache_auto_update_tasks.append((func, args, kwargs))

  @classmethod
  def MemcacheAutoUpdate(cls, _deferred=False):
    """Run all memcache auto updates.

    Args:
      _deferred: bool, whether this function has been deferred
    """
    if not getattr(cls, '_memcache_auto_update_tasks', None):
      return
    # First call only schedules the real work on the deferred queue
    # (10s countdown), so the caller's request is not slowed down.
    if not _deferred:
      deferred.defer(cls.MemcacheAutoUpdate, _deferred=True, _countdown=10)
      return
    for func, args, kwargs in getattr(cls, '_memcache_auto_update_tasks', []):
      getattr(cls, func)(*args, **kwargs)

  @classmethod
  def DeleteMemcacheWrap(cls, key_name, prop_name=None):
    """Deletes a cached entity or property from memcache.

    Args:
      key_name: str key name of the entity to delete.
      prop_name: optional, default None, property name to delete.
    """
    # Key layout mirrors MemcacheWrappedGet: 'mwgpn_' for a single property,
    # 'mwg_' for the whole (protobuf-serialized) entity.
    if prop_name:
      memcache_key = 'mwgpn_%s_%s_%s' % (cls.kind(), key_name, prop_name)
    else:
      memcache_key = 'mwg_%s_%s' % (cls.kind(), key_name)
    memcache.delete(memcache_key)

  @classmethod
  def ResetMemcacheWrap(
      cls, key_name, prop_name=None, memcache_secs=MEMCACHE_SECS):
    """Deletes and repopulates a cached entity or property from Datastore.

    Args:
      key_name: str key name of the entity to delete.
      prop_name: optional, default None, property name to delete.
      memcache_secs: int seconds to store in memcache; default MEMCACHE_SECS.
    """
    cls.DeleteMemcacheWrap(key_name, prop_name=prop_name)
    # The Get below re-fetches from Datastore and re-primes memcache.
    cls.MemcacheWrappedGet(
        key_name, prop_name=prop_name, memcache_secs=memcache_secs)

  @classmethod
  def MemcacheWrappedGet(
      cls, key_name, prop_name=None, memcache_secs=MEMCACHE_SECS,
      retry=False):
    """Fetches an entity by key name from model wrapped by Memcache.

    Args:
      key_name: str key name of the entity to fetch.
      prop_name: optional property name to return the value for instead of
          returning the entire entity.
      memcache_secs: int seconds to store in memcache; default MEMCACHE_SECS.
      retry: bool, default False, if this is a retry (2nd attempt) to
          MemcacheWrappedGet the entity.
    Returns:
      If an entity for key_name exists,
        if prop_name == None returns the db.Model entity,
        otherwise only returns the prop_name property value on entity.
      If an entity for key_name does not exist,
        returns None.
    """
    output = None
    if prop_name:
      memcache_key = 'mwgpn_%s_%s_%s' % (cls.kind(), key_name, prop_name)
    else:
      memcache_key = 'mwg_%s_%s' % (cls.kind(), key_name)
    cached = memcache.get(memcache_key)
    if cached is None:
      # Cache miss: load from Datastore and prime the cache.
      entity = cls.get_by_key_name(key_name)
      if not entity:
        return
      if prop_name:
        try:
          output = getattr(entity, prop_name)
        except AttributeError:
          logging.error(
              'Retrieving missing property %s on %s',
              prop_name,
              entity.__class__.__name__)
          return
        to_cache = output
      else:
        output = entity
        # Whole entities are cached as serialized protobuf strings.
        to_cache = db.model_to_protobuf(entity).SerializeToString()
      try:
        memcache.set(memcache_key, to_cache, memcache_secs)
      except ValueError, e:
        # e.g. value too large for memcache; caching is best-effort only.
        logging.warning(
            'MemcacheWrappedGet: failure to memcache.set(%s, ...): %s',
            memcache_key, str(e))
    else:
      if prop_name:
        output = cached
      else:
        try:
          output = db.model_from_protobuf(cached)
        except Exception, e:  # pylint: disable=broad-except
          # NOTE(user): I copied this exception trap style from
          # google.appengine.datastore.datatstore_query.  The notes indicate
          # that trapping this exception by the class itself is problematic
          # due to differences between the Python and SWIG'd exception
          # classes.
          output = None
          # Evict the bad cache entry before retrying.
          memcache.delete(memcache_key)
          if e.__class__.__name__ == 'ProtocolBufferDecodeError':
            logging.warning('Invalid protobuf at key %s', key_name)
          elif retry:
            logging.exception('Unexpected exception in MemcacheWrappedGet')
          if not retry:
            # One retry after eviction; second failure falls straight
            # through to Datastore below.
            return cls.MemcacheWrappedGet(
                key_name, prop_name=prop_name, memcache_secs=memcache_secs,
                retry=True)
          else:
            return cls.get_by_key_name(key_name)
    return output

  @classmethod
  def MemcacheWrappedGetAllFilter(
      cls, filters=(), limit=1000, memcache_secs=MEMCACHE_SECS):
    """Fetches all entities for a filter set, wrapped by Memcache.

    Args:
      filters: tuple, optional, filter arguments, e.g.
        ( ( "foo =", True ),
          ( "zoo =", 1 ), ),
      limit: int, number of rows to fetch
      memcache_secs: int seconds to store in memcache; default MEMCACHE_SECS.
    Returns:
      entities
    """
    # The cache key encodes the filter set so distinct queries don't collide.
    filter_str = '|'.join(map(lambda x: '_%s,%s_' % (x[0], x[1]), filters))
    memcache_key = 'mwgaf_%s%s' % (cls.kind(), filter_str)
    entities = memcache.get(memcache_key)
    if entities is None:
      query = cls.all()
      for filt, value in filters:
        query = query.filter(filt, value)
      entities = query.fetch(limit)
      memcache.set(memcache_key, entities, memcache_secs)
    return entities

  @classmethod
  def DeleteMemcacheWrappedGetAllFilter(cls, filters=()):
    """Deletes the memcache wrapped response for this GetAllFilter.

    Args:
      filters: tuple, optional, filter arguments, e.g.
        ( ( "foo =", True ),
          ( "zoo =", 1 ), ),
    """
    # Must build the key exactly as MemcacheWrappedGetAllFilter does.
    filter_str = '|'.join(map(lambda x: '_%s,%s_' % (x[0], x[1]), filters))
    memcache_key = 'mwgaf_%s%s' % (cls.kind(), filter_str)
    memcache.delete(memcache_key)

  @classmethod
  def MemcacheWrappedSet(
      cls, key_name, prop_name, value, memcache_secs=MEMCACHE_SECS):
    """Sets an entity by key name and property wrapped by Memcache.

    Args:
      key_name: str, key name of entity to fetch
      prop_name: str, property name to set with value
      value: object, value to set
      memcache_secs: int seconds to store in memcache; default MEMCACHE_SECS.
    """
    memcache_entity_key = 'mwg_%s_%s' % (cls.kind(), key_name)
    memcache_key = 'mwgpn_%s_%s_%s' % (cls.kind(), key_name, prop_name)
    entity = cls.get_or_insert(key_name)
    setattr(entity, prop_name, value)
    entity.put()
    # Refresh both cache flavors (single property and whole entity) so
    # subsequent MemcacheWrappedGet calls see the new value immediately.
    entity_protobuf = db.model_to_protobuf(entity).SerializeToString()
    memcache.set(memcache_key, value, memcache_secs)
    memcache.set(memcache_entity_key, entity_protobuf, memcache_secs)

  @classmethod
  def MemcacheWrappedDelete(cls, key_name=None, entity=None):
    """Delete an entity by key name and clear Memcache.

    Note: This only clears the entity cache.  If MemcacheWrappedGet()
    with a prop_name kwarg has been used, a separate cache will exist
    for that property.  This function will not delete that memcache.

    TODO(user): If this function were actually used anywhere
    we should have prop_name=[] here so that users can delete prop_name
    caches too.

    Args:
      key_name: str, key name of entity to fetch
      entity: db.Model entity
    Raises:
      ValueError: when neither entity nor key_name are supplied
    """
    if entity:
      key_name = entity.key().name()
    elif key_name:
      entity = cls.get_by_key_name(key_name)
    else:
      raise ValueError
    if entity:
      entity.delete()
    # Entity cache is cleared even if the entity no longer existed.
    memcache_key = 'mwg_%s_%s' % (cls.kind(), key_name)
    memcache.delete(memcache_key)

  def put(self, *args, **kwargs):
    """Perform datastore put operation.

    Also triggers any registered memcache auto-update tasks (deferred).

    Args:
      args: list, optional, args to superclass put()
      kwargs: dict, optional, keyword args to superclass put()
    Returns:
      return value from superclass put()
    """
    r = super(BaseModel, self).put(*args, **kwargs)
    self.MemcacheAutoUpdate()
    return r
class BasePlistModel(BaseModel):
  """Base model which can easily store a utf-8 plist.

  The raw XML is persisted in the _plist TextProperty; a parsed
  plist_lib.ApplePlist object is lazily cached on self._plist_obj and
  exposed through the `plist` property.
  """

  PLIST_LIB_CLASS = plist_lib.ApplePlist

  _plist = db.TextProperty()  # catalog/manifest/pkginfo plist file.

  def _ParsePlist(self):
    """Parses the self._plist XML into a plist_lib.ApplePlist object."""
    self._plist_obj = self.PLIST_LIB_CLASS(self._plist.encode('utf-8'))
    try:
      self._plist_obj.Parse()
    except plist_lib.PlistError, e:
      logging.exception('Error parsing self._plist: %s', str(e))
      # Parse failure leaves no stale/partial object behind.
      self._plist_obj = None

  def _GetPlist(self):
    """Returns the parsed plist object (parsing lazily on first access)."""
    if not hasattr(self, '_plist_obj'):
      if self._plist:
        self._ParsePlist()
      else:
        # No stored XML yet: hand back an empty plist object.
        self._plist_obj = self.PLIST_LIB_CLASS('')
    return self._plist_obj

  def _SetPlist(self, plist):
    """Sets the _plist property.

    if plist is unicode, store as is.
    if plist is other, store it and attach assumption that encoding is utf-8.

    therefore, the setter only accepts unicode or utf-8 str (or ascii, which
    would fit inside utf-8)

    Args:
      plist: str or unicode XML plist, or an already-parsed plist object.
    """
    if type(plist) is unicode:
      self._plist = db.Text(plist)
      self._ParsePlist()
    elif type(plist) is str:
      self._plist = db.Text(plist, encoding='utf-8')
      self._ParsePlist()
    else:
      # Assume an already-parsed plist object; regenerate XML from it.
      self._plist_obj = plist
      self._plist = db.Text(self._plist_obj.GetXml())

  plist = property(_GetPlist, _SetPlist)

  def _GetPlistXml(self):
    """Returns the str plist."""
    return self._plist

  plist_xml = property(_GetPlistXml)

  def put(self, *args, **kwargs):
    """Put to Datastore.

    Re-serializes the parsed plist object back to XML before storing, so
    in-place mutations of self.plist are persisted.

    Args:
      args: list, optional, args to superclass put()
      kwargs: dict, optional, keyword args to superclass put()
    Returns:
      return value from superclass put()
    """
    if self.plist:
      self._plist = self.plist.GetXml()
    return super(BasePlistModel, self).put(*args, **kwargs)
class Computer(db.Model):
  """Computer model.

  One entity per managed client machine.  The `active` flag is derived from
  preflight_datetime on every put() (see put below) and maintained in bulk
  by MarkInactive().
  """

  # All datetimes are UTC.
  active = db.BooleanProperty(default=True)  # automatically set property
  hostname = db.StringProperty()  # i.e. user-macbook.
  serial = db.StringProperty()  # str serial number of the computer.
  ip_address = db.StringProperty()  # str ip address of last connection
  uuid = db.StringProperty()  # OSX or Puppet UUID; undecided.
  runtype = db.StringProperty()  # Munki runtype. i.e. auto, custom, etc.
  preflight_datetime = db.DateTimeProperty()  # last preflight execution.
  postflight_datetime = db.DateTimeProperty()  # last postflight execution.
  last_notified_datetime = db.DateTimeProperty()  # last MSU.app popup.
  pkgs_to_install = db.StringListProperty()  # pkgs needed to be installed.
  all_apple_updates_installed = db.BooleanProperty()  # True=all installed.
  all_pkgs_installed = db.BooleanProperty()  # True=all installed, False=not.
  owner = db.StringProperty()  # i.e. foouser
  client_version = db.StringProperty()  # i.e. 0.6.0.759.0.
  os_version = db.StringProperty()  # i.e. 10.5.3, 10.6.1, etc.
  site = db.StringProperty()  # string site or campus name. i.e. NYC.
  office = db.StringProperty()  # string office name. i.e. US-NYC-FOO.
  # Simian track (i.e. Munki)
  track = db.StringProperty()  # i.e. stable, testing, unstable
  # Configuration track (i.e. Puppet)
  config_track = db.StringProperty()  # i.e. stable, testing, unstable
  # Connection dates and times.
  connection_dates = db.ListProperty(datetime.datetime)
  connection_datetimes = db.ListProperty(datetime.datetime)
  # Counts of connections on/off corp.
  connections_on_corp = db.IntegerProperty(default=0)
  connections_off_corp = db.IntegerProperty(default=0)
  last_on_corp_preflight_datetime = db.DateTimeProperty()
  uptime = db.FloatProperty()  # float seconds since last reboot.
  root_disk_free = db.IntegerProperty()  # int of bytes free on / partition.
  user_disk_free = db.IntegerProperty()  # int of bytes free in owner User dir.
  _user_settings = db.BlobProperty()  # serialized dict; see user_settings.
  user_settings_exist = db.BooleanProperty(default=False)
  # request logs to be uploaded, and notify email addresses saved here.
  # the property should contain a comma delimited list of email addresses.
  upload_logs_and_notify = db.StringProperty()
  # The number of preflight connections since the last successful postflight
  # connection. Resets to 0 when a postflight connection is posted.
  preflight_count_since_postflight = db.IntegerProperty(default=0)

  def _GetUserSettings(self):
    """Returns the user setting dictionary, or None."""
    if self._user_settings:
      return util.Deserialize(self._user_settings)
    else:
      return None

  def _SetUserSettings(self, data):
    """Sets the user settings dictionary.

    Args:
      data: dictionary data to set to the user_settings, or None.
    """
    if not data:
      self.user_settings_exist = False
      self._user_settings = None
    else:
      self._user_settings = util.Serialize(data)
      self.user_settings_exist = True

  user_settings = property(_GetUserSettings, _SetUserSettings)

  @classmethod
  def AllActive(cls, keys_only=False):
    """Returns a query for all Computer entities that are active."""
    return cls.all(keys_only=keys_only).filter('active =', True)

  @classmethod
  def MarkInactive(cls):
    """Marks any inactive computers as such.

    Walks all active computers whose last preflight is older than
    COMPUTER_ACTIVE_DAYS in batches of 500, using query cursors, and
    clears their active flag.

    Returns:
      int count of computers marked inactive.
    """
    count = 0
    now = datetime.datetime.utcnow()
    earliest_active_date = now - datetime.timedelta(days=COMPUTER_ACTIVE_DAYS)
    query = cls.AllActive().filter('preflight_datetime <', earliest_active_date)
    gc.collect()
    while True:
      computers = query.fetch(500)
      if not computers:
        break
      for c in computers:
        c.active = False  # this isn't neccessary, but makes more obvious.
        c.put()
        count += 1
      # Save the cursor, drop the batch, then rebuild the query and resume
      # from the cursor — presumably to keep memory bounded on long runs.
      cursor = str(query.cursor())
      del(computers)
      del(query)
      gc.collect()
      query = cls.AllActive().filter(
          'preflight_datetime <', earliest_active_date)
      query.with_cursor(cursor)
    return count

  def put(self, update_active=True):
    """Forcefully set active according to preflight_datetime.

    Args:
      update_active: bool, default True, recompute the active flag from
          preflight_datetime before storing.

    NOTE(review): unlike BaseModel.put, this discards the key returned by
    super().put() — confirm no caller relies on the return value.
    """
    if update_active:
      now = datetime.datetime.utcnow()
      earliest_active_date = now - datetime.timedelta(days=COMPUTER_ACTIVE_DAYS)
      if self.preflight_datetime:
        if self.preflight_datetime > earliest_active_date:
          self.active = True
        else:
          self.active = False
    super(Computer, self).put()
class ComputerClientBroken(db.Model):
  """Model to store broken client reports."""

  uuid = db.StringProperty()  # computer uuid
  hostname = db.StringProperty()
  owner = db.StringProperty()
  reason = db.StringProperty()  # short reason the client is considered broken.
  details = db.TextProperty()  # extended description of the breakage.
  first_broken_datetime = db.DateTimeProperty(auto_now_add=True)
  broken_datetimes = db.ListProperty(datetime.datetime)  # each report time.
  fixed = db.BooleanProperty(default=False)  # True once resolved.
  serial = db.StringProperty()
  ticket_number = db.StringProperty()  # tracking ticket, if any.
class ComputerMSULog(db.Model):
  """Store MSU logs as state information.

  key = uuid_source_event
  """

  uuid = db.StringProperty()  # computer uuid
  source = db.StringProperty()  # "MSU", "user", ...
  event = db.StringProperty()  # "launched", "quit", ...
  user = db.StringProperty()  # user who MSU ran as -- may not be owner!
  desc = db.StringProperty()  # additional descriptive text
  mtime = db.DateTimeProperty()  # time of log
class ClientLogFile(db.Model):
  """Store client log files, like ManagedSoftwareUpdate.log.

  key = uuid + mtime
  """

  uuid = db.StringProperty()  # computer uuid
  name = db.StringProperty()  # log name
  mtime = db.DateTimeProperty(auto_now_add=True)
  log_file = properties.CompressedUtf8BlobProperty()  # compressed log body.
class Log(db.Model):
  """Base Log class to be extended for Simian logging."""

  # UTC datetime when the event occured.
  mtime = db.DateTimeProperty()

  def put(self):
    """If a log mtime was not set, automatically set it to now in UTC.

    Note: auto_now_add=True is not ideal as then clients can't report logs that
    were written in the past.
    """
    if not self.mtime:
      self.mtime = datetime.datetime.utcnow()
    super(Log, self).put()
class ClientLogBase(Log):
  """ClientLog model for all client interaction."""

  # denormalized OSX or Puppet UUID; undecided.
  uuid = db.StringProperty()
  computer = db.ReferenceProperty(Computer)  # back-reference to the client.
class ClientLog(ClientLogBase):
  """Model for generic client interaction (preflight exit, etc)."""

  action = db.StringProperty()  # short description of action.
  details = db.TextProperty()  # extended description.
class PreflightExitLog(ClientLogBase):
  """Model for preflight exit logging."""

  exit_reason = db.TextProperty()  # extended description.
class InstallLog(ClientLogBase):
  """Model for all client installs."""

  package = db.StringProperty()  # Firefox, Munkitools, etc.
  # TODO(user): change status to db.IntegerProperty(), convert all entities.
  status = db.StringProperty()  # return code; 0, 1, 2 etc.
  on_corp = db.BooleanProperty()  # True for install on corp, False otherwise.
  applesus = db.BooleanProperty(default=False)  # True for Apple SUS installs.
  dl_kbytes_per_sec = db.IntegerProperty()  # download throughput.
  duration_seconds = db.IntegerProperty()  # install duration.
  success = db.BooleanProperty()  # derived in put() from IsSuccess().
  server_datetime = db.DateTimeProperty(auto_now_add=True)
  unattended = db.BooleanProperty()

  def IsSuccess(self):
    """Returns True if the install was a success, False otherwise."""
    # Most Adobe installers return 20 success. Yuck!
    # NOTE: status is compared as a string (it is a StringProperty).
    return self.status in ['0', '20']

  def put(self):
    """Perform datastore put operation, forcefully setting success boolean."""
    self.success = self.IsSuccess()
    return super(InstallLog, self).put()
class AdminLogBase(Log):
  """AdminLogBase model for all admin interaction."""

  user = db.StringProperty()  # i.e. fooadminuser.
class AdminPackageLog(AdminLogBase, BasePlistModel):
  """AdminPackageLog model for all admin pkg interaction."""

  original_plist = db.TextProperty()  # plist XML before the change.
  action = db.StringProperty()  # i.e. upload, delete, etc.
  filename = db.StringProperty()
  catalogs = db.StringListProperty()
  manifests = db.StringListProperty()
  install_types = db.StringListProperty()

  def _GetPlistDiff(self):
    """Returns a generator of diff lines between original and new plist.

    Each element is a dict {'type': 'diff_add'|'diff_sub'|'diff_none',
    'line': str}; long unchanged stretches are additionally tagged with
    'start_omitting'/'end_omitting' markers so the UI can collapse them.
    """
    new_plist = self.plist.GetXml().splitlines()
    # With no original, everything counts as added.
    if not self.original_plist:
      return [{'type': 'diff_add', 'line': line} for line in new_plist]
    original_plist = self.original_plist.splitlines()
    # NOTE(review): new_plist is recomputed here although identical to the
    # value computed above — harmless, but redundant.
    new_plist = self.plist.GetXml().splitlines()
    diff = difflib.Differ().compare(original_plist, new_plist)
    lines = []
    if diff:
      re_add = re.compile("^\s*\+")
      re_sub = re.compile("^\s*\-")
      for line in diff:
        if re_add.match(line):
          linetype = 'diff_add'
        elif re_sub.match(line):
          linetype = 'diff_sub'
        else:
          linetype = 'diff_none'
        lines.append({'type': linetype, 'line': line})
    # Mark runs of unchanged lines (2+ away from any edit or the ends)
    # so they can be omitted from display.
    omitting = False
    for i, line in enumerate(lines):
      if i > 1 and i < len(lines)-2:
        # A line is "omittable" if it's at least 2 lines away from the start,
        # end or an edited line.
        is_omit = all([l['type'] == 'diff_none' for l in lines[i-2:i+3]])
        if is_omit and not omitting:
          line['start_omitting'] = True
          omitting = True
      if omitting:
        not_omit = any([l['type'] != 'diff_none' for l in lines[i:i+3]])
        if i > len(lines)-3 or not_omit:
          line['end_omitting'] = True
          omitting = False
    return lines

  plist_diff = property(_GetPlistDiff)
class AdminPackageProposalLog(AdminPackageLog):
  """AdminPackageLog model for all admin pkg interaction.

  Adds the approver of a package proposal on top of AdminPackageLog.
  """

  approver = db.StringProperty()  # admin user who approved the proposal.
class AdminAppleSUSProductLog(AdminLogBase):
  """Model to log all admin Apple SUS Product changes."""

  product_id = db.StringProperty()
  action = db.StringProperty()  # description of the change.
  tracks = db.StringListProperty()  # tracks the product was on at log time.

  @classmethod
  def Log(cls, products, action):
    """Puts batches of product changes to AdminAppleSUSProductLog.

    Args:
      products: list of or single models.AppleSUSProduct entity.
      action: str, description of the change taking place to the batch.
    """
    # Support products being a single product entity.
    if not isinstance(products, (list, tuple)):
      products = (products,)
    to_put = []
    for p in products:
      log = cls(product_id=p.product_id, action=action, tracks=p.tracks)
      log.mtime = datetime.datetime.utcnow()
      to_put.append(log)
    # Put all log entities together.
    gae_util.BatchDatastoreOp(db.put, to_put)
class KeyValueCache(BaseModel):
  """Model for a generic key value pair storage."""

  text_value = db.TextProperty()  # textual payloads (see GetItem/SetItem).
  blob_value = db.BlobProperty()  # serialized payloads (see *SerializedItem).
  mtime = db.DateTimeProperty(auto_now=True)

  @classmethod
  def IpInList(cls, key_name, ip):
    """Check whether IP is in serialized IP/mask list in key_name.

    The KeyValueCache entity at key_name is expected to have a text_value
    which is in util.Serialize() form. The deserialized structure looks like

      [ "200.0.0.0/24",
        "10.0.0.0/8",
        etc ...
      ]

    Note that this function is not IPv6 safe and will always return False
    if the input ip is IPv6 format.

    Args:
      key_name: str, like 'auth_bad_ip_blocks'
      ip: str, like '127.0.0.1'
    Returns:
      True if the ip is inside a mask in the list, False if not
    """
    if not ip:
      return False  # lenient response
    # TODO(user): Once the underlying util.Ip* methods support ipv6
    # this method can go away. Until then, this stops all of the churn
    # and exits early.
    if ip.find(':') > -1:  # ipv6
      return False
    try:
      ip_blocks_str = cls.MemcacheWrappedGet(key_name, 'text_value')
      if not ip_blocks_str:
        return False
      ip_blocks = util.Deserialize(ip_blocks_str)
    except (util.DeserializeError, db.Error):
      logging.exception('IpInList(%s)', ip)
      return False  # lenient response
    # Note: The method below, parsing a serialized list of networks
    # expressed as strings, might seem silly. But the total time to
    # deserialize and translate the strings back into IP network/mask
    # integers is actually faster than storing them already split, e.g. a
    # list of 2 item lists (network,mask). Apparently JSON isn't as
    # efficient at parsing ints or nested lists.
    #
    # (pickle is 2X+ faster but not secure & deprecated inside util module)
    ip_int = ipcalc.IpToInt(ip)
    for ip_mask_str in ip_blocks:
      ip_mask = ipcalc.IpMaskToInts(ip_mask_str)
      # Membership test: network address match under the netmask.
      if (ip_int & ip_mask[1]) == ip_mask[0]:
        return True
    return False

  @classmethod
  def GetSerializedItem(cls, key):
    """Returns the deserialized value of a serialized cache.

    Returns:
      Tuple (deserialized value, mtime) or ({}, None) if the key is absent.
    """
    entity = cls.MemcacheWrappedGet(key)
    if entity and entity.blob_value:
      return util.Deserialize(entity.blob_value), entity.mtime
    else:
      return {}, None

  @classmethod
  def SetSerializedItem(cls, key, value):
    """Serializes a value and caches it to an entity with a given key.

    Args:
      key: str, key_name for the ReportsCache entity.
      value: any, a value of any kind to serialize and cache.
    """
    value = util.Serialize(value)
    cls.MemcacheWrappedSet(key, 'blob_value', value)

  @classmethod
  def GetItem(cls, name):
    """Returns tuple (text_value, mtime), or (None, None) if absent."""
    entity = cls.MemcacheWrappedGet(name)
    if entity:
      return entity.text_value, entity.mtime
    else:
      return None, None

  @classmethod
  def SetItem(cls, name, value):
    """Stores a text value under name, refreshing memcache."""
    return cls.MemcacheWrappedSet(name, 'text_value', value)
class ReportsCache(KeyValueCache):
  """Model for various reports data caching."""

  # Well-known key names for the cached report payloads.
  _SUMMARY_KEY = 'summary'
  _INSTALL_COUNTS_KEY = 'install_counts'
  _TRENDING_INSTALLS_KEY = 'trending_installs_%d_hours'
  _PENDING_COUNTS_KEY = 'pending_counts'
  _MSU_USER_SUMMARY_KEY = 'msu_user_summary'

  int_value = db.IntegerProperty()

  # TODO(user): migrate reports cache to properties.SerializedProperty()

  @classmethod
  def GetStatsSummary(cls):
    """Returns tuples (stats summary dictionary, datetime) from Datastore."""
    return cls.GetSerializedItem(cls._SUMMARY_KEY)

  @classmethod
  def SetStatsSummary(cls, d):
    """Sets a the stats summary dictionary to Datastore.

    Args:
      d: dict of summary data.
    """
    return cls.SetSerializedItem(cls._SUMMARY_KEY, d)

  @classmethod
  def GetInstallCounts(cls):
    """Returns tuple (install counts dict, datetime) from Datastore."""
    return cls.GetSerializedItem(cls._INSTALL_COUNTS_KEY)

  @classmethod
  def SetInstallCounts(cls, d):
    """Sets a the install counts dictionary to Datastore.

    Args:
      d: dict of summary data.
    """
    return cls.SetSerializedItem(cls._INSTALL_COUNTS_KEY, d)

  @classmethod
  def GetTrendingInstalls(cls, since_hours):
    """Returns tuple (trending installs dict, datetime) for a time window."""
    key = cls._TRENDING_INSTALLS_KEY % since_hours
    return cls.GetSerializedItem(key)

  @classmethod
  def SetTrendingInstalls(cls, since_hours, d):
    """Caches the trending installs dict for a time window."""
    key = cls._TRENDING_INSTALLS_KEY % since_hours
    return cls.SetSerializedItem(key, d)

  @classmethod
  def GetPendingCounts(cls):
    """Returns tuple (pending counts dict, datetime) from Datastore."""
    return cls.GetSerializedItem(cls._PENDING_COUNTS_KEY)

  @classmethod
  def SetPendingCounts(cls, d):
    """Sets a the pending counts dictionary to Datastore.

    Args:
      d: dict of summary data.
    """
    return cls.SetSerializedItem(cls._PENDING_COUNTS_KEY, d)

  @classmethod
  def _GetMsuUserSummaryKey(cls, since, tmp):
    """Builds the cache key for an MSU user summary (optionally temporary)."""
    if since is not None:
      since = '_since_%s_' % since
    else:
      since = ''
    # tmp * '_tmp' appends the '_tmp' suffix only when tmp is truthy.
    return '%s%s%s' % (cls._MSU_USER_SUMMARY_KEY, since, tmp * '_tmp')

  @classmethod
  def SetMsuUserSummary(cls, d, since=None, tmp=False):
    """Sets the msu user summary dictionary to Datstore.

    Args:
      d: dict of summary data.
      since: str, since when
      tmp: bool, default False, retrieve tmp summary (in process of
          calculation)
    """
    key = cls._GetMsuUserSummaryKey(since, tmp)
    return cls.SetSerializedItem(key, d)

  @classmethod
  def GetMsuUserSummary(cls, since, tmp=False):
    """Gets the MSU user summary dictionary from Datastore.

    Args:
      since: str, summary since date
      tmp: bool, default False, retrieve the in-progress (tmp) summary.
    Returns:
      (dict of summary data, datetime mtime) or None if no summary
    """
    key = cls._GetMsuUserSummaryKey(since, tmp)
    return cls.GetSerializedItem(key)

  @classmethod
  def DeleteMsuUserSummary(cls, since, tmp=False):
    """Deletes the MSU user summary entity from Datastore.

    Args:
      since: str, summary since date
      tmp: bool, default False, delete the in-progress (tmp) summary.
    """
    key = cls._GetMsuUserSummaryKey(since, tmp)
    entity = cls.get_by_key_name(key)
    if not entity:
      return
    entity.delete()
# Munki ########################################################################
class AuthSession(db.Model):
  """Auth sessions.

  key = session_id
  """

  data = db.StringProperty()  # opaque session payload.
  mtime = db.DateTimeProperty()  # last modification time.
  state = db.StringProperty()  # session state marker.
  uuid = db.StringProperty()  # client uuid the session belongs to.
  level = db.IntegerProperty(default=0)  # privilege level of the session.
class BaseCompressedMunkiModel(BaseModel):
  """Base class for Munki related models."""

  name = db.StringProperty()
  mtime = db.DateTimeProperty(auto_now=True)
  # catalog/manifest/pkginfo plist file.
  plist = properties.CompressedUtf8BlobProperty()
class AppleSUSCatalog(BaseCompressedMunkiModel):
  """Apple Software Update Service Catalog."""

  # HTTP Last-Modified header from the upstream Apple catalog fetch.
  last_modified_header = db.StringProperty()
class AppleSUSProduct(BaseModel):
  """Apple Software Update Service products."""

  product_id = db.StringProperty()
  name = db.StringProperty()
  version = db.StringProperty()
  description = db.TextProperty()
  restart_required = db.BooleanProperty()
  force_install_after_date = db.DateTimeProperty()
  apple_mtime = db.DateTimeProperty()  # modification time per Apple.
  tracks = db.StringListProperty()  # tracks the product is published to.
  mtime = db.DateTimeProperty(auto_now=True)
  # If manual_override, then auto-promotion will not occur.
  manual_override = db.BooleanProperty(default=False)
  # If unattended, then unattended installation will proceed.
  unattended = db.BooleanProperty(default=False)
  # If deprecated, then the product is entirely hidden and unused.
  deprecated = db.BooleanProperty(default=False)
  # Package download URLs.
  package_urls = db.StringListProperty()

  @classmethod
  def AllActive(cls, keys_only=False):
    """Returns a query for all AppleSUSProduct entities not deprecated."""
    return cls.all(keys_only=keys_only).filter('deprecated =', False)

  def _GetPkginfoPlist(self):
    """Returns a pkginfo plist for an Apple Update Product."""
    d = {
        'installer_type': 'apple_update_metadata',
        'name': self.product_id,
    }
    if self.unattended:
      d['unattended_install'] = self.unattended
    if self.force_install_after_date:
      d['force_install_after_date'] = self.force_install_after_date
    d['version'] = '1.0'  # TODO(user): find out if this is needed.
    plist = plist_lib.ApplePlist()
    plist.SetContents(d)
    return plist

  plist = property(_GetPkginfoPlist)

  def _GetForceInstallAfterDateStr(self):
    """Returns the force_install_after_date property in Munki catalog format.

    Returns None implicitly when force_install_after_date is unset.
    """
    if self.force_install_after_date:
      return self.force_install_after_date.strftime('%Y-%m-%dT%H:%M:%SZ')

  def _SetForceInstallAfterDateStr(self, str_dt):
    """Sets the force_install_after_date property from a string.

    Accepts 'YYYY-MM-DD HH:MM'; a bare 'YYYY-MM-DD' defaults to 13:00.

    Raises:
      ValueError: str_dt matches neither accepted format.
    """
    try:
      dt = datetime.datetime.strptime(str_dt, '%Y-%m-%d %H:%M')
    except ValueError:
      try:
        # Date-only input: assume a 13:00 install time.
        dt = datetime.datetime.strptime('%s 13:00' % (str_dt), '%Y-%m-%d %H:%M')
      except ValueError:
        raise
    self.force_install_after_date = dt

  force_install_after_date_str = property(
      _GetForceInstallAfterDateStr, _SetForceInstallAfterDateStr)

  def _GetMunkiName(self):
    """Returns a PackageName-Version formatted name of the product."""
    return '%s-%s' % (self.name, self.version)

  munki_name = property(_GetMunkiName)
class Tag(BaseModel):
  """A generic string tag that references a list of db.Key objects.

  The entity key_name is the tag name itself.
  """
  ALL_TAGS_MEMCACHE_KEY = 'all_tags'

  user = db.UserProperty(auto_current_user=True)  # user who last wrote the tag
  mrtime = db.DateTimeProperty(auto_now=True)     # last modified time
  keys = db.ListProperty(db.Key)                  # entities carrying this tag

  def put(self, *args, **kwargs):
    """Ensure tags memcache entries are purged when a new one is created."""
    memcache.delete(self.ALL_TAGS_MEMCACHE_KEY)
    return super(Tag, self).put(*args, **kwargs)

  def delete(self, *args, **kwargs):
    """Ensure tags memcache entries are purged when one is deleted."""
    # TODO(user): extend BaseModel so such memcache cleanup is reusable.
    memcache.delete(self.ALL_TAGS_MEMCACHE_KEY)
    return super(Tag, self).delete(*args, **kwargs)

  @classmethod
  def GetAllTagNames(cls):
    """Returns a case-insensitively sorted list of all tag names."""
    tags = memcache.get(cls.ALL_TAGS_MEMCACHE_KEY)
    if not tags:
      # Cache miss (or no tags yet): rebuild from Datastore keys.
      tags = [key.name() for key in cls.all(keys_only=True)]
      tags = sorted(tags, key=unicode.lower)
      memcache.set(cls.ALL_TAGS_MEMCACHE_KEY, tags)
    return tags

  @classmethod
  def GetAllTagNamesForKey(cls, key):
    """Returns a list of all tag names for a given db.Key."""
    return [k.name() for k in
            cls.all(keys_only=True).filter('keys =', key)]

  @classmethod
  def GetAllTagNamesForEntity(cls, entity):
    """Returns a list of all tag names referencing the given entity."""
    return cls.GetAllTagNamesForKey(entity.key())
class BaseManifestModification(BaseModel):
  """Manifest modifications for dynamic manifest generation."""
  # If False, the modification is ignored at manifest generation time.
  enabled = db.BooleanProperty(default=True)
  install_types = db.StringListProperty()  # ['managed_installs']
  # Value to be added or removed from the install_type above.
  value = db.StringProperty()  # fooinstallname. -fooinstallname to remove it.
  manifests = db.StringListProperty()  # ['unstable', 'testing']
  # Automatic properties to record who made the mod and when.
  mtime = db.DateTimeProperty(auto_now_add=True)
  user = db.UserProperty()

  def Serialize(self):
    """Returns a serialized string representation of the entity instance.

    Returns:
      str from util.Serialize of all property values; mtime/user are
      coerced to str so the dict is serializable.
    """
    d = {}
    for p in self.properties():
      d[p] = getattr(self, p)
      if p in ['mtime', 'user']:
        d[p] = str(d[p])
    return util.Serialize(d)

  def _GetTarget(self):
    """Returns the modification target property, defined by subclasses.

    Raises:
      NotImplementedError: subclass did not define TARGET_PROPERTY_NAME.
    """
    if not hasattr(self, 'TARGET_PROPERTY_NAME'):
      raise NotImplementedError
    return getattr(self, self.TARGET_PROPERTY_NAME)

  def _SetTarget(self, target):
    """Sets the modification target property, defined by subclasses.

    Raises:
      NotImplementedError: subclass did not define TARGET_PROPERTY_NAME.
    """
    if not hasattr(self, 'TARGET_PROPERTY_NAME'):
      raise NotImplementedError
    setattr(self, self.TARGET_PROPERTY_NAME, target)

  # Subclass-agnostic alias for the subclass-specific target property.
  target = property(_GetTarget, _SetTarget)

  @classmethod
  def GenerateInstance(cls, mod_type, target,
                       munki_pkg_name, remove=False, **kwargs):
    """Returns a model instance for the passed mod_type.

    Args:
      mod_type: str, modification type like 'site', 'owner', etc.
      target: str, modification target value, like 'foouser', or 'foouuid'.
      munki_pkg_name: str, name of the munki package to inject, like Firefox.
      remove: if True, will remove package from manifest instead of adding it.
      kwargs: any other properties to set on the model instance.
    Returns:
      A model instance with key_name, value and the model-specific mod key value
      properties already set.
    Raises:
      ValueError: if a manifest mod_type is unknown
    """
    key_name = '%s##%s' % (target, munki_pkg_name)
    model = MANIFEST_MOD_MODELS.get(mod_type, None)
    if not model:
      raise ValueError
    m = model(key_name=key_name)
    m.target = target
    m.value = munki_pkg_name
    if remove:
      # A leading '-' marks the package for removal from the manifest.
      m.value = '-' + m.value
    for kw in kwargs:
      setattr(m, kw, kwargs[kw])
    return m

  @classmethod
  def ResetModMemcache(cls, mod_type, target):
    """Clear the memcache associated with this modification type.

    Args:
      mod_type: str, modification type like 'site', 'owner', etc.
      target: str, modification target value, like 'foouser', or 'foouuid'.
    Raises:
      ValueError: if a manifest mod_type is unknown
    """
    model = MANIFEST_MOD_MODELS.get(mod_type, None)
    if not model:
      raise ValueError
    model.DeleteMemcacheWrappedGetAllFilter((('%s =' % mod_type, target),))
class SiteManifestModification(BaseManifestModification):
  """Manifest modifications for dynamic manifest generation by site."""
  TARGET_PROPERTY_NAME = 'site'
  site = db.StringProperty()  # NYC, MTV, etc.
class OSVersionManifestModification(BaseManifestModification):
  """Manifest modifications for dynamic manifest generation by OS version."""
  TARGET_PROPERTY_NAME = 'os_version'
  os_version = db.StringProperty()  # 10.6.5
class OwnerManifestModification(BaseManifestModification):
  """Manifest modifications for dynamic manifest generation by owner."""
  TARGET_PROPERTY_NAME = 'owner'
  owner = db.StringProperty()  # foouser
class UuidManifestModification(BaseManifestModification):
  """Manifest modifications for dynamic manifest generation by computer."""
  TARGET_PROPERTY_NAME = 'uuid'
  uuid = db.StringProperty()  # Computer.uuid format
class TagManifestModification(BaseManifestModification):
  """Manifest modifications for dynamic manifest generation by a tag."""
  TARGET_PROPERTY_NAME = 'tag_key_name'
  tag_key_name = db.StringProperty()  # Tag Model key_name.
# Maps a manifest modification type name to its model class; consumed by
# BaseManifestModification.GenerateInstance/ResetModMemcache.
MANIFEST_MOD_MODELS = {
    'owner': OwnerManifestModification,
    'uuid': UuidManifestModification,
    'site': SiteManifestModification,
    'os_version': OSVersionManifestModification,
    'tag': TagManifestModification,
}
class PackageAlias(BaseModel):
  """Maps an alias to a Munki package name.

  Note: PackageAlias key_name should be the alias name.
  """
  munki_pkg_name = db.StringProperty()   # package name the alias resolves to
  enabled = db.BooleanProperty(default=True)  # disabled aliases resolve to None

  @classmethod
  def ResolvePackageName(cls, pkg_alias):
    """Returns a package name for a given alias, or None if alias was not found.

    Args:
      pkg_alias: str package alias.
    Returns:
      str package name, or None if the pkg_alias was not found, is disabled,
      or has no munki_pkg_name set.
    """
    entity = cls.MemcacheWrappedGet(pkg_alias)
    if not entity:
      # TODO(user): email Simian admins ??
      logging.error('Unknown pkg_alias requested: %s', pkg_alias)
    elif entity.enabled and entity.munki_pkg_name:
      return entity.munki_pkg_name
    return None
class FirstClientConnection(BaseModel):
  """Model to keep track of new clients and whether they've been emailed."""
  mtime = db.DateTimeProperty(auto_now_add=True)  # when the client first connected
  computer = db.ReferenceProperty(Computer)
  owner = db.StringProperty()
  hostname = db.StringProperty()
  emailed = db.DateTimeProperty()  # when the notification email was sent
  office = db.StringProperty()
  site = db.StringProperty()
| |
"""Generate a schema wrapper from a schema"""
import argparse
import copy
import os
import sys
import json
import re
from os.path import abspath, join, dirname
import textwrap
from urllib import request
import m2r
# import schemapi from here
sys.path.insert(0, abspath(dirname(__file__)))
from schemapi import codegen # noqa: E402
from schemapi.codegen import CodeSnippet # noqa: E402
from schemapi.utils import ( # noqa: E402
get_valid_identifier,
SchemaInfo,
indent_arglist,
resolve_references,
)
import generate_api_docs # noqa: E402
# Map of version name to github branch name.
SCHEMA_VERSION = {
    "vega": {"v5": "v5.10.0"},
    "vega-lite": {"v3": "v3.4.0", "v4": "v4.17.0"}
    # "vega-lite": {"v3": "v3.4.0", "v4": "v4.8.1"},
}

# Matches the link text inside a markdown link "[text](url)"; the capture
# group makes re.split() return the link texts at odd indices.
reLink = re.compile(r"(?<=\[)([^\]]+)(?=\]\([^\)]+\))", re.M)
# Matches markdown emphasis markers (**, ***, __, ___) and backticks.
reSpecial = re.compile(r"[*_]{2,3}|`", re.M)
class SchemaGenerator(codegen.SchemaGenerator):
    """Schema generator that converts markdown docstrings to RST."""

    # Template for each generated wrapper class; filled in by codegen.
    schema_class_template = textwrap.dedent(
        '''
    class {classname}({basename}):
        """{docstring}"""
        _schema = {schema!r}

        {init_code}
    '''
    )

    def _process_description(self, description):
        """Convert a markdown schema description into RST for docstrings."""
        # reLink's capture group makes split() yield link texts at odd
        # indices; emphasis markers are stripped only from those pieces so
        # the surrounding link syntax survives the m2r conversion.
        description = "".join(
            [
                reSpecial.sub("", d) if i % 2 else d
                for i, d in enumerate(reLink.split(description))
            ]
        )  # remove formatting from links
        description = m2r.convert(description)
        description = description.replace(m2r.prolog, "")
        description = description.replace(":raw-html-m2r:", ":raw-html:")
        description = description.replace(r"\ ,", ",")
        description = description.replace(r"\ ", " ")
        # turn explicit references into anonymous references
        description = description.replace(">`_", ">`__")
        # NOTE(review): the appended newline is removed again by strip().
        description += "\n"
        return description.strip()
def schema_class(*args, **kwargs):
    """Convenience wrapper: build a SchemaGenerator and render its class source."""
    generator = SchemaGenerator(*args, **kwargs)
    return generator.schema_class()
SCHEMA_URL_TEMPLATE = "https://vega.github.io/schema/" "{library}/{version}.json"
BASE_SCHEMA = """
class {basename}(SchemaBase):
_rootschema = load_schema()
@classmethod
def _default_wrapper_classes(cls):
return _subclasses({basename})
"""
LOAD_SCHEMA = '''
import pkgutil
import json
def load_schema():
"""Load the json schema associated with this module's functions"""
return json.loads(pkgutil.get_data(__name__, '{schemafile}').decode('utf-8'))
'''
CHANNEL_MIXINS = """
class FieldChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
shorthand = self._get('shorthand')
field = self._get('field')
if shorthand is not Undefined and field is not Undefined:
raise ValueError("{} specifies both shorthand={} and field={}. "
"".format(self.__class__.__name__, shorthand, field))
if isinstance(shorthand, (tuple, list)):
# If given a list of shorthands, then transform it to a list of classes
kwds = self._kwds.copy()
kwds.pop('shorthand')
return [self.__class__(sh, **kwds).to_dict(validate=validate, ignore=ignore, context=context)
for sh in shorthand]
if shorthand is Undefined:
parsed = {}
elif isinstance(shorthand, str):
parsed = parse_shorthand(shorthand, data=context.get('data', None))
type_required = 'type' in self._kwds
type_in_shorthand = 'type' in parsed
type_defined_explicitly = self._get('type') is not Undefined
if not type_required:
# Secondary field names don't require a type argument in VegaLite 3+.
# We still parse it out of the shorthand, but drop it here.
parsed.pop('type', None)
elif not (type_in_shorthand or type_defined_explicitly):
if isinstance(context.get('data', None), pd.DataFrame):
raise ValueError("{} encoding field is specified without a type; "
"the type cannot be inferred because it does not "
"match any column in the data.".format(shorthand))
else:
raise ValueError("{} encoding field is specified without a type; "
"the type cannot be automatically inferred because "
"the data is not specified as a pandas.DataFrame."
"".format(shorthand))
else:
# Shorthand is not a string; we pass the definition to field,
# and do not do any parsing.
parsed = {'field': shorthand}
# Set shorthand to Undefined, because it's not part of the base schema.
self.shorthand = Undefined
self._kwds.update({k: v for k, v in parsed.items()
if self._get(k) is Undefined})
return super(FieldChannelMixin, self).to_dict(
validate=validate,
ignore=ignore,
context=context
)
class ValueChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
condition = getattr(self, 'condition', Undefined)
copy = self # don't copy unless we need to
if condition is not Undefined:
if isinstance(condition, core.SchemaBase):
pass
elif 'field' in condition and 'type' not in condition:
kwds = parse_shorthand(condition['field'], context.get('data', None))
copy = self.copy(deep=['condition'])
copy.condition.update(kwds)
return super(ValueChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
class DatumChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
datum = getattr(self, 'datum', Undefined)
copy = self # don't copy unless we need to
if datum is not Undefined:
if isinstance(datum, core.SchemaBase):
pass
return super(DatumChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
"""
class FieldSchemaGenerator(SchemaGenerator):
    """Generator for field channel wrapper classes (adds FieldChannelMixin)."""

    schema_class_template = textwrap.dedent(
        '''
    class {classname}(FieldChannelMixin, core.{basename}):
        """{docstring}"""
        _class_is_valid_at_instantiation = False
        _encoding_name = "{encodingname}"

        {init_code}
    '''
    )
class ValueSchemaGenerator(SchemaGenerator):
    """Generator for value channel wrapper classes (adds ValueChannelMixin)."""

    schema_class_template = textwrap.dedent(
        '''
    class {classname}(ValueChannelMixin, core.{basename}):
        """{docstring}"""
        _class_is_valid_at_instantiation = False
        _encoding_name = "{encodingname}"

        {init_code}
    '''
    )
class DatumSchemaGenerator(SchemaGenerator):
    """Generator for datum channel wrapper classes (adds DatumChannelMixin)."""

    schema_class_template = textwrap.dedent(
        '''
    class {classname}(DatumChannelMixin, core.{basename}):
        """{docstring}"""
        _class_is_valid_at_instantiation = False
        _encoding_name = "{encodingname}"

        {init_code}
    '''
    )
# Header prepended to every generated file.
HEADER = """\
# The contents of this file are automatically written by
# tools/generate_schema_wrapper.py. Do not modify directly.
"""
def schema_url(library, version):
    """Return the vega.github.io URL of the schema for library/version."""
    release_tag = SCHEMA_VERSION[library][version]
    return SCHEMA_URL_TEMPLATE.format(library=library, version=release_tag)
def download_schemafile(library, version, schemapath, skip_download=False):
    """Download (or locate) the JSON schema for the given library/version.

    Args:
        library: "vega" or "vega-lite".
        version: version key within SCHEMA_VERSION, e.g. "v4".
        schemapath: directory in which the schema file is stored.
        skip_download: if True, do not download; only verify the file exists.

    Returns:
        Path of the schema file.

    Raises:
        ValueError: skip_download is True but the file does not exist.
    """
    url = schema_url(library, version)
    # exist_ok avoids a TOCTOU race between an exists() check and makedirs().
    os.makedirs(schemapath, exist_ok=True)
    filename = os.path.join(schemapath, "{library}-schema.json".format(library=library))
    if not skip_download:
        request.urlretrieve(url, filename)
    elif not os.path.exists(filename):
        raise ValueError("Cannot skip download: {} does not exist".format(filename))
    return filename
def _copy_with_header(source_path, destination_path):
    """Copy source_path to destination_path, prepending the autogen HEADER."""
    print("Copying\n {}\n -> {}".format(source_path, destination_path))
    with open(source_path, "r", encoding="utf8") as source:
        with open(destination_path, "w", encoding="utf8") as dest:
            dest.write(HEADER)
            dest.writelines(source.readlines())


def copy_schemapi_util():
    """
    Copy the schemapi utility and its test file into altair/utils/
    """
    here = dirname(__file__)
    # copy the schemapi utility file
    _copy_with_header(
        abspath(join(here, "schemapi", "schemapi.py")),
        abspath(join(here, "..", "altair", "utils", "schemapi.py")),
    )
    # Copy the schemapi test file
    _copy_with_header(
        abspath(join(here, "schemapi", "tests", "test_schemapi.py")),
        abspath(join(here, "..", "altair", "utils", "tests", "test_schemapi.py")),
    )
def recursive_dict_update(schema, root, def_dict):
    """Walk schema, following "$ref" and "anyOf", and record in def_dict the
    $ref whose resolved properties contain each of def_dict's keys."""
    if "$ref" in schema:
        resolved = resolve_references(schema, root)
        if "properties" in resolved:
            ref = schema["$ref"]
            props = resolved["properties"]
            for key in def_dict:
                if key in props:
                    def_dict[key] = ref
        else:
            recursive_dict_update(resolved, root, def_dict)
    elif "anyOf" in schema:
        for option in schema["anyOf"]:
            recursive_dict_update(option, root, def_dict)
def get_field_datum_value_defs(propschema, root):
    """Return a dict mapping "field"/"datum"/"value" to the $ref defining it,
    omitting entries for which no definition was found."""
    def_dict = dict.fromkeys(("field", "datum", "value"))
    schema = propschema.schema
    if propschema.is_reference() and "properties" in schema:
        if "field" not in schema["properties"]:
            raise ValueError("Unexpected schema structure")
        def_dict["field"] = propschema.ref
    else:
        recursive_dict_update(schema, root, def_dict)
    return {name: ref for name, ref in def_dict.items() if ref}
def toposort(graph):
    """Topological sort of a directed acyclic graph.

    Parameters
    ----------
    graph : dict of lists
        Mapping of node labels to list of child node labels.
        This is assumed to represent a graph with no cycles.

    Returns
    -------
    order : list
        topological order of input graph.
    """
    order = []
    seen = {}

    def _visit(nodes):
        # Reverse-sorted iteration combined with insert(0, ...) yields a
        # deterministic, alphabetically stable topological order.
        for node in sorted(nodes, reverse=True):
            if not seen.get(node):
                seen[node] = True
                _visit(graph.get(node, []))
                order.insert(0, node)

    _visit(graph)
    return order
def generate_vegalite_schema_wrapper(schema_file):
    """Generate the source of the vega-lite core.py schema wrapper module.

    Args:
        schema_file: path to the vega-lite JSON schema file.
    Returns:
        str, Python source for the generated module.
    """
    # TODO: generate simple tests for each wrapper
    basename = "VegaLiteSchema"

    with open(schema_file, encoding="utf8") as f:
        rootschema = json.load(f)

    # Build one SchemaGenerator per top-level definition.
    definitions = {}
    for name in rootschema["definitions"]:
        defschema = {"$ref": "#/definitions/" + name}
        defschema_repr = {"$ref": "#/definitions/" + name}
        name = get_valid_identifier(name)
        definitions[name] = SchemaGenerator(
            name,
            schema=defschema,
            schemarepr=defschema_repr,
            rootschema=rootschema,
            basename=basename,
            rootschemarepr=CodeSnippet("{}._rootschema".format(basename)),
        )

    # Build the subclass graph; each child's basename is replaced by the
    # list of its parents so generated classes inherit from all of them.
    graph = {}
    for name, schema in definitions.items():
        graph[name] = []
        for child in schema.subclasses():
            child = get_valid_identifier(child)
            graph[name].append(child)
            child = definitions[child]
            if child.basename == basename:
                child.basename = [name]
            else:
                child.basename.append(name)

    contents = [
        HEADER,
        "from altair.utils.schemapi import SchemaBase, Undefined, _subclasses",
        LOAD_SCHEMA.format(schemafile="vega-lite-schema.json"),
    ]
    contents.append(BASE_SCHEMA.format(basename=basename))
    contents.append(
        schema_class(
            "Root",
            schema=rootschema,
            basename=basename,
            schemarepr=CodeSnippet("{}._rootschema".format(basename)),
        )
    )

    # Emit classes parents-first so base classes are defined before use.
    for name in toposort(graph):
        contents.append(definitions[name].schema_class())

    contents.append("")  # end with newline
    return "\n".join(contents)
def generate_vega_schema_wrapper(schema_file):
    """Generate the source of the vega core.py schema wrapper module.

    Args:
        schema_file: path to the vega JSON schema file.
    Returns:
        str, Python source for the generated module.
    """
    # TODO: generate simple tests for each wrapper
    basename = "VegaSchema"

    with open(schema_file, encoding="utf8") as f:
        rootschema = json.load(f)
    contents = [
        HEADER,
        "from altair.utils.schemapi import SchemaBase, Undefined, _subclasses",
        LOAD_SCHEMA.format(schemafile="vega-schema.json"),
    ]
    contents.append(BASE_SCHEMA.format(basename=basename))
    contents.append(
        schema_class(
            "Root",
            schema=rootschema,
            basename=basename,
            schemarepr=CodeSnippet("{}._rootschema".format(basename)),
        )
    )
    # The vega schema keeps its definitions under both "defs" and "refs".
    for deflist in ["defs", "refs"]:
        for name in rootschema[deflist]:
            defschema = {"$ref": "#/{}/{}".format(deflist, name)}
            defschema_repr = {"$ref": "#/{}/{}".format(deflist, name)}
            contents.append(
                schema_class(
                    get_valid_identifier(name),
                    schema=defschema,
                    schemarepr=defschema_repr,
                    rootschema=rootschema,
                    basename=basename,
                    rootschemarepr=CodeSnippet("Root._schema"),
                )
            )
    contents.append("")  # end with newline
    return "\n".join(contents)
def generate_vegalite_channel_wrappers(schemafile, version, imports=None):
    """Generate the source of the vega-lite channels.py wrapper module.

    Args:
        schemafile: path to the vega-lite JSON schema file.
        version: str vega-lite version key, e.g. "v2" or "v4".
        imports: optional list of import lines; defaults are used if None.
    Returns:
        str, Python source for the generated module.
    """
    # TODO: generate __all__ for top of file
    with open(schemafile, encoding="utf8") as f:
        schema = json.load(f)
    if imports is None:
        imports = [
            "from . import core",
            "import pandas as pd",
            "from altair.utils.schemapi import Undefined",
            "from altair.utils import parse_shorthand",
        ]
    contents = [HEADER]
    contents.extend(imports)
    contents.append("")
    contents.append(CHANNEL_MIXINS)

    # The encoding definition was renamed after vega-lite v2.
    if version == "v2":
        encoding_def = "EncodingWithFacet"
    else:
        encoding_def = "FacetedEncoding"

    encoding = SchemaInfo(schema["definitions"][encoding_def], rootschema=schema)

    # One wrapper class per channel property per field/datum/value flavor.
    for prop, propschema in encoding.properties.items():
        def_dict = get_field_datum_value_defs(propschema, schema)

        for encoding_spec, definition in def_dict.items():
            classname = prop[0].upper() + prop[1:]
            basename = definition.split("/")[-1]
            basename = get_valid_identifier(basename)

            defschema = {"$ref": definition}

            if encoding_spec == "field":
                Generator = FieldSchemaGenerator
                nodefault = []
                defschema = copy.deepcopy(resolve_references(defschema, schema))

                # For Encoding field definitions, we patch the schema by adding the
                # shorthand property.
                defschema["properties"]["shorthand"] = {
                    "type": "string",
                    "description": "shorthand for field, aggregate, and type",
                }
                defschema["required"] = ["shorthand"]

            elif encoding_spec == "datum":
                Generator = DatumSchemaGenerator
                classname += "Datum"
                nodefault = ["datum"]

            elif encoding_spec == "value":
                Generator = ValueSchemaGenerator
                classname += "Value"
                nodefault = ["value"]

            gen = Generator(
                classname=classname,
                basename=basename,
                schema=defschema,
                rootschema=schema,
                encodingname=prop,
                nodefault=nodefault,
            )
            contents.append(gen.schema_class())
    return "\n".join(contents)
MARK_METHOD = '''
def mark_{mark}({def_arglist}):
"""Set the chart's mark to '{mark}'
For information on additional arguments, see :class:`{mark_def}`
"""
kwds = dict({dict_arglist})
copy = self.copy(deep=False)
if any(val is not Undefined for val in kwds.values()):
copy.mark = core.{mark_def}(type="{mark}", **kwds)
else:
copy.mark = "{mark}"
return copy
'''
def generate_vegalite_mark_mixin(schemafile, markdefs):
    """Generate the MarkMethodMixin class source for the mixins.py module.

    Args:
        schemafile: path to the vega-lite JSON schema file.
        markdefs: dict mapping mark enum name -> mark definition name,
            e.g. {"Mark": "MarkDef"}.
    Returns:
        (imports, code): list of import lines and the class source str.
    """
    with open(schemafile, encoding="utf8") as f:
        schema = json.load(f)

    imports = ["from altair.utils.schemapi import Undefined", "from . import core"]

    code = [
        "class MarkMethodMixin(object):",
        '    """A mixin class that defines mark methods"""',
    ]

    for mark_enum, mark_def in markdefs.items():
        # A mark enum either lists several marks or is a single const.
        if "enum" in schema["definitions"][mark_enum]:
            marks = schema["definitions"][mark_enum]["enum"]
        else:
            marks = [schema["definitions"][mark_enum]["const"]]
        info = SchemaInfo({"$ref": "#/definitions/" + mark_def}, rootschema=schema)

        # adapted from SchemaInfo.init_code
        nonkeyword, required, kwds, invalid_kwds, additional = codegen._get_args(info)
        # "type" is fixed by the generated method itself, so drop it.
        required -= {"type"}
        kwds -= {"type"}

        def_args = ["self"] + [
            "{}=Undefined".format(p) for p in (sorted(required) + sorted(kwds))
        ]
        dict_args = ["{0}={0}".format(p) for p in (sorted(required) + sorted(kwds))]

        if additional or invalid_kwds:
            def_args.append("**kwds")
            dict_args.append("**kwds")

        for mark in marks:
            # TODO: only include args relevant to given type?
            mark_method = MARK_METHOD.format(
                mark=mark,
                mark_def=mark_def,
                def_arglist=indent_arglist(def_args, indent_level=10 + len(mark)),
                dict_arglist=indent_arglist(dict_args, indent_level=16),
            )
            # Re-indent the method body so it nests inside the class.
            code.append("\n    ".join(mark_method.splitlines()))

    return imports, "\n".join(code)
CONFIG_METHOD = """
@use_signature(core.{classname})
def {method}(self, *args, **kwargs):
copy = self.copy(deep=False)
copy.config = core.{classname}(*args, **kwargs)
return copy
"""
CONFIG_PROP_METHOD = """
@use_signature(core.{classname})
def configure_{prop}(self, *args, **kwargs):
copy = self.copy(deep=['config'])
if copy.config is Undefined:
copy.config = core.Config()
copy.config["{prop}"] = core.{classname}(*args, **kwargs)
return copy
"""
def generate_vegalite_config_mixin(schemafile):
    """Generate the ConfigMethodMixin class source for the mixins.py module.

    Args:
        schemafile: path to the vega-lite JSON schema file.
    Returns:
        (imports, code): list of import lines and the class source str.
    """
    imports = ["from . import core", "from altair.utils import use_signature"]
    code = [
        "class ConfigMethodMixin(object):",
        '    """A mixin class that defines config methods"""',
    ]
    with open(schemafile, encoding="utf8") as f:
        schema = json.load(f)
    info = SchemaInfo({"$ref": "#/definitions/Config"}, rootschema=schema)

    # configure() method
    method = CONFIG_METHOD.format(classname="Config", method="configure")
    code.append("\n    ".join(method.splitlines()))

    # configure_prop() methods
    for prop, prop_info in info.properties.items():
        classname = prop_info.refname
        if classname and classname.endswith("Config"):
            method = CONFIG_PROP_METHOD.format(classname=classname, prop=prop)
            code.append("\n    ".join(method.splitlines()))

    return imports, "\n".join(code)
def vegalite_main(skip_download=False):
    """Generate all vega-lite schema wrapper modules, one per version.

    For each version in SCHEMA_VERSION['vega-lite'], downloads (or reuses)
    the schema and writes __init__.py, core.py, channels.py and mixins.py
    under altair/vegalite/<version>/schema/.

    Args:
        skip_download: passed through to download_schemafile().
    """
    library = "vega-lite"

    for version in SCHEMA_VERSION[library]:
        path = abspath(join(dirname(__file__), "..", "altair", "vegalite", version))
        schemapath = os.path.join(path, "schema")
        schemafile = download_schemafile(
            library=library,
            version=version,
            schemapath=schemapath,
            skip_download=skip_download,
        )

        # Generate __init__.py file
        outfile = join(schemapath, "__init__.py")
        print("Writing {}".format(outfile))
        with open(outfile, "w", encoding="utf8") as f:
            f.write("# flake8: noqa\n")
            f.write("from .core import *\nfrom .channels import *\n")
            f.write(
                "SCHEMA_VERSION = {!r}\n" "".format(SCHEMA_VERSION[library][version])
            )
            f.write("SCHEMA_URL = {!r}\n" "".format(schema_url(library, version)))

        # Generate the core schema wrappers
        outfile = join(schemapath, "core.py")
        print("Generating\n {}\n ->{}".format(schemafile, outfile))
        file_contents = generate_vegalite_schema_wrapper(schemafile)
        with open(outfile, "w", encoding="utf8") as f:
            f.write(file_contents)

        # Generate the channel wrappers
        outfile = join(schemapath, "channels.py")
        print("Generating\n {}\n ->{}".format(schemafile, outfile))
        code = generate_vegalite_channel_wrappers(schemafile, version=version)
        with open(outfile, "w", encoding="utf8") as f:
            f.write(code)

        # generate the mark mixin; v2 had a single Mark definition while
        # later versions add composite mark definitions.
        if version == "v2":
            markdefs = {"Mark": "MarkDef"}
        else:
            markdefs = {
                k: k + "Def" for k in ["Mark", "BoxPlot", "ErrorBar", "ErrorBand"]
            }
        outfile = join(schemapath, "mixins.py")
        print("Generating\n {}\n ->{}".format(schemafile, outfile))
        mark_imports, mark_mixin = generate_vegalite_mark_mixin(schemafile, markdefs)
        config_imports, config_mixin = generate_vegalite_config_mixin(schemafile)
        imports = sorted(set(mark_imports + config_imports))
        with open(outfile, "w", encoding="utf8") as f:
            f.write(HEADER)
            f.write("\n".join(imports))
            f.write("\n\n\n")
            f.write(mark_mixin)
            f.write("\n\n\n")
            f.write(config_mixin)
def vega_main(skip_download=False):
    """Generate all vega schema wrapper modules, one per version.

    For each version in SCHEMA_VERSION['vega'], downloads (or reuses) the
    schema and writes __init__.py and core.py under
    altair/vega/<version>/schema/.

    Args:
        skip_download: passed through to download_schemafile().
    """
    library = "vega"

    for version in SCHEMA_VERSION[library]:
        path = abspath(join(dirname(__file__), "..", "altair", "vega", version))
        schemapath = os.path.join(path, "schema")
        schemafile = download_schemafile(
            library=library,
            version=version,
            schemapath=schemapath,
            skip_download=skip_download,
        )

        # Generate __init__.py file
        outfile = join(schemapath, "__init__.py")
        print("Writing {}".format(outfile))
        with open(outfile, "w", encoding="utf8") as f:
            f.write("# flake8: noqa\n")
            f.write("from .core import *\n\n")
            f.write(
                "SCHEMA_VERSION = {!r}\n" "".format(SCHEMA_VERSION[library][version])
            )
            f.write("SCHEMA_URL = {!r}\n" "".format(schema_url(library, version)))

        # Generate the core schema wrappers
        outfile = join(schemapath, "core.py")
        print("Generating\n {}\n ->{}".format(schemafile, outfile))
        file_contents = generate_vega_schema_wrapper(schemafile)
        with open(outfile, "w", encoding="utf8") as f:
            f.write(file_contents)
def main():
    """CLI entry point: regenerate all Altair schema wrapper modules."""
    arg_parser = argparse.ArgumentParser(
        prog="generate_schema_wrapper.py", description="Generate the Altair package."
    )
    arg_parser.add_argument(
        "--skip-download", action="store_true", help="skip downloading schema files"
    )
    options = arg_parser.parse_args()

    copy_schemapi_util()
    for generate in (vegalite_main, vega_main):
        generate(options.skip_download)
    generate_api_docs.write_api_file()


if __name__ == "__main__":
    main()
| |
#
# Copyright (c) 2010-2010 LinkedIn, Inc
# Portions Copyright (c) 2011 Yan Pujante
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# Minimal Python version is 2.5
"""A REST wrapper to talk to GLUConsole."""
__author__ = ['Manish Dubey', 'Dan Sully', 'Yan Pujante']
__version__ = '@console.version@'
import logging
import re
import restkit
import sys
import time
import urllib
import urlparse
from xml.dom.minidom import parseString
# Progress bars are optional: used only when the progressbar package is
# installed AND stdout is an interactive terminal.
try:
    import progressbar
    use_progressbar = sys.stdout.isatty()  # isatty() already returns a bool
except ImportError:
    use_progressbar = False

logger = logging.getLogger('gluconsole.rest.Client')  # pylint: disable=C0103
logger.setLevel(logging.INFO)
class Client:
"""A REST wrapper to talk to GLUConsole"""
def __init__(self, fabric='dev', url='http://localhost:8080',
             username='glua', password='password', version='v1'):
    """Initialize the REST client.

    :param fabric: GLU fabric name.
    :param url: console base URL.
    :param username: basic-auth username.
    :param password: basic-auth password.
    :param version: REST API version path component.
    """
    # BUG FIX: use_progressbar is a module-level flag (read by
    # _execute_plan); without this declaration the assignment below
    # created an unused local and never disabled the progress bar.
    global use_progressbar
    self.uri_base = url
    self.uri_path = 'console/rest/%s/%s' % (version, fabric)
    self.auth = restkit.BasicAuth(username, password)
    self._action_successful = None
    # Toggle logging for restkit
    if logging.getLogger('gluconsole.rest.Client').level == logging.DEBUG:
        logging.getLogger('restkit').setLevel(logging.DEBUG)
        use_progressbar = False
@property
def action_successful(self):
    """True if the latest executed action succeeded.

    None until an action has been executed (set by _execute_plan).
    """
    return self._action_successful
# Private method:
# Actually perform the HTTP request and return the results
def _do_request(self, action, method, body=None, headers=None):
    """Issue an HTTP request against the GLU console REST API.

    :param action: URI path fragment appended to the fabric base path.
    :param method: HTTP method; one of GET/HEAD/DELETE/POST/PUT.
    :param body: optional request body.
    :param headers: optional dict of extra request headers.
    :return: restkit response with the body pre-read into response.body.
    :raises restkit.errors.InvalidRequestMethod: unsupported HTTP method.
    :raises restkit.errors.ResourceNotFound: HTTP 404.
    :raises restkit.errors.Unauthorized: HTTP 401.
    :raises restkit.errors.RequestFailed: any other HTTP status >= 400.
    """
    url = urlparse.urljoin(self.uri_base, self.uri_path + '/' + action)
    logger.debug('URL via (%s): %s' % (method, url))

    if headers is None:
        headers = {}
    headers['User-Agent'] = '@console.name@-@console.version@'

    if method in ('GET', 'HEAD', 'DELETE', 'POST', 'PUT'):
        response = restkit.request(url, method=method, body=body,
                                   headers=headers, filters=[self.auth])
    else:
        raise restkit.errors.InvalidRequestMethod('Unsupported Method')

    if response.status_int >= 400:
        if response.status_int == 404:
            raise restkit.errors.ResourceNotFound(response.body_string())
        elif response.status_int == 401:
            raise restkit.errors.Unauthorized('Unauthorized Request')
        raise restkit.errors.RequestFailed(
            response.body_string(), response.status_int, response)

    # body_string() can only be called once, since it's a socket read, or
    # will throw an AlreadyRead exception.
    response.body = response.body_string()
    return response
def generateSystemFilter(self, agent=None,  # pylint: disable=C0103
        instance=None, allTags=None, anyTag=None):  # pylint: disable=C0103
    """Deprecated version of generate_system_filter().

    :return: str GLU systemFilter expression, or None for no filter.
    """
    logger.warn('DEPRECATED: Client.generateSystemFilter() is deprecated,'
                ' use Client.generate_system_filter() instead.')
    # BUG FIX: the result was previously discarded, so this deprecated
    # wrapper always returned None regardless of its arguments.
    return self.generate_system_filter(
        agent=agent, instance=instance, all_tags=allTags, any_tag=anyTag)
def generate_system_filter(self,
agent=None, instance=None, all_tags=None, any_tag=None):
"""Create a GLU systemFilter string
:param action: Action to perform: start, stop, bounce, deploy,
undeploy, redeploy
:param agent: agent filter.
:param instance: Instance filter.
:param all_tags: all tags filter.
:param any_tag: any tag filter.
"""
if agent:
return "agent='%s'" % agent
if instance:
return "key='%s'" % instance
if all_tags:
return "tags='%s'" % all_tags
if any_tag:
return "tags.hasAny('%s')" % any_tag
return None
def generateActionRequest(self,  # pylint: disable=C0103
        action, systemFilter, parallel=False):  # pylint: disable=C0103
    """Deprecated version of generate_action_request().

    :return: bytestring action query string.
    """
    logger.warn('DEPRECATED: Client.generateActionRequest() is deprecated,'
                ' use Client.generate_action_request() instead.')
    # BUG FIX: the result was previously discarded, so this deprecated
    # wrapper always returned None regardless of its arguments.
    return self.generate_action_request(action, systemFilter, parallel=parallel)
def generate_action_request(self, action, system_filter, parallel=False):
    """Create a GLU action query string.

    This contains a system_filter, as well as an action, and order.

    :param action: Action to perform: start, stop, bounce, deploy,
                   undeploy, redeploy
    :param system_filter: Filter as created from generate_system_filter.
    :param parallel: True to run stop in parallel, False (default) for
                     serial.
    :return: bytestring of '&'-joined query parameters.
    """
    params = ['planAction=' + action]
    # An empty/None filter implies 'all'
    if system_filter:
        params.append('systemFilter=' + restkit.util.url_quote(system_filter))
    params.append('order=parallel' if parallel else 'order=sequential')
    return restkit.util.to_bytestring('&'.join(params))
def _execute_plan(self, created_plan, dryrun):
    """Execute a created plan.

    :param created_plan: response object from the plan-creation POST; a
        204 status means the console had nothing to do.
    :param dryrun: if True, fetch and return the execution plan body
        without running it.
    :return: console message (204), plan body (dryrun), or the final
        completion status string (e.g. '100:COMPLETED').
    """
    self._action_successful = None
    # 204 No Content: no plan was created, report the console's reason.
    if created_plan.status_int == 204:
        self._action_successful = True
        return 'GLU Console message: %s' % (
            created_plan.status.split(' ', 1)[-1])
    # Pattern used to strip scheme/host/prefix so only the console URI
    # path remains for follow-up requests.
    url2uri_pat = r'https?://[-.:\w]+/(?:.*?/)?%s/' % self.uri_path
    # unique identifier for the plan just created
    plan_url = created_plan['location']
    plan_url = re.sub(url2uri_pat, '', plan_url)
    logger.debug('plan url = %s', plan_url)
    # inspect execution plan here, if you need
    exec_plan = self._do_request(plan_url, 'GET')
    logger.debug('body = %s', exec_plan.body)
    if dryrun:
        self._action_successful = True
        return exec_plan.body
    # execute the plan
    plan_url += '/execution'
    logger.info('executing plan: %s', plan_url)
    plan_status = self._do_request(plan_url, 'POST')
    # check status of plan execution
    status_url = plan_status['location']
    status_url = re.sub(url2uri_pat, '', status_url)
    logger.info('status url = %s', status_url)
    # wait until plan is 100% executed.
    completed = re.compile(r'^100')
    if use_progressbar:
        widgets = [' ', progressbar.Percentage(),
                   ' ', progressbar.Bar(marker='*', left='[', right=']'),
                   ' ', progressbar.ETA(), ' ']
        progress = progressbar.ProgressBar(widgets=widgets, maxval=100)
        progress.start()
    while 1:
        # The console reports progress via an 'NN:STATE' response header.
        progress_status = self._do_request(status_url, 'HEAD')
        complete_status = progress_status['x-glu-completion']
        percent_complete = re.split(':', complete_status)
        if not completed.match(complete_status):
            if use_progressbar:
                progress.update(int(percent_complete[0]))
            else:
                logger.info(
                    'InProgress: %s%% complete', percent_complete[0])
        else:
            if use_progressbar:
                progress.finish()
            else:
                logger.info('Completed : %s', complete_status)
            break
        # Poll every two seconds until the plan reaches 100%.
        time.sleep(2)
    self._action_successful = complete_status.startswith('100:COMPLETED')
    if complete_status.endswith('FAILED'):
        self.print_failed_jobs(status_url)
    return complete_status
def print_failed_jobs(self, status_url):
    """
    Prints failed jobs in current deployment, if any exist.

    :param status_url: current deployment job status url
    """
    status = self._do_request(status_url, 'GET')
    # The console returns an XML status document; failures appear as
    # <exception message="[app] ... action=..."> elements.
    soup = parseString(status.body)
    errors = soup.getElementsByTagName('exception')
    # Captures the bracketed app name and the action from the message.
    message_re = re.compile(r'\[(.*)\].*action=(.*)')
    for err in errors:
        msg = err.attributes.get('message')
        # NOTE(review): if an <exception> lacks a message attribute,
        # msg is None and .value raises — confirm the console always
        # sets it.
        result = message_re.findall(msg.value)
        if result:
            # result[0] is an (app, action) tuple feeding both %s slots.
            logger.info('Failed "%s" app to %s' % result[0])
def executePlan(self, action, systemFilter,  # pylint: disable=C0103
        parallel=False, dryrun=False):  # pylint: disable=C0103
    """Deprecated version of execute_plan().

    Forwards to execute_plan() and returns its result.
    """
    logger.warn('DEPRECATED: Client.executePlan() is deprecated,'
                ' use Client.execute_plan() instead.')
    # Bug fix: return the execution status; previously the wrapper
    # swallowed it and always returned None.
    return self.execute_plan(
        action, systemFilter, parallel=parallel, dryrun=dryrun)
def execute_plan(self, action, system_filter, parallel=False, dryrun=False):
    """Create a plan on the GLU console and run it.

    :param action: one of start, stop, bounce, deploy, undeploy, redeploy.
    :param system_filter: filter as created by generate_system_filter().
    :param parallel: True to execute in parallel, False (default) serial.
    :param dryrun: create and inspect the plan but do not execute it.
    :raises StandardError: when the action is not a recognised verb.
    """
    self._action_successful = None
    valid_actions = (
        'start', 'stop', 'bounce', 'deploy', 'undeploy', 'redeploy')
    if action not in valid_actions:
        raise StandardError('Action %s is invalid.' % action)
    request_headers = {
        'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
    }
    request_body = self.generate_action_request(
        action, system_filter, parallel)
    created_plan = self._do_request(
        'plans', 'POST', request_body, request_headers)
    return self._execute_plan(created_plan, dryrun)
def loadModel(self, modelUrl=None, modelFile=None):  # pylint: disable=C0103
    """Deprecated version of load_model().

    Forwards to load_model() and returns its result.
    """
    # Bug fix: the docstring and the deprecation message previously
    # pointed users at execute_plan() instead of load_model(), and the
    # result of load_model() was dropped.
    logger.warn('DEPRECATED: Client.loadModel() is deprecated,'
                ' use Client.load_model() instead.')
    return self.load_model(model_url=modelUrl, model_filename=modelFile)
def load_model(self, model_url=None, model_filename=None):
    """Load a model into the console.

    :param model_url: to load the model from a model URL (the console
        fetches it itself)
    :param model_filename: to load a model directly from a local file
    :raises StandardError: when neither argument is supplied.
    :return: human-readable success message, or '' on error statuses.
    """
    if (model_url is None) and (model_filename is None):
        raise StandardError('model_url or model_filename must be provided')
    status = None
    self._action_successful = None
    if model_url is not None:
        # Ask the console to pull the model from the given URL.
        body = {
            'modelUrl': model_url
        }
        response = self._do_request('model/static', 'POST', body)
        status = response.status_int
    else:
        # Upload the model file contents directly as the request body.
        with open(model_filename, 'r') as model_file:
            headers = {
                'Content-Type': 'text/json'
            }
            response = self._do_request(
                'model/static', 'POST', model_file, headers)
            status = response.status_int
    # XXX - Should exceptions be thrown here?
    if status == 201:
        self._action_successful = True
        return 'Model loaded successfully: ' + response.body
    if status == 204:
        # Accepted but identical to the current model.
        self._action_successful = True
        return 'Model applied, but was not updated.'
    self._action_successful = False
    # Error statuses intentionally return '' (callers inspect
    # _action_successful); the expected meanings are noted below.
    if status == 400:
        #"Error: Invalid model."
        return ''
    if status == 404:
        #"Error: model not found."
        return ''
    return ''
def status(self,
        live=True, beautify=False, system_filter=None, filter=None):
    """Retrieve the model, either currently loaded or live as JSON.

    :param live: If True, retrieve the live model. Otherwise the loaded
                 model.
    :param beautify: If True, use a pretty printer on the output.
    :param system_filter: The DSL/filter syntax for the GLU console to parse.
    :param filter: deprecated alias for system_filter; deliberately
        shadows the builtin to keep the old keyword working.
    """
    if filter:
        logger.warn('DEPRECATED: filter argument in Client.status() is'
                    ' deprecated, use system_filter instead.')
        system_filter = filter
    uri = 'model/live' if live else 'model/static'
    params = {}
    if beautify:
        params['prettyPrint'] = 'true'
    if system_filter:
        params['systemFilter'] = system_filter
    # Only append a query string when at least one option was requested.
    if params:
        uri = '%s?%s' % (uri, urllib.urlencode(params))
    response = self._do_request(uri, 'GET')
    return response.body
def agents(self, beautify=False):
    """Return the GLU agents status from the console.

    :param beautify: If True, ask the console to pretty-print the output.
    """
    query = {'prettyPrint': 'true'} if beautify else {}
    uri = 'agents'
    if query:
        uri = '%s?%s' % (uri, urllib.urlencode(query))
    return self._do_request(uri, 'GET').body
| |
#!/usr/bin/env python
#
# test_multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
import sys, codecs, os.path
import unittest
from test import test_support
from StringIO import StringIO
class TestBase:
    # Common unittest mixin for CJK codec tests; concrete subclasses must
    # provide at least `encoding`, `tstring` and `codectests`.
    encoding = ''   # codec name
    codec = None    # codec tuple (with 4 elements)
    tstring = ''    # string to test StreamReader
    codectests = None   # must set. codec test tuple
    roundtriptest = 1   # set if roundtrip is possible with unicode
    has_iso10646 = 0    # set if this encoding contains whole iso10646 map
    xmlcharnametest = None  # string to test xmlcharrefreplace
    unmappedunicode = u'\udeee' # a unicode codepoint that is not mapped.

    def setUp(self):
        # Resolve the codec lazily so subclasses only need to name it.
        if self.codec is None:
            self.codec = codecs.lookup(self.encoding)
        self.encode = self.codec.encode
        self.decode = self.codec.decode
        self.reader = self.codec.streamreader
        self.writer = self.codec.streamwriter
        self.incrementalencoder = self.codec.incrementalencoder
        self.incrementaldecoder = self.codec.incrementaldecoder

    def test_chunkcoding(self):
        # tstring pairs native-encoded and UTF-8 reference data line by
        # line; decode each native line and compare, then round-trip.
        for native, utf8 in zip(*[StringIO(f).readlines()
                                  for f in self.tstring]):
            u = self.decode(native)[0]
            self.assertEqual(u, utf8.decode('utf-8'))
            if self.roundtriptest:
                self.assertEqual(native, self.encode(u)[0])

    def test_errorhandle(self):
        # codectests rows are (source, error-scheme, expected); a str
        # source exercises decode, anything else exercises encode, and a
        # falsy expectation means the call must raise UnicodeError.
        for source, scheme, expected in self.codectests:
            if type(source) == type(''):
                func = self.decode
            else:
                func = self.encode
            if expected:
                result = func(source, scheme)[0]
                self.assertEqual(result, expected)
            else:
                self.assertRaises(UnicodeError, func, source, scheme)

    def test_xmlcharrefreplace(self):
        if self.has_iso10646:
            return
        s = u"\u0b13\u0b23\u0b60 nd eggs"
        # NOTE(review): the expected literal below looks like an
        # HTML-decoded form of numeric character references (&#...;) —
        # verify against the upstream file before relying on it.
        self.assertEqual(
            self.encode(s, "xmlcharrefreplace")[0],
            "ଓଣୠ nd eggs"
        )

    def test_customreplace_encode(self):
        if self.has_iso10646:
            return
        from htmlentitydefs import codepoint2name

        def xmlcharnamereplace(exc):
            # Error handler that replaces unencodable characters with
            # named (or numeric) XML character entities.
            if not isinstance(exc, UnicodeEncodeError):
                raise TypeError("don't know how to handle %r" % exc)
            l = []
            for c in exc.object[exc.start:exc.end]:
                if ord(c) in codepoint2name:
                    l.append(u"&%s;" % codepoint2name[ord(c)])
                else:
                    l.append(u"&#%d;" % ord(c))
            return (u"".join(l), exc.end)

        codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
        if self.xmlcharnametest:
            sin, sout = self.xmlcharnametest
        else:
            sin = u"\xab\u211c\xbb = \u2329\u1234\u232a"
            # NOTE(review): same concern as above — this looks like a
            # decoded entity string; confirm against upstream.
            sout = "«ℜ» = ⟨ሴ⟩"
        self.assertEqual(self.encode(sin,
                                     "test.xmlcharnamereplace")[0], sout)

    def test_callback_wrong_objects(self):
        # `ret` is bound late from the loop below, so each iteration makes
        # the handler return a different invalid replacement object.
        def myreplace(exc):
            return (ret, exc.end)
        codecs.register_error("test.cjktest", myreplace)
        for ret in ([1, 2, 3], [], None, object(), 'string', ''):
            self.assertRaises(TypeError, self.encode, self.unmappedunicode,
                              'test.cjktest')

    def test_callback_long_index(self):
        # A `long` resume index must be accepted...
        def myreplace(exc):
            return (u'x', long(exc.end))
        codecs.register_error("test.cjktest", myreplace)
        self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
                                     'test.cjktest'), ('abcdxefgh', 9))

        # ...but an index beyond sys.maxint must raise IndexError.
        def myreplace(exc):
            return (u'x', sys.maxint + 1)
        codecs.register_error("test.cjktest", myreplace)
        self.assertRaises(IndexError, self.encode, self.unmappedunicode,
                          'test.cjktest')

    def test_callback_None_index(self):
        # A None resume index is a type error.
        def myreplace(exc):
            return (u'x', None)
        codecs.register_error("test.cjktest", myreplace)
        self.assertRaises(TypeError, self.encode, self.unmappedunicode,
                          'test.cjktest')

    def test_callback_backward_index(self):
        # The handler may rewind the input; the codec re-encodes from the
        # returned position, so 'abcd' is emitted once per rewind.
        def myreplace(exc):
            if myreplace.limit > 0:
                myreplace.limit -= 1
                return (u'REPLACED', 0)
            else:
                return (u'TERMINAL', exc.end)
        myreplace.limit = 3
        codecs.register_error("test.cjktest", myreplace)
        self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
                                     'test.cjktest'),
                ('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))

    def test_callback_forward_index(self):
        # Skipping ahead past the error position drops the skipped chars.
        def myreplace(exc):
            return (u'REPLACED', exc.end + 2)
        codecs.register_error("test.cjktest", myreplace)
        self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
                                     'test.cjktest'), ('abcdREPLACEDgh', 9))

    def test_callback_index_outofbound(self):
        # An index beyond the end of the input must raise IndexError.
        def myreplace(exc):
            return (u'TERM', 100)
        codecs.register_error("test.cjktest", myreplace)
        self.assertRaises(IndexError, self.encode, self.unmappedunicode,
                          'test.cjktest')

    def test_incrementalencoder(self):
        # Feed the reference text through the incremental encoder in
        # chunks of many sizes and compare with the native encoding.
        UTF8Reader = codecs.getreader('utf-8')
        for sizehint in [None] + range(1, 33) + \
                        [64, 128, 256, 512, 1024]:
            istream = UTF8Reader(StringIO(self.tstring[1]))
            ostream = StringIO()
            encoder = self.incrementalencoder()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                e = encoder.encode(data)
                ostream.write(e)
            self.assertEqual(ostream.getvalue(), self.tstring[0])

    def test_incrementaldecoder(self):
        # Mirror image of test_incrementalencoder for the decoder side.
        UTF8Writer = codecs.getwriter('utf-8')
        for sizehint in [None, -1] + range(1, 33) + \
                        [64, 128, 256, 512, 1024]:
            istream = StringIO(self.tstring[0])
            ostream = UTF8Writer(StringIO())
            decoder = self.incrementaldecoder()
            while 1:
                data = istream.read(sizehint)
                if not data:
                    break
                else:
                    u = decoder.decode(data)
                    ostream.write(u)
            self.assertEqual(ostream.getvalue(), self.tstring[1])

    def test_incrementalencoder_error_callback(self):
        # The incremental encoder must honour error-scheme changes made
        # between encode() calls.
        inv = self.unmappedunicode
        e = self.incrementalencoder()
        self.assertRaises(UnicodeEncodeError, e.encode, inv, True)

        e.errors = 'ignore'
        self.assertEqual(e.encode(inv, True), '')

        e.reset()
        def tempreplace(exc):
            return (u'called', exc.end)
        codecs.register_error('test.incremental_error_callback', tempreplace)
        e.errors = 'test.incremental_error_callback'
        self.assertEqual(e.encode(inv, True), 'called')

        # again
        e.errors = 'ignore'
        self.assertEqual(e.encode(inv, True), '')

    def test_streamreader(self):
        # Exercise read/readline/readlines on the codec's StreamReader
        # with many size hints; output must match the UTF-8 reference.
        UTF8Writer = codecs.getwriter('utf-8')
        for name in ["read", "readline", "readlines"]:
            for sizehint in [None, -1] + range(1, 33) + \
                            [64, 128, 256, 512, 1024]:
                istream = self.reader(StringIO(self.tstring[0]))
                ostream = UTF8Writer(StringIO())
                func = getattr(istream, name)
                while 1:
                    data = func(sizehint)
                    if not data:
                        break
                    if name == "readlines":
                        ostream.writelines(data)
                    else:
                        ostream.write(data)
                self.assertEqual(ostream.getvalue(), self.tstring[1])

    def test_streamwriter(self):
        # Exercise the codec's StreamWriter fed from a UTF-8 reader.
        readfuncs = ('read', 'readline', 'readlines')
        UTF8Reader = codecs.getreader('utf-8')
        for name in readfuncs:
            for sizehint in [None] + range(1, 33) + \
                            [64, 128, 256, 512, 1024]:
                istream = UTF8Reader(StringIO(self.tstring[1]))
                ostream = self.writer(StringIO())
                func = getattr(istream, name)
                while 1:
                    if sizehint is not None:
                        data = func(sizehint)
                    else:
                        data = func()
                    if not data:
                        break
                    if name == "readlines":
                        ostream.writelines(data)
                    else:
                        ostream.write(data)
                self.assertEqual(ostream.getvalue(), self.tstring[0])
# On UCS-2 ("narrow") Python builds a non-BMP character is stored as a
# surrogate pair, so unichr/ord are deliberately shadowed here with
# versions that handle supplementary-plane codepoints transparently.
if len(u'\U00012345') == 2: # ucs2 build
    _unichr = unichr
    def unichr(v):
        # Encode codepoints >= 0x10000 as a high/low surrogate pair.
        if v >= 0x10000:
            return _unichr(0xd800 + ((v - 0x10000) >> 10)) + \
                   _unichr(0xdc00 + ((v - 0x10000) & 0x3ff))
        else:
            return _unichr(v)
    _ord = ord
    def ord(c):
        # Decode a two-character surrogate pair back to one codepoint.
        if len(c) == 2:
            return 0x10000 + ((_ord(c[0]) - 0xd800) << 10) + \
                   (ord(c[1]) - 0xdc00)
        else:
            return _ord(c)
class TestBase_Mapping(unittest.TestCase):
    # Verifies a codec against its published mapping table, fetched from
    # `mapfileurl` (set by subclasses along with `encoding` — presumably;
    # TODO confirm against the concrete codec test modules).
    pass_enctest = []   # (bytes, unicode) pairs exempt from the encode check
    pass_dectest = []   # (bytes, unicode) pairs exempt from the decode check
    supmaps = []        # supplemental (bytes, unicode) mappings to verify

    def __init__(self, *args, **kw):
        unittest.TestCase.__init__(self, *args, **kw)
        self.open_mapping_file() # test it to report the error early

    def open_mapping_file(self):
        # Downloads (and caches) the mapping resource; may skip the test
        # when the network resource is unavailable.
        return test_support.open_urlresource(self.mapfileurl)

    def test_mapping_file(self):
        # data[1] may hold several '+'-joined codepoints; join them into
        # one unicode string.
        unichrs = lambda s: u''.join(map(unichr, map(eval, s.split('+'))))
        urt_wa = {}

        for line in self.open_mapping_file():
            if not line:
                break
            # Strip trailing comments; a valid row is "<cset> <unicode>".
            data = line.split('#')[0].strip().split()
            if len(data) != 2:
                continue

            csetval = eval(data[0])
            # Rebuild the raw byte sequence from the charset value,
            # one to four bytes depending on magnitude.
            if csetval <= 0x7F:
                csetch = chr(csetval & 0xff)
            elif csetval >= 0x1000000:
                csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \
                         chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
            elif csetval >= 0x10000:
                csetch = chr(csetval >> 16) + \
                         chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
            elif csetval >= 0x100:
                csetch = chr(csetval >> 8) + chr(csetval & 0xff)
            else:
                continue

            unich = unichrs(data[1])
            # Skip the replacement character and unicode values already
            # seen (first mapping wins).
            if ord(unich) == 0xfffd or urt_wa.has_key(unich):
                continue
            urt_wa[unich] = csetch

            self._testpoint(csetch, unich)

    def test_mapping_supplemental(self):
        for mapping in self.supmaps:
            self._testpoint(*mapping)

    def _testpoint(self, csetch, unich):
        # Check both directions unless the pair is explicitly exempted.
        if (csetch, unich) not in self.pass_enctest:
            self.assertEqual(unich.encode(self.encoding), csetch)
        if (csetch, unich) not in self.pass_dectest:
            self.assertEqual(unicode(csetch, self.encoding), unich)
def load_teststring(encoding):
    """Return the (native, utf-8) reference string pair for *encoding*."""
    from test import cjkencodings_test
    teststrings = cjkencodings_test.teststring
    return teststrings[encoding]
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Relay from GnuGo to Flash Client
gtp: Go Text Protocol
amf: ActionScript Messaging Format
'''
__author__ = 'Ethan Kennerly'
# Doctest-only walkthrough; the docstring is executed by the module's
# doctest runner and requires a live GnuGo GTP listener.
def example():
    '''Make a move in GnuGo and see the board.
    >>> gtp_envoy = setup_envoy(configuration.gtp_path, 'localhost', 5903)
    >>> print talk(gtp_envoy, 'showboard')
    = 
       A B C D E F G H J
     9 . . . . . . . . . 9
     8 . . . . . . . . . 8
     7 . . + . . . + . . 7
     6 . . . . . . . . . 6
     5 . . . . + . . . . 5
     4 . . . . . . . . . 4
     3 . . + . . . + . . 3
     2 . . . . . . . . . 2     WHITE (O) has captured 0 stones
     1 . . . . . . . . . 1     BLACK (X) has captured 0 stones
       A B C D E F G H J
    <BLANKLINE>
    <BLANKLINE>
    >>> print talk(gtp_envoy, 'play black C3')
    = 
    <BLANKLINE>
    <BLANKLINE>
    >>> print talk(gtp_envoy, 'showboard')
    = 
       A B C D E F G H J
     9 . . . . . . . . . 9
     8 . . . . . . . . . 8
     7 . . + . . . + . . 7
     6 . . . . . . . . . 6
     5 . . . . + . . . . 5
     4 . . . . . . . . . 4
     3 . . X . . . + . . 3
     2 . . . . . . . . . 2     WHITE (O) has captured 0 stones
     1 . . . . . . . . . 1     BLACK (X) has captured 0 stones
       A B C D E F G H J
    <BLANKLINE>
    <BLANKLINE>
    '''
# Known-crash reproduction; intentionally excluded from the global test.
def dragon_critical_crash_example():
    '''Crash GnuGo.  Why?  Excluded from global test.
    >>> from configuration import *
    >>> gtp_envoy = setup_envoy(configuration.gtp_path, configuration.gtp_host, configuration.gtp_port)
    >>> turn_to_move = talk(gtp_envoy, 'loadsgf sgf/eye_critical_example.sgf')
    >>> critical = talk(gtp_envoy, 'dragon_status A2')
    '''
# Second known-crash reproduction; also kept out of the global test.
def dragon_status_crash_example():
    '''Crash GnuGo.  Why?  exclude from global test.
    >>> from configuration import *
    >>> gtp_envoy = setup_envoy(configuration.gtp_path, configuration.gtp_host, configuration.gtp_port)
    >>> print talk(gtp_envoy, 'loadsgf sgf/eye_example.sgf')
    = black
    <BLANKLINE>
    <BLANKLINE>
    >>> print talk(gtp_envoy, 'showboard')
    = 
       A B C D E F G H J
     9 . . . . . . . . . 9
     8 . . . . . . . . . 8
     7 . . + . . . + . . 7
     6 . . . . . . . . . 6
     5 . . . . + . . . . 5
     4 . . . . . . . . . 4
     3 O O O O . . + . . 3
     2 X X X X . . . . . 2     WHITE (O) has captured 0 stones
     1 . . . X . . . . . 1     BLACK (X) has captured 0 stones
       A B C D E F G H J
    <BLANKLINE>
    <BLANKLINE>
    >>> print talk(gtp_envoy, 'dragon_status A2')
    = alive
    <BLANKLINE>
    <BLANKLINE>
    >>> print talk(gtp_envoy, 'white E2')
    = 
    <BLANKLINE>
    <BLANKLINE>
    Dragon status.  GnuGo crash.  Why?
    >>> print talk(gtp_envoy, 'dragon_status A2')
    talk(dragon_status A2): socket error <class 'socket.error'>: "(10054, 'Connection reset by peer')"
    talk(dragon_status A2): socket error <class 'socket.error'>: "(10054, 'Connection reset by peer')"
    '''
import time
import subprocess
def setup_gtp(gtp_path, gtp_port):
    '''In a subprocess, start a program for an artificial go player.

    The player (GnuGo) is started listening for GTP on *gtp_port* and the
    subprocess id is returned; the process is left running.

    >>> gtp_pid = setup_gtp(configuration.gtp_path, configuration.gtp_port)
    '''
    #- base_command = 'gnugo-3.8.exe'
    #- import os
    # dirname = os.path.abspath('.')
    # command = dirname + '/' + configuration.computer_start
    #- command = os.path.join(os.getcwd(),
    #-         os.path.dirname(__file__),
    #-         base_command)
    # configuration.computer_start)
    # command = dirname + '/gnugo_port' + str(gtp_port) + '.bat'
    options = ['--gtp-listen', '%i' % gtp_port,
            '--mode', 'gtp', '--boardsize', '9', '--level', '1']
    command_log = '%s %s' % (gtp_path, options)
    import logging
    logging.info(command_log)
    try:
        gtp_pid = subprocess.Popen([gtp_path] + options).pid
    except:
        # NOTE(review): bare except; the messages assume a missing
        # executable, but any startup failure lands here before re-raise.
        print 'setup_gtp', gtp_path
        print 'The system cannot find the file specified'
        raise
    ## # if complaint that GTP is not ready yet, then sleep first.
    time.sleep(2)
    return gtp_pid
def exec_gtp(gtp_path, sgf_file, gtp_input):
r'''In a subprocess, start a program for an artificial go player and wait for its result.
For example:
gnugo-3.8.exe --mode gtp --boardsize 9 --level 1\
--infile sgf/eye_critical_example.sgf
--gtp-input sgf/dragon_status.gtp
where the gtp-input file contains: dragon_status A2
>>> print exec_gtp(configuration.gtp_path, 'sgf/eye_critical_example.sgf', 'dragon_status A2')
= critical B1 B1
<BLANKLINE>
<BLANKLINE>
>>> print exec_gtp(configuration.gtp_path, 'sgf/eye_critical_example.sgf', 'dragon_status A2')
= critical B1 B1
<BLANKLINE>
<BLANKLINE>
>>> print exec_gtp(configuration.gtp_path, 'sgf/eye_critical_example.sgf', 'showboard')
=
A B C D E F G H J
9 . . . . . . . . . 9
8 . . . . . . . . . 8
7 . . + . . . + . . 7
6 . . . . . . . . . 6
5 . . . . + . . . . 5
4 . . . . . . . . . 4
3 O O O O . . + . . 3
2 X X X X O . . . . 2 WHITE (O) has captured 0 stones
1 . . . X . . . . . 1 BLACK (X) has captured 0 stones
A B C D E F G H J
<BLANKLINE>
<BLANKLINE>
If memory set to 512 (-M 512), this takes over half a second to execute.
Whereas, without, the speed is fast.
>>> import timeit
>>> timer = timeit.Timer(stmt="exec_gtp(configuration.gtp_path, 'sgf/eye_critical_example.sgf', 'dragon_status A2')", setup="from go_text_protocol import exec_gtp, configuration")
>>> duration = timer.timeit(10)
>>> duration < 0.5
True
'''
gtp_input_file = 'sgf/__input.gtp'
response_file = 'sgf/__response.gtp'
text.save(gtp_input_file, gtp_input + gtp_end_of_response)
options = ['--mode', 'gtp', '--boardsize', '9', '--level', '1',
'--infile', sgf_file,
'--gtp-input', gtp_input_file]
command_log = '%s %s' % (gtp_path, options)
import logging
logging.info(command_log)
# Delete contents of response file.
text.save(response_file, '...')
out = open(response_file, 'w')
try:
process = subprocess.Popen([gtp_path] + options,
stdout = out)
except:
print 'setup_gtp', gtp_path
print 'The system cannot find the file specified'
raise
out.close()
response = open(response_file, 'r')
response_string = ''
line = response.readline()
while not response_string.endswith(gtp_end_of_response):
response_string += line
line = response.readline()
return response_string
#- migrated to smart_go_format
#- def save_sgf(history, path):
#- sgf_tree = get_sgf_tree(history)
#- text.save(path, str(sgf_tree))
def get_dragon_status(sgf_file, row, column, size = 9):
    '''Status of the dragon at (row, column); when critical, the
    attack/defense coordinates follow the status.

    >>> sgf_file = 'sgf/eye_example.sgf'
    >>> status_attack_defense = get_dragon_status(sgf_file, 7, 3)
    >>> status_attack_defense
    ['alive']
    >>> sgf_file = 'sgf/eye_critical_example.sgf'
    >>> status_attack_defense = get_dragon_status(sgf_file, 7, 3)
    >>> status_attack_defense
    ['critical', (8, 1), (8, 1)]
    >>> status_attack_defense = get_dragon_status(sgf_file, 4, 3, 5)
    >>> status_attack_defense
    ['critical', (4, 1), (4, 1)]
    '''
    coordinate_gtp = array_to_gtp(row, column, size)
    gtp_input = 'dragon_status %s' % coordinate_gtp
    response_gtp = exec_gtp(configuration.gtp_path, sgf_file, gtp_input)
    return parse_dragon_status_coordinates(response_gtp, size)
def parse_dragon_status_coordinates(response_gtp, size = 9):
    r'''Parse a dragon_status GTP response into [status, coordinate, ...].

    PASS vertices are dropped.  Returns [] on a malformed response.
    NOTE(review): when the response parses to an empty token list the
    function falls through and implicitly returns None — confirm callers
    only truth-test the result.

    >>> response_gtp = '= critical A3 PASS\n\n'
    >>> parse_dragon_status_coordinates(response_gtp, 3)
    ['critical', (0, 0)]
    '''
    if not gtp_ok(response_gtp):
        print 'get_dragon_status:  response_gtp = %s' % response_gtp
        return []
    status_attack_defense = gtp_response_to_list(response_gtp)
    if status_attack_defense:
        # First token is the status; the rest are vertices to translate.
        status_attack_defense_coordinates = [status_attack_defense[0]]
        for s, stone in enumerate(status_attack_defense[1:]):
            if 'PASS' != stone:
                coordinate = gtp_to_array(stone, size)
                if coordinate:
                    status_attack_defense_coordinates.append(coordinate)
                else:
                    print 'parse_dragon_status_coordinates:  what is this strange stone?  %s' % stone
        return status_attack_defense_coordinates
def get_unconditional_status(sgf_file, row, column, size):
    '''Unconditional status ('alive', 'dead' or 'undecided') of the
    stone or point at (row, column).

    >>> sgf_file = 'sgf/unconditional_status_example.sgf'
    >>> status = get_unconditional_status(sgf_file, 2, 0, 5)
    >>> status
    'alive'
    >>> status = get_unconditional_status(sgf_file, 1, 0, 5)
    >>> status
    'dead'
    >>> status = get_unconditional_status(sgf_file, 4, 3, 5)
    >>> status
    'undecided'
    '''
    coordinate_gtp = array_to_gtp(row, column, size)
    gtp_input = 'unconditional_status %s' % coordinate_gtp
    response_gtp = exec_gtp(configuration.gtp_path, sgf_file, gtp_input)
    if not gtp_ok(response_gtp):
        print 'get_unconditional_status:  response_gtp = %s' % response_gtp
        # NOTE(review): error path returns [] while the success path
        # returns a string — callers should truth-test, not type-test.
        return []
    status = gtp_response_to_list(response_gtp)[0]
    return status
def get_dragon_coordinates(sgf_file, row, column, size = 9):
    '''Coordinates of all stones of the dragon containing (row, column).

    >>> sgf_file = 'sgf/eye_critical_example.sgf'
    >>> get_dragon_coordinates(sgf_file, 7, 3)
    [(7, 0), (7, 1), (7, 2), (7, 3), (8, 3)]
    >>> get_dragon_coordinates(sgf_file, 6, 3)
    [(6, 0), (6, 1), (6, 2), (6, 3)]
    '''
    coordinate_gtp = array_to_gtp(row, column, size)
    dragon_stones_gtp = 'dragon_stones %s' % coordinate_gtp
    stones_gtp = exec_gtp(configuration.gtp_path, sgf_file, dragon_stones_gtp)
    stones = gtp_response_to_list(stones_gtp)
    coordinates = [gtp_to_array(stone, size) for stone in stones]
    return coordinates
def get_attacker_critical_coordinates(sgf_file, attackers, size = 9):
    '''All critical coordinates of the attackers.

    Returns (dragons, vitals): the stones of every critical attacking
    dragon and the de-duplicated vital points for those dragons.

    >>> sgf_file = 'sgf/eye_example.sgf'
    >>> attackers = [(7, 3)]
    >>> dragons, vitals = get_attacker_critical_coordinates(sgf_file, attackers)
    >>> dragons
    []
    >>> vitals
    []
    >>> sgf_file = 'sgf/eye_critical_example.sgf'
    >>> attackers = [(7, 3)]
    >>> dragons, vitals = get_attacker_critical_coordinates(sgf_file, attackers)
    >>> dragons
    [(7, 0), (7, 1), (7, 2), (7, 3), (8, 3)]
    >>> vitals
    [(8, 1)]
    '''
    dragons = []
    vitals = []
    for r, c in attackers:
        status_attack_defense = get_dragon_status(sgf_file, r, c, size)
        if not status_attack_defense:
            print 'get_attacker_critical_coordinates:  error:', status_attack_defense
            continue
        status = status_attack_defense[0]
        # Only critical dragons contribute stones and vital points.
        if not 'critical' == status:
            continue
        for point in status_attack_defense[1:]:
            if point not in vitals:
                vitals.append(point)
        dragon = get_dragon_coordinates(sgf_file, r, c, size)
        dragons.extend(dragon)
    return dragons, vitals
import socket
def setup_envoy(gtp_path, gtp_host, gtp_port):
    '''Start GnuGo and return a TCP socket connected to its GTP listener.

    Must be setup before testing.

    >>> gtp_envoy = setup_envoy(configuration.gtp_path, configuration.gtp_host, configuration.gtp_port)
    '''
    gtp_pid = setup_gtp(gtp_path, gtp_port)
    envoy = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # connect() needs a hostname string; warn on a common mistake.
    if str != type(gtp_host):
        print 'setup_envoy(%s, %s) # gtp_host should be string' \
                % (gtp_host, gtp_port)
    try:
        envoy.connect((gtp_host, gtp_port))
    except:
        print 'setup_envoy(%s, %s) # exception' \
                % (gtp_host, gtp_port)
        raise
    # Give slow GTP replies up to 16 seconds before recv() times out.
    envoy.settimeout(16)
    return envoy
def talk(envoy, gtp_command, delay = 1.0/256, verbose = False):
    r'''Send and receive in Go Text Protocol to GnuGo.

    Send full message and receive full message
    and validate GTP format of a single response.
    Creating a second ambassador may corrupt responses.
    For examples, see update_gnugo_example.
    socket.error:  dragon_status_crash_example

    :param envoy: connected socket to the GnuGo GTP listener.
    :param gtp_command: command text (newline is appended here).
    :param delay: seconds slept between recv() polls.
    :param verbose: echo the command and response when True.
    :return: last complete GTP response, 'timeout', or an error message.
    '''
    if verbose:
        print 'talk(%s)' % (gtp_command)
    gtp_string = str(gtp_command) + '\n'
    # docs.python.org/howto/sockets.html
    message_length = len(gtp_string)
    total_sent = 0
    while total_sent < message_length:
        sent = envoy.send(gtp_string[total_sent:])
        if 0 == sent:
            # NOTE(review): only prints; a broken connection here loops
            # forever since total_sent stops advancing — confirm intent.
            print 'RuntimeError socket connection broken'
        total_sent += sent
    # If waiting 1/128 or less, sometimes the message is truncated.
    # I guess there is a variable execution and socket delay.
    gtp_response = ''
    while not gtp_response.endswith(gtp_end_of_response):
        # http://bytes.com/topic/python/answers/22953-how-catch-socket-timeout
        time.sleep(delay)
        try:
            chunk = envoy.recv(1024)
            if '' == chunk:
                # NOTE(review): same print-only handling as above.
                print 'talk(%s): RuntimeError socket connection broken' % (gtp_command)
            gtp_response += chunk
        except socket.timeout:
            print 'talk(%s): timeout: gtp_response: "%s"' \
                    % (gtp_command, gtp_response.__repr__())
            return 'timeout'
        except socket.error:
            import sys
            error_number, error_string = sys.exc_info()[:2]
            error_message = 'talk(%s): socket error %s: "%s"' \
                    % (gtp_command,
                        error_number, error_string)
            print error_message
            return error_message
    if not gtp_ok(gtp_response):
        print '''talk(%s) # GnuGo strange response: "%s"''' \
                % (gtp_command, gtp_response.__repr__())
    # Multiple buffered responses may arrive; keep only the last one.
    gtp_response = last_response(gtp_response)
    if verbose:
        print 'talk(%s) # gtp response: "%s"' \
                % (gtp_command, gtp_response)
    return gtp_response
#
# Go Text Protocol
#
gtp_start_of_ok = '= '        # prefix of a successful GTP response
gtp_start_of_error = '? '     # prefix of a GTP error response
gtp_end_of_response = '\n\n'  # a blank line terminates every GTP response
def gtp_ok(gtp_response):
    r'''Single, complete, happy response
    >>> gtp_ok('= \n\n')
    True
    >>> gtp_ok('? Cannot\n\n')
    False
    >>> gtp_ok('= C4\n\n')
    True
    >>> gtp_ok('= white\n\n= C4\n\n')
    False
    >>> gtp_ok('= C4\n')
    False
    '''
    starts_ok = gtp_response.startswith(gtp_start_of_ok)
    terminated = gtp_response.endswith(gtp_end_of_response)
    single = 1 == gtp_response.count(gtp_end_of_response)
    return starts_ok and single and terminated
def last_response(response):
    r'''Return only last response.
    >>> single_response = '= B+6.0\n\n'
    >>> last_response(single_response)
    '= B+6.0\n\n'
    >>> double_response = '= B+6.0\n\n= B+6.0\n\n'
    >>> last_response(double_response)
    '= B+6.0\n\n'
    '''
    # Splitting on the terminator leaves a trailing '' element, so the
    # last complete response is the second-to-last chunk.
    chunks = response.split(gtp_end_of_response)
    return chunks[-2] + gtp_end_of_response
# Board coordinate tables for GnuGo's display: GTP columns skip 'I', and
# GTP row numbers count up from the bottom while arrays count from the top.
# index           0    1    2    3    4    5    6    7    8
column_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J']
row_list =    [ 9 ,  8 ,  7 ,  6 ,  5 ,  4 ,  3 ,  2 ,  1]
def array_to_gtp(row, column, size = 9):
    '''Map 2D array to 9x9 board as shown by GnuGo 3.8
    >>> array_to_gtp(0, 2)
    'C9'
    >>> array_to_gtp(2, 2)
    'C7'
    >>> array_to_gtp(1, 2)
    'C8'
    3x3 board
    >>> array_to_gtp(1, 2, 3)
    'C2'
    >>> array_to_gtp(4, 2, 3)
    array_to_gtp: row out of bounds 4
    '''
    # Out-of-range arguments print a message and implicitly return None.
    if row <= -1 or size <= row or len(row_list) <= row:
        print 'array_to_gtp:  row out of bounds  %s' % row
        return
    if column <= -1 or size <= column or len(column_list) <= column:
        print 'array_to_gtp:  column out of bounds  %s' % column
        return
    if size <= -1 or len(row_list) < size or len(column_list) < size:
        print 'array_to_gtp:  size out of bounds  %s' % size
        return
    # Smaller boards use the tail of row_list, so shift by the difference.
    offset = len(row_list) - size
    return column_list[column] + str(row_list[row + offset])
def gtp_to_array(gtp_coordinate, size = 9):
    '''Map 9x9 board as shown by GnuGo 3.8 to 2D array.
    >>> gtp_to_array('A7')
    (2, 0)
    >>> gtp_to_array('C7')
    (2, 2)
    >>> gtp_to_array('PASS')
    gtp_to_array:  I only know how to handle 2 characters, not PASS
    >>> gtp_to_array('Z7')
    gtp_to_array:  column not found:  Z7
    >>> gtp_to_array('A3', 9)
    (6, 0)
    >>> gtp_to_array('A3', 3)
    (0, 0)
    >>> gtp_to_array('B3', 5)
    (2, 1)
    '''
    # Invalid vertices print a message and implicitly return None.
    if 2 != len(gtp_coordinate):
        print 'gtp_to_array:  I only know how to handle 2 characters, not ' + gtp_coordinate
        return
    gtp_column, gtp_row = gtp_coordinate[0], gtp_coordinate[1:]
    if gtp_column not in column_list:
        print 'gtp_to_array:  column not found:  ' + str(gtp_coordinate)
        return
    column = column_list.index(gtp_column)
    gtp_row = int(gtp_row)
    if gtp_row not in row_list:
        print 'gtp_to_array:  row not found:  ' + str(gtp_coordinate)
        return
    # Smaller boards use the tail of row_list; adding the offset before
    # the index lookup compensates (verified by the doctests above).
    # NOTE(review): a row valid on a 9x9 but outside a smaller `size`
    # passes the membership check yet makes index() raise ValueError —
    # confirm callers never pass such rows.
    offset = len(row_list) - size
    row = row_list.index(gtp_row + offset)
    return row, column
def gtp_to_move(gtp_response, size = 9):
    r'''Stone or PASS.
    >>> gtp_to_move('= A7\n\n')
    (2, 0)
    >>> gtp_to_move('= C7\n\n')
    (2, 2)
    >>> gtp_to_move('= PASS\n\n')
    'pass'
    >>> gtp_to_move('= resign\n\n')
    'resign'
    '''
    stone = gtp_response_to_stone(gtp_response)
    lowered = stone.lower()
    # Non-vertex moves come back lowercased; vertices become (row, col).
    if lowered in ('pass', 'resign'):
        return lowered
    return gtp_to_array(stone, size)
def gtp_response_to_stone(gtp_response):
    '''Return the first token of a GTP response, or None when empty.'''
    tokens = gtp_response_to_list(gtp_response)
    if not tokens:
        return None
    return tokens[0]
def gtp_response_to_coordinate(gtp_response, size = 9):
    r'''Translate a single-vertex GTP response into (row, column).
    >>> gtp_response_to_coordinate('= C4\n\n')
    (5, 2)
    '''
    vertex = gtp_response_to_stone(gtp_response)
    # Unpacking keeps the original contract: a bad vertex raises here.
    row, column = gtp_to_array(vertex, size)
    return row, column
def gtp_response_to_list(gtp_response):
    r'''GTP to Python data.
    >>> gtp_response_to_list('= C6 C4\n\n')
    ['C6', 'C4']
    >>> gtp_response_to_list('= \n\n')
    []
    >>> gtp_response_to_list('=\n\n')
    []
    >>> gtp_response_to_list('= black\n\n')
    ['black']
    >>> gtp_response_to_list('= PASS\n\n')
    ['PASS']
    '''
    first_line = gtp_response.splitlines()[0]
    # Drop the '=' status marker; keep the remaining space-split tokens.
    tokens = first_line.split(' ')[1:]
    # Preserve original semantics: at most one empty token is removed.
    try:
        tokens.remove('')
    except ValueError:
        pass
    return tokens
def gtp_response_to_text(gtp_response):
    r'''GTP to Python multi-line string.
    >>> print gtp_response_to_text('= C6 C4\n\n')
    C6 C4
    <BLANKLINE>
    <BLANKLINE>
    '''
    # Trim any '=' and ' ' characters from both ends of the response.
    return gtp_response.strip(' =')
def gtp_response_to_dictionary(gtp_response):
    r'''GTP to Python dictionary.
    >>> gtp_response_to_dictionary('= \n\n')
    {}
    >>> places = gtp_response_to_dictionary('= C6 C4\n\n')
    >>> if not places == {'C6': True, 'C4': True}:
    ...     print places
    '''
    # Every token becomes a key mapped to True (membership-set style).
    return dict.fromkeys(gtp_response_to_list(gtp_response), True)
# Mark territory
wallis_territory_text = '= -1.00 -1.00 -1.00 -1.00 0.00 -1.00 -1.00 -1.00 0.00 \n -1.00 0.00 -1.00 -2.00 0.00 -1.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 2.00 2.00 0.00 \n 0.00 -1.00 0.00 0.00 0.00 0.00 2.00 0.00 1.00 \n 0.00 0.00 0.00 0.00 0.00 0.00 2.00 0.00 1.00 \n 0.00 0.00 0.00 0.00 0.00 1.00 1.00 1.00 1.00 \n\n'
# Scaling factors used by scale_value() to map GnuGo's raw territory
# values onto the small integer range -2..2.
mostly_dead = 1 + 15.0 / 32   # ~1.47: multiplier for values with |v| > 1
mostly_mine = 1.0 / 0.72      # ~1.39: multiplier for values with |v| <= 1
not_black = '-0.72'           # sample raw value used by the doctests
black_dead = '1.41'           # sample raw value used by the doctests
def scale_value(raw):
    '''Scale a raw GnuGo territory value to a small integer.
    >>> scale_value(not_black)
    0
    >>> scale_value(black_dead)
    2
    '''
    number = float(raw)
    # Values inside [-1, 1] and outside it use different multipliers.
    factor = mostly_mine if abs(number) <= 1 else mostly_dead
    return int(number * factor)
def get_territory_values(territory_values_text):
    '''Parse a GTP territory_value response into rows of small integers.

    >>> from pprint import pprint
    >>> pprint(get_territory_values(wallis_territory_text))
    [[-1, -1, -1, -1, 0, -1, -1, -1, 0],
     [-1, 0, -1, -2, 0, -1, 0, 0, 0],
     [-1, -1, 0, 0, 0, 0, 0, 0, 0],
     [-1, -1, 0, 0, 0, 0, 0, 0, 0],
     [-1, -1, 0, 0, 0, 0, 0, 0, 0],
     [-1, -1, 0, 0, 0, 0, 2, 2, 0],
     [0, -1, 0, 0, 0, 0, 2, 0, 1],
     [0, 0, 0, 0, 0, 0, 2, 0, 1],
     [0, 0, 0, 0, 0, 1, 1, 1, 1]]
    >>> pprint(get_territory_values(mostly_dead_territory_text))
    [[-1, -1, -1, -1, 0, -1, -1, -1, 0],
     [-1, 0, -1, -2, 0, -1, 0, 0, 0],
     [-1, -1, 0, 0, 0, 0, 0, 0, 0],
     [-1, -1, 0, 0, 0, 0, 0, 0, 0],
     [-1, -1, 0, 0, 0, 0, 0, 0, 0],
     [-1, -1, 0, 0, 0, 0, 2, 2, 0],
     [0, -1, 0, 0, 0, 0, 2, 0, 1],
     [0, 0, 0, 0, 0, 0, 2, 0, 1],
     [0, 0, 0, 0, 0, 1, 1, 1, 1]]
    >>> get_territory_values(mostly_dead_territory_text)[7][6]
    2
    Distinguish suboptimal move in 5x5 board.
    >>> pprint(get_territory_values(beside_center_values_text))
    [[-1, -1, -1, -1, -1],
     [-1, -1, 0, -1, -1],
     [-1, -1, -1, -1, -1],
     [-1, 0, -1, 0, -1],
     [0, 0, 0, 0, 0]]
    >>> pprint(get_territory_values(center_values_text))
    [[-1, -1, -1, -1, -1],
     [-1, -1, -1, -1, -1],
     [-1, -1, 0, -1, -1],
     [-1, -1, -1, -1, -1],
     [-1, -1, -1, -1, -1]]
    '''
    # sometimes a value like 1.41 appears, which is close enough to dead.
    # Warn (but continue) when the response lacks the GTP success framing.
    if not gtp_ok(territory_values_text):
        print 'get_territory_text(%s) # invalid gtp_response' \
            % (territory_values_text.__repr__())
    # NOTE(review): lstrip/rstrip take a *set* of characters, not a
    # prefix/suffix, so this relies on the exact '= ...\n\n' framing.
    territory_values_text = territory_values_text \
        .lstrip(gtp_start_of_ok) \
        .rstrip(gtp_end_of_response)
    territory_values = []
    for line in territory_values_text.splitlines():
        # split(' ') keeps empty fields from the column-aligned output;
        # drop them all before scaling.
        line = line.split(' ')
        while '' in line:
            line.remove('')
        int_line = [scale_value(i) for i in line]
        territory_values.append(int_line)
    return territory_values
# Label tables for get_territory_labels(): map a scaled territory value
# (-2..2, as produced by get_territory_values) to a display label.
# All neutral -- effectively hides the territory overlay.
neutral_territory_values_dictionary = {
    -2: 'neutral',
    -1: 'neutral',
    0: 'neutral',
    1: 'neutral',
    2: 'neutral'
}
# Owner labels only; dead stones are attributed to the capturing side.
no_dead_territory_values_dictionary = {
    -2: 'black',
    -1: 'black',
    0: 'neutral',
    1: 'white',
    2: 'white'
}
# Full labels, marking dead stones explicitly.
territory_values_dictionary = {
    -2: 'white_dead',
    -1: 'black',
    0: 'neutral',
    1: 'white',
    2: 'black_dead'
}
def get_territory_labels(territory_values,
        values_dictionary = territory_values_dictionary):
    '''Translate a grid of territory values into a grid of labels.

    >>> values = get_territory_values(wallis_territory_text)
    >>> get_territory_labels(values)
    [['black', 'black', 'black', 'black', 'neutral', 'black', 'black', 'black', 'neutral'], ['black', 'neutral', 'black', 'white_dead', 'neutral', 'black', 'neutral', 'neutral', 'neutral'], ['black', 'black', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral'], ['black', 'black', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral'], ['black', 'black', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral'], ['black', 'black', 'neutral', 'neutral', 'neutral', 'neutral', 'black_dead', 'black_dead', 'neutral'], ['neutral', 'black', 'neutral', 'neutral', 'neutral', 'neutral', 'black_dead', 'neutral', 'white'], ['neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'black_dead', 'neutral', 'white'], ['neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'white', 'white', 'white', 'white']]
    Mostly dead is dead too.
    >>> old_values = get_territory_values(mostly_dead_territory_text)
    >>> old_labels = get_territory_labels(old_values)
    >>> old_labels[7][6]
    'black_dead'
    Optionally, do not show dead mark.
    >>> old_labels = get_territory_labels(old_values,
    ...     no_dead_territory_values_dictionary)
    >>> old_labels[7][6]
    'white'
    '''
    return [[values_dictionary[value] for value in row]
            for row in territory_values]
# Variant of wallis_territory_text with one 1.41 ("mostly dead") value
# at row 7, column 6; exercised by the doctests above.
mostly_dead_territory_text = '= -1.00 -1.00 -1.00 -1.00 0.00 -1.00 -1.00 -1.00 0.00 \n -1.00 0.00 -1.00 -2.00 0.00 -1.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 2.00 2.00 0.00 \n 0.00 -1.00 0.00 0.00 0.00 0.00 2.00 0.00 1.00 \n 0.00 0.00 0.00 0.00 0.00 0.00 1.41 0.00 1.00 \n 0.00 0.00 0.00 0.00 0.00 1.00 1.00 1.00 1.00 \n\n'
# Another 9x9 sample (no dead stones in the lower right corner).
wallis_territory2_text = '= -1.00 -1.00 -1.00 -1.00 0.00 -1.00 -1.00 -1.00 0.00 \n -1.00 0.00 -1.00 -2.00 0.00 -1.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 \n -1.00 -1.00 0.00 0.00 0.00 0.00 1.00 1.00 0.00 \n 0.00 -1.00 0.00 0.00 0.00 0.00 1.00 0.00 1.00 \n 0.00 0.00 0.00 0.00 0.00 0.00 1.00 0.00 1.00 \n 0.00 0.00 0.00 0.00 0.00 1.00 0.00 1.00 1.00 \n\n'
def estimate_black_score(territory_values):
    '''Estimate score from territory values.

    The values are white-relative (black territory is negative), so the
    estimate for black is the negated grand total.
    >>> values = get_territory_values(wallis_territory_text)
    >>> estimate_black_score(values)
    7
    '''
    # Use the builtin sum() instead of an accumulator named 'sum',
    # which shadowed the builtin.
    return -sum(sum(row) for row in territory_values)
# 5x5 influence samples: a stone played beside the center leaves weak
# (0-scaled) points, while the center stone dominates the whole board.
beside_center_values_text = '''= -1.00 -1.00 -1.00 -1.00 -1.00
-0.98 -1.00 0.00 -1.00 -0.98
-0.95 -0.73 -0.99 -0.73 -0.95
-0.73 -0.72 -0.73 -0.72 -0.73
-0.72 -0.72 -0.72 -0.72 -0.72
'''
center_values_text = '''= -1.00 -1.00 -1.00 -1.00 -1.00
-1.00 -1.00 -1.00 -1.00 -1.00
-1.00 -1.00 0.00 -1.00 -1.00
-1.00 -1.00 -1.00 -1.00 -1.00
-1.00 -1.00 -1.00 -1.00 -1.00
'''
def score_5_5_example():
    '''Distinguish suboptimal move in 5x5 board.
    >>> pprint(get_territory_values(beside_center_values_text))
    [[-1, -1, -1, -1, -1],
     [-1, -1, 0, -1, -1],
     [-1, -1, -1, -1, -1],
     [-1, 0, -1, 0, -1],
     [0, 0, 0, 0, 0]]
    >>> pprint(get_territory_values(center_values_text))
    [[-1, -1, -1, -1, -1],
     [-1, -1, -1, -1, -1],
     [-1, -1, 0, -1, -1],
     [-1, -1, -1, -1, -1],
     [-1, -1, -1, -1, -1]]
    '''
    # Doctest container only: the function body is intentionally empty.
# Delay dependence on Smart Go Format
import smart_go_format
import text
def update_gnugo(envoy, history, size = 9):
    '''Serialize the move history to a scratch SGF file and load it into GnuGo.'''
    scratch_path = 'sgf/_update_gnugo.sgf'
    tree = smart_go_format.get_sgf_tree(history, size)
    text.save(scratch_path, str(tree))
    return load_sgf(envoy, scratch_path)
def load_sgf(envoy, path = 'sgf/_update_gnugo.sgf'):
    '''Ask GnuGo to load an SGF file, replaying up to move 999.'''
    # XXX why does ambassador's GnuGo sometimes load blank board?
    # is an invalid move being appended to path?
    return talk(envoy, 'loadsgf %s 999' % path)
def get_score_and_territory(gtp_envoy):
    '''
    >>> gtp_envoy = setup_envoy(configuration.gtp_path, 'localhost', 5903)
    >>> next_player_gtp = update_gnugo(gtp_envoy, [])
    >>> score, territory_values = get_score_and_territory(gtp_envoy)
    >>> loaded = load_sgf(gtp_envoy, 'sgf/test_initial_influence.sgf')
    >>> score, territory_values = get_score_and_territory(gtp_envoy)
    >>> 0 == territory_values[5][5]
    True
    '''
    values_text = talk(gtp_envoy,
        'initial_influence white territory_value')
    values = get_territory_values(values_text)
    return estimate_black_score(values), values
def update_score_and_territory(gtp_envoy, history, size = 9,
        values_dictionary = territory_values_dictionary):
    '''Push the history into GnuGo, then return (score, territory labels).
    >>> gtp_envoy = setup_envoy(configuration.gtp_path, 'localhost', 5903)
    >>> score, territory_labels = update_score_and_territory(gtp_envoy, [])
    >>> score, territory_labels = update_score_and_territory(gtp_envoy, [], size = 3)
    >>> score, territory_labels = update_score_and_territory(gtp_envoy, [],
    ...     size = 3, values_dictionary = no_dead_territory_values_dictionary)
    '''
    update_gnugo(gtp_envoy, history, size)
    score, values = get_score_and_territory(gtp_envoy)
    return score, get_territory_labels(values, values_dictionary)
from smart_go_format import sgf_black, sgf_white
def final_score(gtp_envoy, offset_komi = 5.5):
    '''ask gnugo to score the game for black.
    Return estimate. Offset compensation to white.
    XXX even when komi set to 0, in setup_score_example
    sometimes gnugo seems to get stuck on 5.5 for white.
    Must update gnugo beforehand.
    >>> gtp_envoy = setup_envoy(configuration.gtp_path, 'localhost', 5903)
    >>> next_player_gtp = update_gnugo(gtp_envoy, [], 3)
    >>> final_score(gtp_envoy, 5.5)
    0
    >>> next_player_gtp = update_gnugo(gtp_envoy, [{'black': (1, 1)}, {'white': (0, 0)}, {'black': (0, 1)}, {'white': (1, 0)}, {'black': (2, 1)}], size = 3)
    >>> final_score(gtp_envoy, 5.5)
    8
    >>> print talk(gtp_envoy, 'showboard')
    =
    A B C
    3 O X . 3
    2 O X . 2     WHITE (O) has captured 0 stones
    1 . X . 1     BLACK (X) has captured 0 stones
    A B C
    <BLANKLINE>
    <BLANKLINE>
    >>> print talk(gtp_envoy, 'final_score')
    = B+2.5
    <BLANKLINE>
    <BLANKLINE>
    '''
    score_gtp = talk(gtp_envoy, 'final_score')
    # NOTE(review): lstrip/rstrip strip character *sets*; safe for the
    # expected '= B+2.5\n\n' shape but not a general prefix removal.
    score_text = score_gtp.lstrip('= ').rstrip('\n\n')
    if score_text.startswith(sgf_black):
        # Black wins: keep the positive magnitude.
        score_value_text = score_text.lstrip(
            sgf_black + '+')
    elif score_text.startswith(sgf_white):
        # White wins: negate the magnitude.
        score_value_text = '-' + score_text.lstrip(
            sgf_white + '+')
    else:
        print 'final_score: what is this score_text? %s' % score_text
        print '    making up a score of 12 for now'
        score_value_text = '12'
    # Truncate to an integer after compensating white's komi.
    territory_score = int(float(score_value_text)
        + offset_komi )
    return territory_score
def get_coordinate(gtp_envoy, gtp_input, board_size):
    '''Return row and column of top move.
    >>> gtp_envoy = setup_envoy(configuration.gtp_path, 'localhost', 5903)
    >>> sgf_file = 'sgf/test_initial_influence.sgf'
    >>> loaded = load_sgf(gtp_envoy, sgf_file)
    >>> row_column = get_coordinate(gtp_envoy, 'top_moves_white', 7)
    >>> row_column
    (5, 6)
    If hopeless for white, return nothing.
    >>> sgf_file = 'sgf/white_hopeless.sgf'
    >>> loaded = load_sgf(gtp_envoy, sgf_file)
    >>> row_column = get_coordinate(gtp_envoy, 'top_moves_white', 3)
    >>> row_column
    '''
    candidates = gtp_response_to_list(talk(gtp_envoy, gtp_input))
    if not candidates:
        return None
    return gtp_to_array(candidates[0], board_size)
def get_top_coordinate_white(gtp_envoy, board_size):
    '''Convenience wrapper: (row, column) of white's top move, or None.'''
    return get_coordinate(gtp_envoy, 'top_moves_white', board_size)
# Obsolete? Not used anymore?
def play_to_gtp(play_dictionary):
    r'''Convert an action and coordinate to GTP.
    >>> print play_to_gtp({'black': [(2, 3)]})
    ['play black D7']
    >>> print play_to_gtp({'white': [(1, 0)]})
    ['play white A8']
    >>> print play_to_gtp({'black': [[2, 3], [3, 3]]})
    ['play black D7', 'play black D6']
    '''
    commands = []
    for color in ('black', 'white'):
        prefix = 'play ' + color + ' '
        for row, column in play_dictionary.get(color, []):
            commands.append(prefix + array_to_gtp(row, column))
    return commands
def dictionary_to_gtp(action_dictionary):
    r'''Convert play, undo, showboard.
    >>> dictionary_to_gtp({'black': [(2, 3)]})
    ['play black D7']
    >>> print dictionary_to_gtp({'white': [(1, 0)]})
    ['play white A8']
    >>> print dictionary_to_gtp({'undo': 1})
    ['undo']
    >>> print dictionary_to_gtp({'loadsgf': 'sgf/white_tiger.sgf'})
    ['loadsgf sgf/white_tiger.sgf']
    >>> print dictionary_to_gtp({'printsgf': 'sgf/white_tiger.sgf'})
    ['printsgf sgf/white_tiger.sgf']
    >>> print dictionary_to_gtp({'undo': 1})
    ['undo']
    >>> print dictionary_to_gtp({'undo': 2})
    ['undo', 'undo']
    >>> print dictionary_to_gtp({'genmove': 'white'})
    ['genmove white']
    Not prepared to handle multiple commands.
    >>> print dictionary_to_gtp({'undo': 1, 'showboard': True})
    ['showboard']
    '''
    gtp_commands = []
    # NOTE(review): keys()[0] only works on Python 2 (dict_keys is not
    # indexable on Python 3), and with multiple keys the "first" key is
    # arbitrary -- the docstring calls this out as unsupported.
    if 'black' in action_dictionary \
        or 'white' in action_dictionary:
        gtp_commands.extend(play_to_gtp(action_dictionary))
    elif 'undo' in action_dictionary:
        # Repeat the bare command once per requested undo.
        for number in range(action_dictionary['undo']):
            gtp_commands.append(action_dictionary.keys()[0])
    elif 'showboard' in action_dictionary \
        or 'clear_board' in action_dictionary:
        gtp_commands.append(action_dictionary.keys()[0])
    elif 'loadsgf' in action_dictionary:
        gtp_commands.append(
            'loadsgf' + ' ' + action_dictionary['loadsgf'])
    elif 'genmove' in action_dictionary:
        gtp_commands.append(
            'genmove' + ' ' + action_dictionary['genmove'])
    elif 'printsgf' in action_dictionary:
        gtp_commands.append(
            'printsgf' + ' ' + action_dictionary['printsgf'])
    else:
        # Fall back to emitting the bare key as the command.
        print 'dictionary_to_gtp: I was not planning on this action: ' + action_dictionary.keys()[0]
        gtp_commands.append(action_dictionary.keys()[0])
    return gtp_commands
import code_unit
# Convenience snippet to paste into an interactive session for reloads.
snippet = '''
import go_text_protocol; go_text_protocol = reload(go_text_protocol); from go_text_protocol import *
'''
import config
# Shared (borg-pattern) configuration used by the doctests above.
defaults = config.setup_defaults()
configuration = config.borg(defaults)
if __name__ == '__main__':
    import sys
    units = globals()
    # Drop the examples known to crash GnuGo before running doctests.
    # pop() with a default so a missing/renamed example no longer
    # aborts the test run with a KeyError.
    units.pop('dragon_critical_crash_example', None)
    units.pop('dragon_status_crash_example', None)
    code_unit.test_file_args('./go_text_protocol.py', sys.argv,
        locals(), globals())
| |
#!/usr/bin/env python
"""
MUDDERY SERVER LAUNCHER SCRIPT
This is adapt from evennia/evennia/server/evennia_launcher.py.
The licence of Evennia can be found in evennia/LICENSE.txt.
This is the start point for running Muddery.
Sets the appropriate environmental variables and launches the server
and portal through the evennia_runner. Run without arguments to get a
menu. Run the script with the -h flag to see usage information.
"""
import os
import sys
import shutil
import django.core.management
from argparse import ArgumentParser
from subprocess import check_output, CalledProcessError, STDOUT
# Repository layout: this file lives three levels below MUDDERY_ROOT.
MUDDERY_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
MUDDERY_LIB = os.path.join(MUDDERY_ROOT, "muddery")
MUDDERY_TEMPLATE = os.path.join(MUDDERY_ROOT, "templates")
EVENNIA_LIB = os.path.join(MUDDERY_ROOT, "evennia")
# add evennia's path
sys.path.insert(1, EVENNIA_LIB)
from evennia.server import evennia_launcher
# Game directory structure
SETTINGFILE = "settings.py"
SERVERDIR = "server"
CONFDIR = os.path.join(SERVERDIR, "conf")
SETTINGS_PATH = os.path.join(CONFDIR, SETTINGFILE)
SETTINGS_DOTPATH = "server.conf.settings"
# The launcher operates on the game dir it is run from.
CURRENT_DIR = os.getcwd()
GAMEDIR = CURRENT_DIR
#------------------------------------------------------------
#
# Messages
#
#------------------------------------------------------------
CREATED_NEW_GAMEDIR = \
"""
Welcome to Muddery!
Created a new Muddery game directory '{gamedir}'.
You can now optionally edit your new settings file
at {settings_path}. If you don't, the defaults
will work out of the box. When ready to continue, 'cd' to your
game directory and run:
muddery -i start
This starts the server for the first time. Make sure to create
a superuser when asked for it. You should now be able to (by
default) connect to your server on server 'localhost', port 4000
using a telnet/mud client or http://localhost:8000 using your web
browser. If things don't work, check so those ports are open.
"""
CMDLINE_HELP = \
"""
Starts or operates the Muddery game server. Also allows for
initializing a new game directory and manages the game's database.
You can also pass most standard django-admin arguments and
options.
"""
VERSION_INFO = \
"""
Muddery {version}
OS: {os}
Python: {python}
Twisted: {twisted}
Django: {django}
Evennia {evennia}{about}
"""
ABOUT_INFO = \
"""
Muddery text game development system
Licence: BSD 3-Clause Licence
Web: http://www.muddery.org
Forum: http://www.muddery.org/forum
Maintainer (2015-): Lu Yijun
Use -h for command line options.
"""
ERROR_INPUT = \
"""
Command
{args} {kwargs}
raised an error: '{traceback}'.
"""
#------------------------------------------------------------
#
# Functions
#
#------------------------------------------------------------
def muddery_version():
    """
    Get the Muddery version info from the main package.

    Returns "<version> (rev <short git hash>)" when both the muddery
    package and a git checkout are available, degrading gracefully to
    "Unknown" / no revision otherwise.
    """
    try:
        import muddery
        version = muddery.__version__
    except ImportError:
        version = "Unknown"
    try:
        rev = check_output("git rev-parse --short HEAD", shell=True, cwd=MUDDERY_ROOT, stderr=STDOUT).strip()
    except (IOError, CalledProcessError):
        pass
    else:
        version = "%s (rev %s)" % (version, rev)
    return version
# Computed once at import time; used by show_version_info().
MUDDERY_VERSION = muddery_version()
def create_secret_key():
    """
    Randomly create the secret key for the settings file.

    Returns a 40-character string drawn without repetition from letters,
    digits and punctuation, excluding backslash and with single quotes
    replaced by double quotes so the key can be embedded inside a
    single-quoted settings literal.
    """
    import random
    import string
    # string.ascii_letters works on both Python 2 and 3;
    # string.letters was Python-2-only (and locale-dependent).
    characters = list((string.ascii_letters +
                       string.digits +
                       string.punctuation).replace("\\", "").replace("'", '"'))
    random.shuffle(characters)
    return "".join(characters[:40])
def create_settings_file():
    """
    Uses the template settings file to build a working settings file.

    Reads server/conf/settings.py from the freshly created game dir,
    fills in its {placeholder} slots, and writes it back in place.
    """
    settings_path = os.path.join(GAMEDIR, "server", "conf", "settings.py")
    with open(settings_path, 'r') as template_file:
        template = template_file.read()
    # Values substituted into the template placeholders.
    replacements = {
        "evennia_settings_default": os.path.join(evennia_launcher.EVENNIA_LIB, "settings_default.py"),
        "muddery_settings_default": os.path.join(MUDDERY_LIB, "settings_default.py"),
        "servername": '"%s"' % GAMEDIR.rsplit(os.path.sep, 1)[1].capitalize(),
        "secret_key": "'%s'" % create_secret_key(),
    }
    with open(settings_path, 'w') as settings_file:
        settings_file.write(template.format(**replacements))
def create_game_directory(dirname, template):
    """
    Initialize a new game directory named dirname
    at the current path. This means copying the
    template directory from muddery's root.

    Exits the process if the target directory already exists or the
    named template is unknown.  Side effect: rebinds the module-level
    GAMEDIR to the new directory.
    """
    def copy_tree(source, destination):
        """
        copy file tree (recursively merging into existing directories,
        unlike shutil.copytree which requires a fresh destination)
        """
        if not os.path.exists(destination):
            # If does not exist, create one.
            os.mkdir(destination)
        # traverse files and folders
        names = os.listdir(source)
        for name in names:
            srcname = os.path.join(source, name)
            dstname = os.path.join(destination, name)
            try:
                if os.path.isdir(srcname):
                    # If it is a folder, copy it recursively.
                    copy_tree(srcname, dstname)
                else:
                    # Copy file.
                    shutil.copy2(srcname, dstname)
            except Exception, e:
                # Best-effort copy: report and continue with the rest.
                print("Can not copy file:%s to %s for %s." % (srcname, dstname, e))
    global GAMEDIR
    GAMEDIR = os.path.abspath(os.path.join(CURRENT_DIR, dirname))
    if os.path.exists(GAMEDIR):
        print "Cannot create new Muddery game dir: '%s' already exists." % dirname
        sys.exit()
    template_dir = ""
    if template:
        template_dir = os.path.join(MUDDERY_TEMPLATE, template)
        if not os.path.exists(template_dir):
            # Unknown template: list the available ones and bail out.
            print 'Sorry, template "%s" does not exist.\nThese are available templates:' % template
            dirs = os.listdir(MUDDERY_TEMPLATE)
            for dir in dirs:
                full_path = os.path.join(MUDDERY_TEMPLATE, dir)
                if os.path.isdir(full_path):
                    print " %s" % dir
            print ""
            sys.exit()
    # copy default template directory
    default_template = os.path.join(MUDDERY_LIB, "game_template")
    shutil.copytree(default_template, GAMEDIR)
    # Overlay the chosen template on top of the default skeleton.
    if template_dir:
        copy_tree(template_dir, GAMEDIR)
    # pre-build settings file in the new GAMEDIR
    create_settings_file()
def show_version_info(about=False):
    """
    Return the formatted version banner; include the about text when
    `about` is true.
    """
    import os, sys
    import twisted
    import django
    banner_fields = dict(
        version=MUDDERY_VERSION,
        about=ABOUT_INFO if about else "",
        os=os.name,
        python=sys.version.split()[0],
        twisted=twisted.version.short(),
        django=django.get_version(),
        evennia=evennia_launcher.evennia_version(),
    )
    return VERSION_INFO.format(**banner_fields)
def main():
    """
    Run the muddery main program.

    Parses the command line, handles --init / --version itself, and
    delegates everything else to the evennia launcher.
    """
    # set up argument parser
    parser = ArgumentParser(description=CMDLINE_HELP)
    parser.add_argument('-v', '--version', action='store_true',
        dest='show_version', default=False,
        help="Show version info.")
    parser.add_argument('-i', '--interactive', action='store_true',
        dest='interactive', default=False,
        help="Start given processes in interactive mode.")
    parser.add_argument('--init', action='store', dest="init", metavar="game_name [template]",
        help="Creates a new game directory 'game_name' at the current location (from optional template).")
    parser.add_argument('-l', nargs='+', action='store', dest='listsetting', metavar="key",
        help="List values for server settings. Use 'all' to list all available keys.")
    parser.add_argument('--profiler', action='store_true', dest='profiler', default=False,
        help="Start given server component under the Python profiler.")
    parser.add_argument('--dummyrunner', nargs=1, action='store', dest='dummyrunner', metavar="N",
        help="Tests a running server by connecting N dummy players to it.")
    parser.add_argument('--settings', nargs=1, action='store', dest='altsettings', default=None, metavar="filename.py",
        help="Start evennia with alternative settings file in gamedir/server/conf/.")
    parser.add_argument("option", nargs='?', default="noop",
        help="Operational mode: 'start', 'stop' or 'restart'.")
    parser.add_argument("service", metavar="component", nargs='?', default="all",
        help="Server component to operate on: 'server', 'portal' or 'all' (default).")
    parser.epilog = "Example django-admin commands: 'migrate', 'flush', 'shell' and 'dbshell'. " \
        "See the django documentation for more django-admin commands."
    args, unknown_args = parser.parse_known_args()
    # handle arguments
    option, service = args.option, args.service
    # make sure we have everything
    evennia_launcher.check_main_evennia_dependencies()
    # NOTE(review): an argparse Namespace is always truthy, so this help
    # branch looks unreachable -- TODO confirm before removing.
    if not args:
        # show help pane
        print CMDLINE_HELP
        sys.exit()
    elif args.init:
        # initialization of game directory; the positional 'option'
        # doubles as the optional template name here.
        if option == "noop":
            option = ""
        create_game_directory(args.init, option)
        evennia_launcher.init_game_directory(GAMEDIR, check_db=False)
        try:
            django_args = ["makemigrations"]
            django_kwargs = {}
            django.core.management.call_command(*django_args, **django_kwargs)
        except django.core.management.base.CommandError, exc:
            print ERROR_INPUT.format(traceback=exc, args=django_args, kwargs=django_kwargs)
        try:
            django_args = ["migrate"]
            django_kwargs = {}
            django.core.management.call_command(*django_args, **django_kwargs)
        except django.core.management.base.CommandError, exc:
            print ERROR_INPUT.format(traceback=exc, args=django_args, kwargs=django_kwargs)
        print CREATED_NEW_GAMEDIR.format(gamedir=args.init,
            settings_path=os.path.join(args.init, SETTINGS_PATH))
        sys.exit()
    if args.show_version:
        # show the version info
        print show_version_info(option == "help")
        sys.exit()
    # Remaining flags (-i, -l, --profiler, --dummyrunner, --settings)
    # are presumably consumed by evennia_launcher via sys.argv -- TODO confirm.
    if args.altsettings:
        evennia_launcher.main()
    if option != "noop":
        # pass-through to evennia
        evennia_launcher.main()
    else:
        # no input; print muddery info
        print ABOUT_INFO
if __name__ == '__main__':
    # start Muddery from the command line
    # (no-op on import, so this module is safe to import as a library)
    main()
| |
""" This module handles everything related to the tracker behaviour. """
import json # For importing the items and options
import time
import urllib2 # For checking for updates to the item tracker
import logging # For logging
# Import item tracker specific code
from view_controls.view import DrawingTool, Event
from game_objects.item import Item
from game_objects.state import TrackerState, TrackerStateEncoder
from log_parser import LogParser
from options import Options
class IsaacTracker(object):
    """ The main class of the program.

    Owns the drawing tool, the log parser, and the (optional) server
    sync; run() is the blocking main loop.
    """
    def __init__(self, logging_level=logging.INFO, read_timer=1):
        # Seconds between log.txt re-reads when not reading from server.
        self.read_timer = read_timer
        # All data files live one directory above the working dir.
        self.file_prefix = "../"
        self.log = logging.getLogger("tracker")
        # This will erase our tracker log file from previous runs
        self.log.addHandler(logging.FileHandler(self.file_prefix + "tracker_log.txt", mode='w'))
        self.log.setLevel(logging_level)
        # Load items info
        with open(self.file_prefix + "items.json", "r") as items_file:
            Item.items_info = json.load(items_file)
        # load version
        with open(self.file_prefix + 'version.txt', 'r') as f:
            self.tracker_version = f.read()
        # Load options
        Options().load_options(self.file_prefix + "options.json")
    def __del__(self):
        # Persist options on teardown (best effort; __del__ timing is
        # interpreter-dependent).
        Options().save_options(self.file_prefix + "options.json")
    def check_for_update(self):
        """ Returns text to put in the title bar """
        try:
            latest = "https://api.github.com/repos/Hyphen-ated/RebirthItemTracker/releases/latest"
            github_info_json = urllib2.urlopen(latest).read()
            info = json.loads(github_info_json)
            latest_version = info["name"]
            title_text = " v" + self.tracker_version
            if latest_version != self.tracker_version:
                title_text += " (new version available)"
            return title_text
        except Exception as e:
            # Network/parse failures are non-fatal; show no version text.
            self.log.debug("Failed to find update info: " + e.message)
            return ""
    def run(self):
        """ The main routine which controls everything """
        update_notifier = self.check_for_update()
        framecount = 0
        # Create drawing tool to use to draw everything - it'll create its own screen
        drawing_tool = DrawingTool(self.file_prefix)
        drawing_tool.set_window_title(update_notifier)
        parser = LogParser(self.file_prefix, self.tracker_version)
        opt = Options()
        log = logging.getLogger("tracker")
        event_result = None
        state = None
        # Cached copies of the options, used to detect live changes.
        read_from_server = opt.read_from_server
        write_to_server = opt.write_to_server
        state_version = -1
        twitch_username = None
        # (timestamp, state) pairs downloaded but not yet displayed
        # (display is delayed by opt.read_delay seconds).
        new_states_queue = []
        screen_error_message = None
        while event_result != Event.DONE:
            # Check for events and handle them
            event_result = drawing_tool.handle_events()
            # A change means the user has (de)activated an option
            if opt.read_from_server != read_from_server\
                    or opt.twitch_name != twitch_username:
                twitch_username = opt.twitch_name
                read_from_server = opt.read_from_server
                new_states_queue = []
                # Also restart version count if we go back and forth from log.txt to server
                if read_from_server:
                    state_version = -1
                    state = None
                    # show who we are watching in the title bar
                    drawing_tool.set_window_title(update_notifier, watching_player=twitch_username, updates_queued=len(new_states_queue))
                else:
                    drawing_tool.set_window_title(update_notifier)
            if opt.write_to_server and opt.write_to_server != write_to_server:
                write_to_server = True
                drawing_tool.set_window_title(update_notifier, uploading=True)
            if not opt.write_to_server:
                write_to_server = False
            if opt.read_from_server:
                # Change the delay for polling, as we probably don't want to fetch it every second
                update_timer = 2
            else:
                update_timer = self.read_timer
            if event_result == Event.OPTIONS_UPDATE:
                # By setting the framecount to 0 we ensure we'll refresh the state right away
                framecount = 0
                screen_error_message = None
                # force updates after changing options
                if state is not None:
                    state.modified = True
            # Now we re-process the log file to get anything that might have loaded;
            # do it every update_timer seconds (making sure to truncate to an integer
            # or else it might never mod to 0)
            if (framecount % int(Options().framerate_limit * update_timer) == 0):
                # Let the parser do his thing and give us a state
                if opt.read_from_server:
                    base_url = opt.trackerserver_url + "/tracker/api/user/" + opt.twitch_name
                    json_dict = None
                    try:
                        # Cheap version probe before fetching the full state.
                        json_version = urllib2.urlopen(base_url + "/version").read()
                        if int(json_version) > state_version:
                            # FIXME better handling of 404 error ?
                            json_state = urllib2.urlopen(base_url).read()
                            json_dict = json.loads(json_state)
                            new_state = TrackerState.from_json(json_dict)
                            if new_state is None:
                                raise Exception
                            state_version = int(json_version)
                            new_states_queue.append((state_version, new_state))
                            drawing_tool.set_window_title(update_notifier, watching_player=twitch_username, updates_queued=len(new_states_queue), read_delay=opt.read_delay)
                    except Exception:
                        state = None
                        log.error("Couldn't load state from server")
                        import traceback
                        log.error(traceback.format_exc())
                    if json_dict is not None:
                        their_version = ""
                        if "tracker_version" in json_dict:
                            their_version = json_dict["tracker_version"]
                        else:
                            # this is the only version that can upload to the server but doesn't include a version string
                            their_version = "0.10-beta1"
                        if their_version != self.tracker_version:
                            screen_error_message = "They are using tracker version " + their_version + " but you have " + self.tracker_version
                else:
                    # Re-parsing replaces the state; remember whether a
                    # redraw was already pending.
                    force_draw = state and state.modified
                    state = parser.parse()
                    if force_draw:
                        state.modified = True
                if write_to_server and not opt.trackerserver_authkey:
                    screen_error_message = "Your authkey is blank. Get a new authkey in the options menu and paste it into the authkey text field."
                if state is not None and write_to_server and state.modified and screen_error_message is None:
                    # Upload our current state with an HTTP PUT.
                    opener = urllib2.build_opener(urllib2.HTTPHandler)
                    put_url = opt.trackerserver_url + "/tracker/api/update/" + opt.trackerserver_authkey
                    json_string = json.dumps(state, cls=TrackerStateEncoder, sort_keys=True)
                    request = urllib2.Request(put_url,
                        data=json_string)
                    request.add_header('Content-Type', 'application/json')
                    request.get_method = lambda: 'PUT'
                    try:
                        result = opener.open(request)
                        result_json = json.loads(result.read())
                        updated_user = result_json["updated_user"]
                        if updated_user is None:
                            screen_error_message = "The server didn't recognize you. Try getting a new authkey in the options menu."
                        else:
                            screen_error_message = None
                    except Exception as e:
                        import traceback
                        errmsg = traceback.format_exc()
                        log.error("ERROR: Couldn't send item info to server")
                        log.error(errmsg)
                        screen_error_message = "ERROR: Couldn't send item info to server, check tracker_log.txt"
            # check the new state at the front of the queue to see if it's time to use it
            if len(new_states_queue) > 0:
                (state_timestamp, new_state) = new_states_queue[0]
                current_timestamp = int(time.time())
                if current_timestamp - state_timestamp >= opt.read_delay or state is None:
                    state = new_state
                    new_states_queue.pop(0)
                    drawing_tool.set_window_title(update_notifier, watching_player=twitch_username, updates_queued=len(new_states_queue), read_delay=opt.read_delay)
            if state is None and screen_error_message is None:
                if read_from_server:
                    screen_error_message = "Unable to read state from server. Please verify your options setup and tracker_log.txt"
                else:
                    screen_error_message = "log.txt not found. Put the RebirthItemTracker folder inside the isaac folder, next to log.txt"
            if screen_error_message is not None:
                drawing_tool.write_error_message(screen_error_message)
            else:
                # We got a state, now we draw it
                drawing_tool.draw_state(state)
            drawing_tool.tick()
            framecount += 1
        # main loop finished. program is exiting
        drawing_tool.save_window_position()
def main():
    """ Entry point: build a tracker and run it, logging any crash. """
    try:
        # Pass "logging.DEBUG" in debug mode
        tracker = IsaacTracker()
        tracker.run()
    except Exception:
        import traceback
        crash_report = traceback.format_exc()
        # print it to stdout for dev troubleshooting, log it to a file for production
        print(crash_report)
        logging.getLogger("tracker").error(crash_report)
if __name__ == "__main__":
    # Run the tracker only when executed directly, not on import.
    main()
| |
from ctypes import *
from ctypes import util
import os
import sys
PY3K = sys.version_info >= (3, 0)
from .types import *
# If we're on Android, the SO file isn't on the LD_LIBRARY_PATH,
# so we have to manually specify it using the environment.
java = cdll.LoadLibrary(os.environ.get('RUBICON_LIBRARY', util.find_library('rubicon')))
# JNI version constants (jint values accepted/returned by GetVersion).
JNI_VERSION_1_1 = 0x00010001
JNI_VERSION_1_2 = 0x00010002
JNI_VERSION_1_4 = 0x00010004
JNI_VERSION_1_6 = 0x00010006
# Standard JNI API
# Declare ctypes restype/argtypes for each JNI entry point exposed by
# the rubicon bridge library.
java.GetVersion.restype = jint
java.GetVersion.argtypes = []
java.DefineClass.restype = jclass
java.DefineClass.argtypes = [c_char_p, jobject, jbyte_p, jsize]
# Keep the raw (bytes-only) FindClass under an alternate name so a
# str-friendly wrapper can replace java.FindClass on Python 3.
java.FindClassB = getattr(java, "FindClass")
java.FindClassB.restype = jclass
java.FindClassB.argtypes = [c_char_p]
def FindClass(x):
    '''FindClass wrapper that accepts either str or bytes class names.'''
    raw = x.encode("utf-8") if isinstance(x, str) else x
    return java.FindClassB(raw)
# On Python 3 route through the encoding wrapper; on Python 2 str is
# already bytes, so use the raw binding directly.
if PY3K:
    java.FindClass = FindClass
else:
    java.FindClass = java.FindClassB
# Reflection bridging
java.FromReflectedMethod.restype = jmethodID
java.FromReflectedMethod.argtypes = [jobject]
java.FromReflectedField.restype = jfieldID
java.FromReflectedField.argtypes = [jobject]
java.ToReflectedMethod.restype = jobject
java.ToReflectedMethod.argtypes = [jclass, jmethodID, jboolean]
java.GetSuperclass.restype = jclass
java.GetSuperclass.argtypes = [jclass]
java.IsAssignableFrom.restype = jboolean
java.IsAssignableFrom.argtypes = [jclass, jclass]
java.ToReflectedField.restype = jobject
java.ToReflectedField.argtypes = [jclass, jfieldID, jboolean]
# Exception handling
java.Throw.restype = jint
java.Throw.argtypes = [jthrowable]
java.ThrowNew.restype = jint
java.ThrowNew.argtypes = [jclass, c_char_p]
java.ExceptionOccurred.restype = jthrowable
java.ExceptionOccurred.argtypes = []
java.ExceptionDescribe.restype = None
java.ExceptionDescribe.argtypes = []
java.ExceptionClear.restype = None
java.ExceptionClear.argtypes = []
java.FatalError.restype = None
java.FatalError.argtypes = [c_char_p]
# Local/global reference management
java.PushLocalFrame.restype = jint
java.PushLocalFrame.argtypes = [jint]
java.PopLocalFrame.restype = jobject
java.PopLocalFrame.argtypes = [jobject]
java.NewGlobalRef.restype = jobject
java.NewGlobalRef.argtypes = [jobject]
java.DeleteGlobalRef.restype = None
java.DeleteGlobalRef.argtypes = [jobject]
java.DeleteLocalRef.restype = None
java.DeleteLocalRef.argtypes = [jobject]
java.IsSameObject.restype = jboolean
java.IsSameObject.argtypes = [jobject, jobject]
java.NewLocalRef.restype = jobject
java.NewLocalRef.argtypes = [jobject]
java.EnsureLocalCapacity.restype = jint
java.EnsureLocalCapacity.argtypes = [jint]
# Object creation and method lookup
java.AllocObject.restype = jobject
java.AllocObject.argtypes = [jclass]
java.NewObject.restype = jobject
java.NewObject.argtypes = [jclass, jmethodID]
java.GetObjectClass.restype = jclass
java.GetObjectClass.argtypes = [jobject]
java.IsInstanceOf.restype = jboolean
java.IsInstanceOf.argtypes = [jobject, jclass]
# Raw (bytes-only) GetMethodID kept under an alternate name, mirroring
# the FindClass/FindClassB arrangement above.
java.GetMethodIDB = getattr(java, "GetMethodID")
java.GetMethodIDB.restype = jmethodID
java.GetMethodIDB.argtypes = [jclass, c_char_p, c_char_p]
def GetMethodID(x, y, z):
    """Py3 shim: UTF-8-encode str name/signature before calling the raw binding."""
    y, z = [v.encode("utf-8") if isinstance(v, str) else v for v in (y, z)]
    return java.GetMethodIDB(x, y, z)
# On Python 3 install the str-aware wrapper; on Python 2 the raw bytes
# binding already accepts native strings.
if PY3K:
    java.GetMethodID = GetMethodID
else:
    java.GetMethodID = java.GetMethodIDB
# Instance method invocation, one entry point per JNI return type.
java.CallObjectMethod.restype = jobject
java.CallObjectMethod.argtypes = [jobject, jmethodID]
java.CallBooleanMethod.restype = jboolean
java.CallBooleanMethod.argtypes = [jobject, jmethodID]
java.CallByteMethod.restype = jbyte
java.CallByteMethod.argtypes = [jobject, jmethodID]
java.CallCharMethod.restype = jchar
java.CallCharMethod.argtypes = [jobject, jmethodID]
java.CallShortMethod.restype = jshort
java.CallShortMethod.argtypes = [jobject, jmethodID]
java.CallIntMethod.restype = jint
java.CallIntMethod.argtypes = [jobject, jmethodID]
java.CallLongMethod.restype = jlong
java.CallLongMethod.argtypes = [jobject, jmethodID]
java.CallFloatMethod.restype = jfloat
java.CallFloatMethod.argtypes = [jobject, jmethodID]
java.CallDoubleMethod.restype = jdouble
java.CallDoubleMethod.argtypes = [jobject, jmethodID]
java.CallVoidMethod.restype = None
java.CallVoidMethod.argtypes = [jobject, jmethodID]
# Non-virtual dispatch (invoke a specific class's implementation).
java.CallNonvirtualObjectMethod.restype = jobject
java.CallNonvirtualObjectMethod.argtypes = [jobject, jclass, jmethodID]
java.CallNonvirtualBooleanMethod.restype = jboolean
java.CallNonvirtualBooleanMethod.argtypes = [jobject, jclass, jmethodID]
java.CallNonvirtualByteMethod.restype = jbyte
java.CallNonvirtualByteMethod.argtypes = [jobject, jclass, jmethodID]
java.CallNonvirtualCharMethod.restype = jchar
java.CallNonvirtualCharMethod.argtypes = [jobject, jclass,jmethodID]
java.CallNonvirtualShortMethod.restype = jshort
java.CallNonvirtualShortMethod.argtypes = [jobject, jclass, jmethodID]
java.CallNonvirtualIntMethod.restype = jint
java.CallNonvirtualIntMethod.argtypes = [jobject, jclass, jmethodID]
java.CallNonvirtualLongMethod.restype = jlong
java.CallNonvirtualLongMethod.argtypes = [jobject, jclass, jmethodID]
java.CallNonvirtualFloatMethod.restype = jfloat
java.CallNonvirtualFloatMethod.argtypes = [jobject, jclass, jmethodID]
java.CallNonvirtualDoubleMethod.restype = jdouble
java.CallNonvirtualDoubleMethod.argtypes = [jobject, jclass, jmethodID]
java.CallNonvirtualVoidMethod.restype = None
java.CallNonvirtualVoidMethod.argtypes = [jobject, jclass, jmethodID]
# Raw (bytes-only) GetFieldID; str-accepting wrapper installed below.
java.GetFieldIDB = getattr(java, "GetFieldID")
java.GetFieldIDB.restype = jfieldID
java.GetFieldIDB.argtypes = [jclass, c_char_p, c_char_p]
def GetFieldID(x, y, z):
    """Py3 shim: UTF-8-encode str name/signature before calling the raw binding."""
    y, z = [v.encode("utf-8") if isinstance(v, str) else v for v in (y, z)]
    return java.GetFieldIDB(x, y, z)
# Install the str-aware GetFieldID wrapper on Python 3 only.
if PY3K:
    java.GetFieldID = GetFieldID
else:
    java.GetFieldID = java.GetFieldIDB
# Instance field getters, one per JNI primitive/object type.
java.GetObjectField.restype = jobject
java.GetObjectField.argtypes = [jobject, jfieldID]
java.GetBooleanField.restype = jboolean
java.GetBooleanField.argtypes = [jobject, jfieldID]
java.GetByteField.restype = jbyte
java.GetByteField.argtypes = [jobject, jfieldID]
java.GetCharField.restype = jchar
java.GetCharField.argtypes = [jobject, jfieldID]
java.GetShortField.restype = jshort
java.GetShortField.argtypes = [jobject, jfieldID]
java.GetIntField.restype = jint
java.GetIntField.argtypes = [jobject, jfieldID]
java.GetLongField.restype = jlong
java.GetLongField.argtypes = [jobject, jfieldID]
java.GetFloatField.restype = jfloat
java.GetFloatField.argtypes = [jobject, jfieldID]
java.GetDoubleField.restype = jdouble
java.GetDoubleField.argtypes = [jobject, jfieldID]
# Instance field setters.
java.SetObjectField.restype = None
java.SetObjectField.argtypes = [jobject, jfieldID, jobject]
java.SetBooleanField.restype = None
java.SetBooleanField.argtypes = [jobject, jfieldID, jboolean]
java.SetByteField.restype = None
java.SetByteField.argtypes = [jobject, jfieldID, jbyte]
java.SetCharField.restype = None
java.SetCharField.argtypes = [jobject, jfieldID, jchar]
java.SetShortField.restype = None
java.SetShortField.argtypes = [jobject, jfieldID, jshort]
java.SetIntField.restype = None
java.SetIntField.argtypes = [jobject, jfieldID, jint]
java.SetLongField.restype = None
java.SetLongField.argtypes = [jobject, jfieldID, jlong]
java.SetFloatField.restype = None
java.SetFloatField.argtypes = [jobject, jfieldID, jfloat]
java.SetDoubleField.restype = None
java.SetDoubleField.argtypes = [jobject, jfieldID, jdouble]
# Raw (bytes-only) GetStaticMethodID; str wrapper installed below.
java.GetStaticMethodIDB = getattr(java, "GetStaticMethodID")
java.GetStaticMethodIDB.restype = jmethodID
java.GetStaticMethodIDB.argtypes = [jclass, c_char_p, c_char_p]
def GetStaticMethodID(x, y, z):
    """Py3 shim: UTF-8-encode str name/signature before calling the raw binding."""
    y, z = [v.encode("utf-8") if isinstance(v, str) else v for v in (y, z)]
    return java.GetStaticMethodIDB(x, y, z)
# Install the str-aware GetStaticMethodID wrapper on Python 3 only.
if PY3K:
    java.GetStaticMethodID = GetStaticMethodID
else:
    java.GetStaticMethodID = java.GetStaticMethodIDB
# Static method invocation, one entry point per JNI return type.
java.CallStaticObjectMethod.restype = jobject
java.CallStaticObjectMethod.argtypes = [jclass, jmethodID]
java.CallStaticBooleanMethod.restype = jboolean
java.CallStaticBooleanMethod.argtypes = [jclass, jmethodID]
java.CallStaticByteMethod.restype = jbyte
java.CallStaticByteMethod.argtypes = [jclass, jmethodID]
java.CallStaticCharMethod.restype = jchar
java.CallStaticCharMethod.argtypes = [jclass, jmethodID]
java.CallStaticShortMethod.restype = jshort
java.CallStaticShortMethod.argtypes = [jclass, jmethodID]
java.CallStaticIntMethod.restype = jint
java.CallStaticIntMethod.argtypes = [jclass, jmethodID]
java.CallStaticLongMethod.restype = jlong
java.CallStaticLongMethod.argtypes = [jclass, jmethodID]
java.CallStaticFloatMethod.restype = jfloat
java.CallStaticFloatMethod.argtypes = [jclass, jmethodID]
java.CallStaticDoubleMethod.restype = jdouble
java.CallStaticDoubleMethod.argtypes = [jclass, jmethodID]
java.CallStaticVoidMethod.restype = None
java.CallStaticVoidMethod.argtypes = [jclass, jmethodID]
# Raw (bytes-only) GetStaticFieldID; str wrapper installed below.
java.GetStaticFieldIDB = getattr(java, "GetStaticFieldID")
java.GetStaticFieldIDB.restype = jfieldID
java.GetStaticFieldIDB.argtypes = [jclass, c_char_p, c_char_p]
def GetStaticFieldID(x, y, z):
    """Py3 shim: UTF-8-encode str name/signature before calling the raw binding."""
    y, z = [v.encode("utf-8") if isinstance(v, str) else v for v in (y, z)]
    return java.GetStaticFieldIDB(x, y, z)
# Install the str-aware GetStaticFieldID wrapper on Python 3 only.
if PY3K:
    java.GetStaticFieldID = GetStaticFieldID
else:
    java.GetStaticFieldID = java.GetStaticFieldIDB
# Static field getters.
java.GetStaticObjectField.restype = jobject
java.GetStaticObjectField.argtypes = [jclass, jfieldID]
java.GetStaticBooleanField.restype = jboolean
java.GetStaticBooleanField.argtypes = [jclass, jfieldID]
java.GetStaticByteField.restype = jbyte
java.GetStaticByteField.argtypes = [jclass, jfieldID]
java.GetStaticCharField.restype = jchar
java.GetStaticCharField.argtypes = [jclass, jfieldID]
java.GetStaticShortField.restype = jshort
java.GetStaticShortField.argtypes = [jclass, jfieldID]
java.GetStaticIntField.restype = jint
java.GetStaticIntField.argtypes = [jclass, jfieldID]
java.GetStaticLongField.restype = jlong
java.GetStaticLongField.argtypes = [jclass, jfieldID]
java.GetStaticFloatField.restype = jfloat
java.GetStaticFloatField.argtypes = [jclass, jfieldID]
java.GetStaticDoubleField.restype = jdouble
java.GetStaticDoubleField.argtypes = [jclass, jfieldID]
# Static field setters.
java.SetStaticObjectField.restype = None
java.SetStaticObjectField.argtypes = [jclass, jfieldID, jobject]
java.SetStaticBooleanField.restype = None
java.SetStaticBooleanField.argtypes = [jclass, jfieldID, jboolean]
java.SetStaticByteField.restype = None
java.SetStaticByteField.argtypes = [jclass, jfieldID, jbyte]
java.SetStaticCharField.restype = None
java.SetStaticCharField.argtypes = [jclass, jfieldID, jchar]
java.SetStaticShortField.restype = None
java.SetStaticShortField.argtypes = [jclass, jfieldID, jshort]
java.SetStaticIntField.restype = None
java.SetStaticIntField.argtypes = [jclass, jfieldID, jint]
java.SetStaticLongField.restype = None
java.SetStaticLongField.argtypes = [jclass, jfieldID, jlong]
java.SetStaticFloatField.restype = None
java.SetStaticFloatField.argtypes = [jclass, jfieldID, jfloat]
java.SetStaticDoubleField.restype = None
java.SetStaticDoubleField.argtypes = [jclass, jfieldID, jdouble]
# UTF-16 string operations.
java.NewString.restype = jstring
java.NewString.argtypes = [jchar_p, jsize]
java.GetStringLength.restype = jsize
java.GetStringLength.argtypes = [jstring]
java.GetStringChars.restype = jchar_p
java.GetStringChars.argtypes = [jstring, jboolean_p]
java.ReleaseStringChars.restype = None
java.ReleaseStringChars.argtypes = [jstring, jchar_p]
# Raw (bytes-only) NewStringUTF; str wrapper installed below.
java.NewStringUTFB = getattr(java, "NewStringUTF")
java.NewStringUTFB.restype = jstring
java.NewStringUTFB.argtypes = [c_char_p]
def NewStringUTF(x):
    """Py3 shim: accept str or bytes; the raw binding requires UTF-8 bytes."""
    payload = x.encode("utf-8") if isinstance(x, str) else x
    return java.NewStringUTFB(payload)
# Install the str-aware NewStringUTF wrapper on Python 3 only.
if PY3K:
    java.NewStringUTF = NewStringUTF
else:
    java.NewStringUTF = java.NewStringUTFB
# Modified-UTF-8 string operations.
java.GetStringUTFLength.restype = jsize
java.GetStringUTFLength.argtypes = [jstring]
java.GetStringUTFChars.restype = c_char_p
java.GetStringUTFChars.argtypes = [jstring, jboolean_p]
java.ReleaseStringUTFChars.restype = None
java.ReleaseStringUTFChars.argtypes = [jstring, c_char_p]
# Array creation and element access.
java.GetArrayLength.restype = jsize
java.GetArrayLength.argtypes = [jarray]
java.NewObjectArray.restype = jobjectArray
java.NewObjectArray.argtypes = [jsize, jclass, jobject]
java.GetObjectArrayElement.restype = jobject
java.GetObjectArrayElement.argtypes = [jobjectArray, jsize]
java.SetObjectArrayElement.restype = None
java.SetObjectArrayElement.argtypes = [jobjectArray, jsize, jobject]
java.NewBooleanArray.restype = jbooleanArray
java.NewBooleanArray.argtypes = [jsize]
java.NewByteArray.restype = jbyteArray
java.NewByteArray.argtypes = [jsize]
java.NewCharArray.restype = jcharArray
java.NewCharArray.argtypes = [jsize]
java.NewShortArray.restype = jshortArray
java.NewShortArray.argtypes = [jsize]
java.NewIntArray.restype = jintArray
java.NewIntArray.argtypes = [jsize]
java.NewLongArray.restype = jlongArray
java.NewLongArray.argtypes = [jsize]
java.NewFloatArray.restype = jfloatArray
java.NewFloatArray.argtypes = [jsize]
java.NewDoubleArray.restype = jdoubleArray
java.NewDoubleArray.argtypes = [jsize]
# Pinned element buffers (Get.../Release... pairs).
java.GetBooleanArrayElements.restype = jboolean_p
java.GetBooleanArrayElements.argtypes = [jbooleanArray, jboolean_p]
java.GetByteArrayElements.restype = jbyte_p
java.GetByteArrayElements.argtypes = [jbyteArray, jboolean_p]
java.GetCharArrayElements.restype = jchar_p
java.GetCharArrayElements.argtypes = [jcharArray, jboolean_p]
java.GetShortArrayElements.restype = jshort_p
java.GetShortArrayElements.argtypes = [jshortArray, jboolean_p]
java.GetIntArrayElements.restype = jint_p
java.GetIntArrayElements.argtypes = [jintArray, jboolean_p]
java.GetLongArrayElements.restype = jlong_p
java.GetLongArrayElements.argtypes = [jlongArray, jboolean_p]
java.GetFloatArrayElements.restype = jfloat_p
java.GetFloatArrayElements.argtypes = [jfloatArray, jboolean_p]
java.GetDoubleArrayElements.restype = jdouble_p
java.GetDoubleArrayElements.argtypes = [jdoubleArray, jboolean_p]
java.ReleaseBooleanArrayElements.restype = None
java.ReleaseBooleanArrayElements.argtypes = [jbooleanArray, jboolean_p, jint]
java.ReleaseByteArrayElements.restype = None
java.ReleaseByteArrayElements.argtypes = [jbyteArray, jbyte_p, jint]
java.ReleaseCharArrayElements.restype = None
java.ReleaseCharArrayElements.argtypes = [jcharArray, jchar_p, jint]
java.ReleaseShortArrayElements.restype = None
java.ReleaseShortArrayElements.argtypes = [jshortArray, jshort_p, jint]
java.ReleaseIntArrayElements.restype = None
java.ReleaseIntArrayElements.argtypes = [jintArray, jint_p, jint]
java.ReleaseLongArrayElements.restype = None
java.ReleaseLongArrayElements.argtypes = [jlongArray, jlong_p, jint]
java.ReleaseFloatArrayElements.restype = None
java.ReleaseFloatArrayElements.argtypes = [jfloatArray, jfloat_p, jint]
java.ReleaseDoubleArrayElements.restype = None
java.ReleaseDoubleArrayElements.argtypes = [jdoubleArray, jdouble_p, jint]
# Bulk copy in/out of array regions.
java.GetBooleanArrayRegion.restype = None
java.GetBooleanArrayRegion.argtypes = [jbooleanArray, jsize, jsize, jboolean_p]
java.GetByteArrayRegion.restype = None
java.GetByteArrayRegion.argtypes = [jbyteArray, jsize, jsize, jbyte_p]
java.GetCharArrayRegion.restype = None
java.GetCharArrayRegion.argtypes = [jcharArray, jsize, jsize, jchar_p]
java.GetShortArrayRegion.restype = None
java.GetShortArrayRegion.argtypes = [jshortArray, jsize, jsize, jshort_p]
java.GetIntArrayRegion.restype = None
java.GetIntArrayRegion.argtypes = [jintArray, jsize, jsize, jint_p]
java.GetLongArrayRegion.restype = None
java.GetLongArrayRegion.argtypes = [jlongArray, jsize, jsize, jlong_p]
java.GetFloatArrayRegion.restype = None
java.GetFloatArrayRegion.argtypes = [jfloatArray, jsize, jsize, jfloat_p]
java.GetDoubleArrayRegion.restype = None
java.GetDoubleArrayRegion.argtypes = [jdoubleArray, jsize, jsize, jdouble_p]
java.SetBooleanArrayRegion.restype = None
java.SetBooleanArrayRegion.argtypes = [jbooleanArray, jsize, jsize, jboolean_p]
java.SetByteArrayRegion.restype = None
java.SetByteArrayRegion.argtypes = [jbyteArray, jsize, jsize, jbyte_p]
java.SetCharArrayRegion.restype = None
java.SetCharArrayRegion.argtypes = [jcharArray, jsize, jsize, jchar_p]
java.SetShortArrayRegion.restype = None
java.SetShortArrayRegion.argtypes = [jshortArray, jsize, jsize, jshort_p]
java.SetIntArrayRegion.restype = None
java.SetIntArrayRegion.argtypes = [jintArray, jsize, jsize, jint_p]
java.SetLongArrayRegion.restype = None
java.SetLongArrayRegion.argtypes = [jlongArray, jsize, jsize, jlong_p]
java.SetFloatArrayRegion.restype = None
java.SetFloatArrayRegion.argtypes = [jfloatArray, jsize, jsize, jfloat_p]
java.SetDoubleArrayRegion.restype = None
java.SetDoubleArrayRegion.argtypes = [jdoubleArray, jsize, jsize, jdouble_p]
# Native method registration, monitors, VM handle, critical sections, weak
# refs, direct buffers, and ref-type introspection.
java.RegisterNatives.restype = jint
java.RegisterNatives.argtypes = [jclass, JNINativeMethod_p, jint]
java.UnregisterNatives.restype = jint
java.UnregisterNatives.argtypes = [jclass]
java.MonitorEnter.restype = jint
java.MonitorEnter.argtypes = [jobject]
java.MonitorExit.restype = jint
java.MonitorExit.argtypes = [jobject]
java.GetJavaVM.restype = jint
java.GetJavaVM.argtypes = [JavaVM_p]
java.GetStringRegion.restype = None
java.GetStringRegion.argtypes = [jstring, jsize, jsize, jchar_p]
java.GetStringUTFRegion.restype = None
java.GetStringUTFRegion.argtypes = [jstring, jsize, jsize, c_char_p]
java.GetPrimitiveArrayCritical.restype = c_void_p
java.GetPrimitiveArrayCritical.argtypes = [jarray, jboolean_p]
java.ReleasePrimitiveArrayCritical.restype = None
java.ReleasePrimitiveArrayCritical.argtypes = [jarray, c_void_p, jint]
java.GetStringCritical.restype = jchar_p
java.GetStringCritical.argtypes = [jstring, jboolean_p]
java.ReleaseStringCritical.restype = None
java.ReleaseStringCritical.argtypes = [jstring, jchar_p]
java.NewWeakGlobalRef.restype = jweak
java.NewWeakGlobalRef.argtypes = [jobject]
java.DeleteWeakGlobalRef.restype = None
java.DeleteWeakGlobalRef.argtypes = [jweak]
java.ExceptionCheck.restype = jboolean
java.ExceptionCheck.argtypes = []
java.NewDirectByteBuffer.restype = jobject
java.NewDirectByteBuffer.argtypes = [c_void_p, jlong]
java.GetDirectBufferAddress.restype = c_void_p
java.GetDirectBufferAddress.argtypes = [jobject]
java.GetDirectBufferCapacity.restype = jlong
java.GetDirectBufferCapacity.argtypes = [jobject]
java.GetObjectRefType.restype = c_int
java.GetObjectRefType.argtypes = [jobject]
class _ReflectionAPI(object):
    "A lazy-loading proxy for the key classes and methods in the Java reflection API"
    def __init__(self):
        # Cache of already-resolved jclass/jmethodID handles, keyed by the
        # descriptor names below.
        self._attrs = {}
        # Each entry describes how to resolve the attribute on first use:
        # ('FindClass', jni_class_name) or
        # ('Get[Static]MethodID', owner_attr, method_name, jni_signature).
        self._descriptors = {
            'Class': ('FindClass', 'java/lang/Class'),
            'Class__getName': ('GetMethodID', 'Class', 'getName', '()Ljava/lang/String;'),
            'Class__getConstructors': ('GetMethodID', 'Class', 'getConstructors', '()[Ljava/lang/reflect/Constructor;'),
            'Class__getMethods': ('GetMethodID', 'Class', 'getMethods', '()[Ljava/lang/reflect/Method;'),
            'Class__getInterfaces': ('GetMethodID', 'Class', 'getInterfaces', '()[Ljava/lang/Class;'),
            'Class__getSuperclass': ('GetMethodID', 'Class', 'getSuperclass', '()Ljava/lang/Class;'),
            'Constructor': ('FindClass', 'java/lang/reflect/Constructor'),
            'Constructor__getParameterTypes': ('GetMethodID', 'Constructor', 'getParameterTypes', '()[Ljava/lang/Class;'),
            'Constructor__getModifiers': ('GetMethodID', 'Constructor', 'getModifiers', '()I'),
            'Method': ('FindClass', 'java/lang/reflect/Method'),
            'Method__getName': ('GetMethodID', 'Method', 'getName', '()Ljava/lang/String;'),
            'Method__getReturnType': ('GetMethodID', 'Method', 'getReturnType', '()Ljava/lang/Class;'),
            'Method__getParameterTypes': ('GetMethodID', 'Method', 'getParameterTypes', '()[Ljava/lang/Class;'),
            'Method__getModifiers': ('GetMethodID', 'Method', 'getModifiers', '()I'),
            'Field': ('FindClass', 'java/lang/reflect/Field'),
            'Field__getType': ('GetMethodID', 'Field', 'getType', '()Ljava/lang/Class;'),
            'Modifier': ('FindClass', 'java/lang/reflect/Modifier'),
            'Modifier__isStatic': ('GetStaticMethodID', 'Modifier', 'isStatic', '(I)Z'),
            'Modifier__isPublic': ('GetStaticMethodID', 'Modifier', 'isPublic', '(I)Z'),
            'Python': ('FindClass', 'org/pybee/rubicon/Python'),
            'Python__proxy': ('GetStaticMethodID', 'Python', 'proxy', '(Ljava/lang/Class;J)Ljava/lang/Object;'),
            'Python__getField': ('GetStaticMethodID', 'Python', 'getField', '(Ljava/lang/Class;Ljava/lang/String;Z)Ljava/lang/reflect/Field;'),
            'Python__getMethods': ('GetStaticMethodID', 'Python', 'getMethods', '(Ljava/lang/Class;Ljava/lang/String;Z)[Ljava/lang/reflect/Method;'),
            'Boolean': ('FindClass', 'java/lang/Boolean'),
            'Boolean__booleanValue': ('GetMethodID', 'Boolean', 'booleanValue', '()Z'),
            'Byte': ('FindClass', 'java/lang/Byte'),
            'Byte__byteValue': ('GetMethodID', 'Byte', 'byteValue', '()B'),
            # NOTE(review): 'java/lang/Char' looks wrong -- the JVM box class
            # is java/lang/Character; confirm before relying on this entry.
            'Char': ('FindClass', 'java/lang/Char'),
            'Char__charValue': ('GetMethodID', 'Char', 'charValue', '()C'),
            'Short': ('FindClass', 'java/lang/Short'),
            'Short__shortValue': ('GetMethodID', 'Short', 'shortValue', '()S'),
            'Integer': ('FindClass', 'java/lang/Integer'),
            'Integer__intValue': ('GetMethodID', 'Integer', 'intValue', '()I'),
            'Long': ('FindClass', 'java/lang/Long'),
            'Long__longValue': ('GetMethodID', 'Long', 'longValue', '()J'),
            'Float': ('FindClass', 'java/lang/Float'),
            'Float__floatValue': ('GetMethodID', 'Float', 'floatValue', '()F'),
            'Double': ('FindClass', 'java/lang/Double'),
            'Double__doubleValue': ('GetMethodID', 'Double', 'doubleValue', '()D'),
        }
    def __getattr__(self, name):
        # Resolve lazily: look in the cache first, otherwise consult the
        # descriptor table, resolve via JNI, and memoize the handle.
        try:
            result = self._attrs[name]
            return result
        except KeyError:
            try:
                args = self._descriptors[name]
                if args[0] == 'FindClass':
                    result = java.FindClass(*args[1:])
                    if result.value is None:
                        raise RuntimeError("Couldn't find Java class '%s'" % args[1])
                    # Promote to a global ref so the class survives beyond the
                    # current local reference frame.
                    result = cast(java.NewGlobalRef(result), jclass)
                elif args[0] == 'GetMethodID':
                    klass = getattr(self, args[1])
                    result = java.GetMethodID(klass, *args[2:])
                    if result.value is None:
                        raise RuntimeError("Couldn't find Java method '%s.%s'" % (args[1], args[2]))
                elif args[0] == 'GetStaticMethodID':
                    klass = getattr(self, args[1])
                    result = java.GetStaticMethodID(klass, *args[2:])
                    if result.value is None:
                        raise RuntimeError("Couldn't find Java static method '%s.%s'" % (args[1], args[2]))
                self._attrs[name] = result
                return result
            except KeyError:
                raise RuntimeError("Unexpected reflection API request '%s'" % name)
# Module-level singleton used by the rest of the bridge.
reflect = _ReflectionAPI()
| |
import json
import copy
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_noop
from corehq.apps.commtrack.const import SUPPLY_POINT_CASE_TYPE
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import json_response
from corehq.apps.domain.decorators import (
domain_admin_required,
login_and_domain_required,
)
from corehq.apps.domain.views import BaseDomainView
from corehq.apps.locations.models import SQLLocation, LocationType
from .forms import ConsumptionForm, StockLevelsForm, CommTrackSettingsForm
from .models import CommtrackActionConfig, StockRestoreConfig
from .tasks import recalculate_domain_consumption_task
from .util import all_sms_codes
@domain_admin_required
def default(request, domain):
    """CommTrack setup landing view: redirect to the product list page."""
    from corehq.apps.products.views import ProductListView
    project = request.project
    if not (project and project.commtrack_enabled):
        raise Http404()
    url = reverse(ProductListView.urlname, args=[domain])
    return HttpResponseRedirect(url)
class BaseCommTrackManageView(BaseDomainView):
    # Common base for all CommTrack setup pages: fixed section header plus a
    # domain-admin-only dispatch guard.
    section_name = ugettext_noop("Setup")

    @property
    def section_url(self):
        # URL of the CommTrack setup landing page for this domain.
        return reverse('default_commtrack_setup', args=[self.domain])

    @method_decorator(domain_admin_required)  # TODO: will probably want less restrictive permission?
    def dispatch(self, request, *args, **kwargs):
        return super(BaseCommTrackManageView, self).dispatch(request, *args, **kwargs)
class CommTrackSettingsView(BaseCommTrackManageView):
    """Advanced CommTrack settings: consumption and stock-level configuration."""
    urlname = 'commtrack_settings'
    page_title = ugettext_noop("Advanced Settings")
    template_name = 'domain/admin/commtrack_settings.html'

    @property
    @memoized
    def commtrack_settings(self):
        # Shortcut to the domain's commtrack settings document.
        return self.domain_object.commtrack_settings

    @property
    def page_context(self):
        return {
            'form': self.commtrack_settings_form
        }

    @property
    @memoized
    def commtrack_settings_form(self):
        # Flatten the nested consumption/stock configs into prefixed
        # ('consumption_*' / 'stock_*') flat form fields.
        initial = self.commtrack_settings.to_json()
        initial.update(dict(('consumption_' + k, v) for k, v in
            self.commtrack_settings.consumption_config.to_json().items()))
        initial.update(dict(('stock_' + k, v) for k, v in
            self.commtrack_settings.stock_levels_config.to_json().items()))

        if self.request.method == 'POST':
            return CommTrackSettingsForm(self.request.POST, initial=initial, domain=self.domain)
        return CommTrackSettingsForm(initial=initial, domain=self.domain)

    def set_ota_restore_config(self):
        """
        If the checkbox for syncing consumption fixtures is
        checked, then we build the restore config with appropriate
        special properties, otherwise just clear the object.

        If there becomes a way to tweak these on the UI, this should
        be done differently.
        """
        if self.commtrack_settings.sync_consumption_fixtures:
            self.domain_object.commtrack_settings.ota_restore_config = StockRestoreConfig(
                section_to_consumption_types={
                    'stock': 'consumption'
                },
                force_consumption_case_types=[
                    SUPPLY_POINT_CASE_TYPE
                ],
                use_dynamic_product_list=True,
            )
        else:
            self.domain_object.commtrack_settings.ota_restore_config = StockRestoreConfig()

    def post(self, request, *args, **kwargs):
        # Persist the settings form; keep a copy of the previous config so we
        # can detect consumption-relevant changes afterwards.
        if self.commtrack_settings_form.is_valid():
            data = self.commtrack_settings_form.cleaned_data
            previous_config = copy.copy(self.commtrack_settings)
            self.commtrack_settings.use_auto_consumption = bool(data.get('use_auto_consumption'))
            self.commtrack_settings.sync_consumption_fixtures = bool(data.get('sync_consumption_fixtures'))
            self.commtrack_settings.individual_consumption_defaults = bool(data.get('individual_consumption_defaults'))

            self.set_ota_restore_config()

            # Copy the prefixed flat form fields back onto the nested configs;
            # falsy values are deliberately skipped (fields left unchanged).
            fields = ('emergency_level', 'understock_threshold', 'overstock_threshold')
            for field in fields:
                if data.get('stock_' + field):
                    setattr(self.commtrack_settings.stock_levels_config, field,
                        data['stock_' + field])

            consumption_fields = ('min_transactions', 'min_window', 'optimal_window')
            for field in consumption_fields:
                if data.get('consumption_' + field):
                    setattr(self.commtrack_settings.consumption_config, field,
                        data['consumption_' + field])

            self.commtrack_settings.save()

            for loc_type in LocationType.objects.filter(domain=self.domain).all():
                # This will update stock levels based on commtrack config
                loc_type.save()

            if (previous_config.use_auto_consumption != self.commtrack_settings.use_auto_consumption
                or previous_config.consumption_config.to_json() != self.commtrack_settings.consumption_config.to_json()
            ):
                # kick off delayed consumption rebuild
                recalculate_domain_consumption_task.delay(self.domain)
                messages.success(request, _("Settings updated! Your updated consumption settings may take a "
                    "few minutes to show up in reports and on phones."))
            else:
                messages.success(request, _("Settings updated!"))
            return HttpResponseRedirect(self.page_url)
        return self.get(request, *args, **kwargs)
class DefaultConsumptionView(BaseCommTrackManageView):
    """Edit the domain's default consumption values."""
    urlname = 'update_default_consumption'
    template_name = 'commtrack/manage/default_consumption.html'
    page_title = ugettext_noop("Consumption")

    @property
    @memoized
    def consumption_form(self):
        # Bind POST data when submitting; otherwise render an unbound form.
        if self.request.method == 'POST':
            return ConsumptionForm(self.domain, self.request.POST)
        return ConsumptionForm(self.domain)

    @property
    def page_context(self):
        return {
            'form': self.consumption_form,
        }

    def post(self, request, *args, **kwargs):
        # Save on success and redirect (post/redirect/get); re-render with
        # form errors otherwise.
        if self.consumption_form.is_valid():
            self.consumption_form.save()
            messages.success(request, _("Default consumption values updated"))
            return HttpResponseRedirect(
                reverse(DefaultConsumptionView.urlname, args=[self.domain])
            )
        return self.get(request, *args, **kwargs)
class SMSSettingsView(BaseCommTrackManageView):
    """Configure CommTrack SMS keywords/actions (client-side edited JSON)."""
    urlname = 'commtrack_sms_settings'
    page_title = ugettext_noop("SMS")
    template_name = 'domain/admin/sms_settings.html'

    @property
    def page_context(self):
        return {
            'other_sms_codes': dict(self.get_other_sms_codes()),
            'settings': self.settings_context,
        }

    @property
    def settings_context(self):
        # Serialize current action config for the client-side editor.
        return {
            'keyword': self.domain_object.commtrack_settings.multiaction_keyword,
            'actions': [self._get_action_info(a) for a in self.domain_object.commtrack_settings.actions],
            'requisition_config': {
                'enabled': self.domain_object.commtrack_settings.requisition_config.enabled,
                'actions': [self._get_action_info(a) for a in self.domain_object.commtrack_settings.requisition_config.actions],
            },
        }

    # FIXME
    def _get_action_info(self, action):
        return {
            'type': action.action,
            'keyword': action.keyword,
            'name': action.subaction,
            'caption': action.caption,
        }

    def get_other_sms_codes(self):
        # Yield only product SMS codes, mapped to (type, product name).
        # NOTE: iteritems() is Python 2 only.
        for k, v in all_sms_codes(self.domain).iteritems():
            if v[0] == 'product':
                yield (k, (v[0], v[1].name))

    def post(self, request, *args, **kwargs):
        payload = json.loads(request.POST.get('json'))
        self.domain_object.commtrack_settings.multiaction_keyword = payload['keyword']

        def mk_action(action):
            # NOTE(review): 'subaction' is populated from the payload's
            # 'caption', not 'name', even though _get_action_info serializes
            # subaction as 'name' -- confirm this asymmetry is intentional.
            return CommtrackActionConfig(**{
                'action': action['type'],
                'subaction': action['caption'],
                'keyword': action['keyword'],
                'caption': action['caption'],
            })

        #TODO add server-side input validation here (currently validated on client)
        self.domain_object.commtrack_settings.actions = [mk_action(a) for a in payload['actions']]
        self.domain_object.commtrack_settings.requisition_config.enabled = payload['requisition_config']['enabled']
        self.domain_object.commtrack_settings.requisition_config.actions = [mk_action(a) for a in payload['requisition_config']['actions']]

        self.domain_object.commtrack_settings.save()

        return self.get(request, *args, **kwargs)
class StockLevelsView(BaseCommTrackManageView):
    """Edit per-location-type stock level thresholds."""
    urlname = 'stock_levels'
    page_title = ugettext_noop("Stock Levels")
    template_name = 'commtrack/manage/stock_levels.html'

    def get_existing_stock_levels(self):
        # One dict of thresholds per LocationType in this domain.
        loc_types = LocationType.objects.by_domain(self.domain)
        return [{
            'loc_type': loc_type.name,
            'emergency_level': loc_type.emergency_level,
            'understock_threshold': loc_type.understock_threshold,
            'overstock_threshold': loc_type.overstock_threshold,
        } for loc_type in loc_types]

    def save_stock_levels(self, levels):
        """
        Accepts a list of dicts of the form returned by
        get_existing_stock_levels and writes to the appropriate LocationType
        """
        levels = {level['loc_type']: level for level in levels}
        for loc_type in LocationType.objects.filter(domain=self.domain).all():
            if loc_type.name not in levels:
                continue
            stock_levels = levels[loc_type.name]
            changed = False
            for threshold in [
                'emergency_level',
                'understock_threshold',
                'overstock_threshold'
            ]:
                if getattr(loc_type, threshold) != stock_levels[threshold]:
                    setattr(loc_type, threshold, stock_levels[threshold])
                    changed = True
            # Only hit the database when something actually changed.
            if changed:
                loc_type.save()

    @property
    def page_context(self):
        return {
            'stock_levels_form': self.stock_levels_form
        }

    @property
    @memoized
    def stock_levels_form(self):
        if self.request.method == "POST":
            data = self.request.POST
        else:
            data = self.get_existing_stock_levels()
        return StockLevelsForm(data)

    def post(self, request, *args, **kwargs):
        if self.stock_levels_form.is_valid():
            self.save_stock_levels(self.stock_levels_form.cleaned_data)
            return HttpResponseRedirect(self.page_url)
        # TODO display error messages to the user...
        return self.get(request, *args, **kwargs)
| |
import json
import os
import time
import traceback
import types

import praw
''' CONFIG '''
USERAGENT = ''
MAXPOSTS = 100
DROPOUT = [404]
# This error code will cause the nofailrequest to exit.
# There's no reason to repeatedly request a 404.
NFR_TIMEOUT = 10
# How many times should NoFailRequest attempt the request?
# If 0, it goes on forever
''' END CONFIG '''

# Optionally pull the useragent from a local, uncommitted `bot` module.
try:
    import bot
    USERAGENT = bot.uG
except ImportError:
    pass

# Single shared reddit session for the whole script.
r = praw.Reddit(USERAGENT)
def nfr2(function, *fargs, **fkwargs):
    '''
    Different version of NFR.
    The first was having problems with generators and lazyload
    objects, because those functions return successfully
    even though the data isn't checked

    Calls `function(*fargs, **fkwargs)`, retrying on failure with a
    2-second pause.  Generators are forced to a list so lazy-load errors
    surface inside the try block.  Returns None if the resource is not
    found, on Ctrl-C, or when NFR_TIMEOUT attempts are exhausted.
    '''
    loopcount = 0
    # NFR_TIMEOUT == 0 means "retry forever": loopcount is then never
    # incremented, so the first clause keeps the loop alive.
    while loopcount == 0 or loopcount < NFR_TIMEOUT:
        try:
            results = function(*fargs, **fkwargs)
            if isinstance(results, types.GeneratorType):
                results = list(results)
            return results
        except praw.errors.NotFound:
            return None
        except KeyboardInterrupt:
            return None
        except Exception:
            # Was a bare `except:`, which would also have swallowed
            # SystemExit/GeneratorExit; Exception is the right scope here.
            traceback.print_exc()
            print('Retrying in 2...')
            time.sleep(2)
        if NFR_TIMEOUT != 0:
            loopcount += 1
    # Attempts exhausted: make the implicit None explicit.
    return None
def nfr(function, dropout=None):
    '''
    "No Fail Request"
    Creates a function that will retry until it succeeds.
    This function accepts 1 parameter, a function, and returns a modified
    version of that function that will try-catch, sleep, and loop until it
    finally returns.

    `dropout` may be a single HTTP status code or a list of codes; a request
    failing with one of those codes gives up immediately and returns [].
    '''
    def backoff():
        # Log the failure and pause briefly before the next attempt.
        traceback.print_exc()
        print('Retrying in 2...')
        time.sleep(2)

    def wrapped(*args, **kwargs):
        while True:
            try:
                return function(*args, **kwargs)
            except praw.requests.exceptions.HTTPError as e:
                status = e.response.status_code
                if status == dropout:
                    return []
                if isinstance(dropout, list) and status in dropout:
                    return []
                backoff()
            except praw.requests.exceptions.ConnectionError:
                # BUG FIX: previously `requests.exceptions.ConnectionError`,
                # but `requests` is never imported here, so evaluating the
                # except clause raised NameError instead of retrying.  Use
                # the requests module bundled with praw, as above.
                backoff()
            except AssertionError:
                # Strange PRAW bug causes certain MoreComments
                # To throw assertion error, so just ignore it
                # And get onto the next one.
                return []
            except KeyboardInterrupt:
                raise Exception("KeyboardInterrupt")
            except Exception:
                # Was a bare `except:`; narrowed to Exception so SystemExit
                # and friends still propagate.
                backoff()
    return wrapped
def get_subreddit_authors(sr):
    '''
    Collect the unique author names from a subreddit's /new submissions
    and /comments listings, sorted case-insensitively.
    '''
    sr = sr.lower()
    subreddit = nfr(r.get_subreddit)(sr)

    print('/r/%s/new' % sr)
    posts = nfr2(subreddit.get_new, limit=MAXPOSTS)
    print('/r/%s/comments' % sr)
    posts += nfr2(subreddit.get_comments, limit=MAXPOSTS)

    unique_names = {post.author.name for post in posts if post.author is not None}
    authors = sorted(unique_names, key=lambda name: name.lower())
    print('Found %d authors' % len(authors))
    return authors
def process_userlist(authors, fromsubreddit=''):
    '''
    Run every username through process_user() and accumulate a combined
    subreddit-frequency dictionary.

    If the names came from scanning a subreddit, pass `fromsubreddit` and
    that subreddit is dropped from the totals, since everyone sharing it
    makes it useless data.
    '''
    authors = list(set(authors))
    fromsubreddit = fromsubreddit.lower()
    total = len(authors)
    width = len(str(total))

    userreddits = {}
    totalreddits = {}
    for position, username in enumerate(authors, 1):
        progress = '(%0{l}d/%0{l}d) '.format(l=width) % (position, total)
        subs = process_user(username, pre=progress)
        userreddits[username] = subs
        for sub in subs:
            totalreddits[sub] = totalreddits.get(sub, 0) + subs[sub]

    totalreddits.pop(fromsubreddit, None)
    # -1 because of %totalposts%
    totalreddits['%totalsubs%'] = len(totalreddits) - 1
    return totalreddits
def process_subreddit(sr):
    '''
    Scan a subreddit's recent submissions and comments, then tally where
    its authors post via process_userlist.
    '''
    return process_userlist(get_subreddit_authors(sr), fromsubreddit=sr)
def process_user(username, pre=''):
    '''
    Return {subreddit: frequency} for a user's recent submissions and
    comments, plus a '%totalposts%' pseudo-entry with the total count.
    Returns an empty dict when the account cannot be fetched.
    '''
    user = nfr(r.get_redditor, dropout=404)(username)
    if user is None:
        return {}
    print('\t%s/u/%s/submitted' % (pre, username))
    posts = nfr2(user.get_submitted, limit=MAXPOSTS)
    print('\t%s/u/%s/comments' % (pre, username))
    posts += nfr2(user.get_comments, limit=MAXPOSTS)
    frequencies = {'%totalposts%': len(posts)}
    for post in posts:
        name = post.subreddit.display_name.lower()
        frequencies[name] = frequencies.get(name, 0) + 1
    return frequencies
def write_json(filename, totalreddits):
    '''
    Given a dictionary totalreddits, sort entries by descending frequency
    (ties broken by name) and write them to filename.json.

    Fixes over the previous version:
    - keys are escaped with json.dumps, so names containing quotes or
      backslashes no longer produce invalid JSON;
    - the file handle is managed with `with`, so it is closed even if a
      write fails.
    '''
    import json
    if not filename.endswith('.json'):
        filename += '.json'
    keys = sorted(totalreddits,
                  key=lambda x: (totalreddits.get(x), x.lower()),
                  reverse=True)
    print('Creating %s' % filename)
    lines = ['\t%s : %d' % (json.dumps(key), totalreddits[key]) for key in keys]
    with open(filename, 'w') as outfile:
        outfile.write('{\n')
        outfile.write(',\n'.join(lines))
        outfile.write('\n}')
def process_and_write(sr):
    '''
    Convenience wrapper: scan subreddit `sr` and dump the tally to JSON.
    '''
    write_json(sr, process_subreddit(sr))
def file_lines(filename):
    '''
    Return the whitespace-stripped lines of a text file.

    Uses a context manager so the handle is closed even if reading raises
    (the previous version leaked the handle on error).
    '''
    with open(filename, 'r') as textfile:
        return [line.strip() for line in textfile]
def process_subfile(filename):
    '''
    Read subreddit names (one per line) from a text file and feed each
    one to process_and_write().
    '''
    for subname in file_lines(filename):
        process_and_write(subname)
def process_userfile(filename, jsonfilename):
    '''
    Read usernames (one per line) from a text file and write one JSON
    report per user. jsonfilename is required since we don't have any
    subreddit name to derive the output filename from.

    Bug fix: the output base name is now computed once, before the loop.
    Previously the name was split and suffixed inside the loop, so every
    user after the first had all prior usernames appended to its output
    filename (e.g. "base-user1-user2.json").
    '''
    base = jsonfilename.split('.')[0]
    for username in file_lines(filename):
        results = process_user(username)
        write_json('%s-%s' % (base, username), results)
if __name__ == '__main__':
    # Demo entry point: scan /r/goldtesting and write goldtesting.json.
    process_and_write('goldtesting')
    # os._exit skips atexit handlers and cleanup; presumably used to kill
    # lingering API/worker threads -- TODO confirm sys.exit wouldn't suffice.
    os._exit(0)
| |
from unittest import TestCase
from src.Piloto import Piloto
from src.Escuderia import Escuderia
from mockito import *
__author__ = 'MAMISHO'
class TestEscuderia(TestCase):
    """Unit tests for Escuderia (racing team): adding, removing,
    activating and substituting drivers (Piloto)."""
    def test_agregar_piloto_1(self):
        """
        Add a driver to a team: success case.
        Adding a well-formed Piloto to the Escuderia must return True.
        :param p1: driver added to the team
        :param e1: team under test
        """
        p1 = Piloto("AAA", "Alni")
        e1 = Escuderia("Ferrari")
        self.assertEqual(e1.agregar_piloto(p1), True)
    def test_agregar_piloto_2(self):
        """
        Add a driver to a team: failure case (duplicate id).
        A driver whose id already exists in the team must be rejected.
        :param p1: driver added to the team
        :param p2: driver with a duplicate id used to trigger the failure
        :param e1: team under test
        """
        p1 = Piloto("AAA", "Alni")
        p2 = Piloto("AAA", "Pepe")
        e1 = Escuderia("Ferrari")
        e1.agregar_piloto(p1)
        self.assertEqual(e1.agregar_piloto(p2), False)
    def test_agregar_piloto_3(self):
        """
        Add a driver to a team: failure case (wrong type).
        The object added to the team must actually be a Piloto;
        here a mock stands in for a non-Piloto object.
        :param p1: mock object added to the team
        :param e1: team under test
        """
        p1 = mock(Piloto)
        e1 = Escuderia("Ferrari")
        self.assertEqual(e1.agregar_piloto(p1), False)
    def test_eliminar_piloto_1(self):
        """
        Remove an existing, non-active driver from the team: success case.
        :param p1: driver present in the team
        :param p2: driver present in the team
        :param p3: driver present in the team, to be removed
        """
        p1 = Piloto("AAA", "Piloto A")
        p2 = Piloto("BBB", "Piloto B")
        p3 = Piloto("CCC", "Piloto C")
        e1 = Escuderia("Ferrari")
        e1.agregar_piloto(p1)
        e1.agregar_piloto(p2)
        e1.agregar_piloto(p3)
        self.assertEqual(e1.eliminar_piloto(p3), True)
    def test_eliminar_piloto_2(self):
        """
        Remove a non-existent driver from the team: failure case.
        Removing a driver that is not in the team must be rejected,
        i.e. inputs are validated before deletion.
        :param p1: driver present in the team
        :param p2: driver present in the team
        :param p3: driver NOT added to the team, whose removal is attempted
        """
        p1 = Piloto("AAA", "Piloto A")
        p2 = Piloto("BBB", "Piloto B")
        p3 = Piloto("CCC", "Piloto C")
        e1 = Escuderia("Ferrari")
        e1.agregar_piloto(p1)
        e1.agregar_piloto(p2)
        self.assertEqual(e1.eliminar_piloto(p3), False)
    def test_eliminar_piloto_3(self):
        """
        Remove an existing, ACTIVE driver from the team: success case.
        p1 and p2 are made the active drivers, then p2 (active) is removed.
        :param p1: active driver present in the team
        :param p2: active driver present in the team, to be removed
        :param p3: non-active driver present in the team
        """
        p1 = Piloto("AAA", "Piloto A")
        p2 = Piloto("BBB", "Piloto B")
        p3 = Piloto("CCC", "Piloto C")
        e1 = Escuderia("Ferrari")
        e1.agregar_piloto(p1)
        e1.agregar_piloto(p2)
        e1.agregar_piloto(p3)
        e1.definir_pilotos_activos(p1, p2)
        self.assertEqual(e1.eliminar_piloto(p2), True)
    def test_definir_pilotos_activos_1(self):
        """
        Define the active drivers: success case.
        Two drivers from the team are marked active.
        :param p1: non-active driver used for the test
        :param p2: non-active driver used for the test
        :param p3: non-active driver used for the test
        :param e1: team under test
        """
        p1 = Piloto("AAA", "Piloto A")
        p2 = Piloto("BBB", "Piloto B")
        p3 = Piloto("CCC", "Piloto C")
        e1 = Escuderia("Ferrari")
        e1.agregar_piloto(p1)
        e1.agregar_piloto(p2)
        e1.agregar_piloto(p3)
        self.assertEqual(e1.definir_pilotos_activos(p1, p2), True)
    def test_definir_pilotos_activos_2(self):
        """
        Define the active drivers: failure case (wrong type).
        Both objects passed must be real Pilotos; here one of them
        is a mock, so the call must be rejected.
        :param p1: non-active driver used for the test
        :param p2: non-active driver used for the test
        :param p3: mock of a Piloto used to trigger the failure
        :param e1: team under test
        """
        p1 = Piloto("AAA", "Piloto A")
        p2 = Piloto("BBB", "Piloto B")
        p3 = mock(Piloto("CCC", "Piloto C"))
        e1 = Escuderia("Ferrari")
        e1.agregar_piloto(p1)
        e1.agregar_piloto(p2)
        e1.agregar_piloto(p3)
        self.assertEqual(e1.definir_pilotos_activos(p1, p3), False)
    def test_sustituir_piloto_1(self):
        """
        Substitute a driver in the team: success case.
        NOTE(review): despite the name, this test calls
        definir_pilotos_activos rather than a sustituir_piloto method,
        and p3 was never added to the team -- looks like a copy/paste
        slip from the tests above; confirm the intended API call.
        :param p1: driver in the team
        :param p2: driver in the team
        :param p3: driver meant to substitute another in the team
        """
        p1 = Piloto("AAA", "Piloto A")
        p2 = Piloto("BBB", "Piloto B")
        p3 = Piloto("CCC", "Piloto C")
        e1 = Escuderia("Ferrari")
        e1.agregar_piloto(p1)
        e1.agregar_piloto(p2)
        self.assertEqual(e1.definir_pilotos_activos(p1, p3), True)
    def test_sustituir_piloto_2(self):
        """
        Substitute a driver in the team: failure case (wrong type).
        NOTE(review): as in test_sustituir_piloto_1, this calls
        definir_pilotos_activos rather than a sustituir_piloto
        method -- confirm the intended API call.
        :param p1: driver in the team
        :param p2: driver in the team
        :param p3: mock of a Piloto attempting the substitution
        """
        p1 = Piloto("AAA", "Piloto A")
        p2 = Piloto("BBB", "Piloto B")
        p3 = mock(Piloto("CCC", "Piloto C"))
        e1 = Escuderia("Ferrari")
        e1.agregar_piloto(p1)
        e1.agregar_piloto(p2)
        self.assertEqual(e1.definir_pilotos_activos(p1, p3), False)
| |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from .spectrum_utils import copy_spectrum_with_new_mz_and_intensities
from .spectrum_utils import binary_search_for_left_range
from .spectrum_utils import binary_search_for_right_range, take_closest_lo
from .spectrum_utils import ThresholdedPeakFiltering
def is_window_vlm(spectrum_by_peak, window_start_idx, window_end_idx, n_spectra):
    """Return True when the window holds exactly one peak from each spectrum.

    A valid VLM window contains n_spectra peaks, all originating from
    distinct spectra.
    """
    # Guard: the window must contain exactly one peak per spectrum.
    if window_end_idx - window_start_idx + 1 != n_spectra:
        return False
    window_spectra = spectrum_by_peak[window_start_idx : window_end_idx + 1]
    # All peak origins must be distinct (no spectrum contributes twice).
    return len(np.unique(window_spectra)) == len(window_spectra)
class VirtualLockMassCorrector(object):
    """Detects Virtual Lock Masses (VLMs) across a set of spectra and uses
    them to correct m/z drift in individual spectra.

    A VLM is a peak that appears in every spectrum within a window of
    +/- `window_size` ppm. Once fitted, each spectrum is corrected by
    interpolating per-VLM correction ratios along its m/z axis.

    Fixes over the previous version:
    - ``np.bool`` / ``np.float`` aliases (removed in NumPy 1.24) replaced by
      the builtin types;
    - the discarded ``np.around`` result in ``_find_vlock_mass_in_spectra``
      is now assigned (the statement was a no-op);
    - dead locals removed.
    """

    def __init__(self, window_size, minimum_peak_intensity, max_skipped_points=None,
                 mode='flat', poly_degree=1):
        """
        Initiate a VirtualLockMassCorrector object.
        :param window_size: The center-to-side width of the VLM window, in ppm
        :param minimum_peak_intensity: Minimum peak intensity to be considered by the algorithm
        :param max_skipped_points: Maximum number of VLMs that may be missing from a spectrum
            during the transform step. None=any.
        :param mode: How the transformation is applied before the first VLM and after the last VLM. [flat only]
        :param poly_degree: Degree of the function used to calculate correction ratio between two VLM.
        :return:
        """
        self.window_size = window_size
        # ppm expressed as a fractional (center-to-side) tolerance.
        self.window_size_ppm = 1.0 * window_size / 10**6
        self.minimum_peak_intensity = minimum_peak_intensity
        self._vlm_mz = None  # Set by fit().
        self.max_skipped_points = max_skipped_points
        self.mode = mode
        self.polynomial_degree = poly_degree

    def _compute_vlm_positions(self, peak_groups):
        """
        Tries to set the center of mass of each group as its center. If this shifts the center to a point that is more
        than w ppm from the first peak of the group, it is discarded.
        Note: assumes that the peak groups are sorted
        """
        vlm_mz_values = []
        for group in peak_groups:
            center_of_mass = np.mean(group)
            window_start = center_of_mass * (1 - self.window_size_ppm)
            window_end = center_of_mass * (1 + self.window_size_ppm)
            # Keep the group only if the window around its center still
            # covers every peak in the group.
            if window_start <= np.min(group) and window_end >= np.max(group):
                vlm_mz_values.append(center_of_mass)
        return np.array(vlm_mz_values)

    def _find_vlm_peak_groups(self, spectra):
        """Scan all peaks of all spectra (sorted by m/z) and collect maximal
        groups that satisfy the VLM definition: exactly one peak per spectrum
        inside a single window."""
        # List all peaks of all spectra
        peaks = np.concatenate(list(s.mz_values for s in spectra))
        spectrum_by_peak = np.concatenate(list(np.ones(len(s), dtype=np.uint) * i for i, s in enumerate(spectra)))
        # Sort the peaks in increasing order of m/z
        sorter = np.argsort(peaks)
        peaks = peaks[sorter]
        spectrum_by_peak = spectrum_by_peak[sorter]
        vlm_peak_groups = []
        # Start by considering the first window that contains the first peak
        window_start_idx = 0
        window_end_idx = np.searchsorted(peaks, peaks[0], side="right") - 1
        while window_start_idx < len(peaks):
            # Check if the current group of peaks matches the definition of a VLM
            if is_window_vlm(spectrum_by_peak, window_start_idx, window_end_idx, len(spectra)):
                vlm_peak_groups.append(peaks[window_start_idx : window_end_idx + 1])
            # Find the m/z of the first peak in the current group
            window_first_peak_mz = peaks[window_start_idx]
            # Check if there are peaks beyond the last peak of the group
            if window_end_idx < len(peaks) - 1:
                # The outer right peak is the peak following the last peak of the group in the list.
                # We find the lower bound (in m/z) of the first window that contains this peak.
                outer_right_peak_window_start_mz = (peaks[window_end_idx + 1] / (1 + self.window_size_ppm)) * (1 - self.window_size_ppm)
                # Case 1: There exists a window containing the first peak of the group and the outer right peak
                if outer_right_peak_window_start_mz <= window_first_peak_mz:
                    # We include the outer right peak in the group
                    window_end_idx = np.searchsorted(peaks, peaks[window_end_idx + 1], side='right') - 1
                # Case 2: There does not exist a window containing the first peak and the outer right peak simultaneously
                else:
                    # Since the condition is false, there necessarily exists a non-zero space between the first peak of
                    # the window and the lower bound of the first window where the outer right peak is included. We thus
                    # consider the window containing all the peaks in the group, except the first peak.
                    window_start_idx = np.searchsorted(peaks, window_first_peak_mz, side='right')
            else:
                # There are no peaks with a greater m/z than the last peak of the group. We simply remove the first peak.
                window_start_idx = np.searchsorted(peaks, window_first_peak_mz, side='right')
        return vlm_peak_groups

    def _make_vlm_set_consistent(self, vlm_mz_values):
        """
        Rejects any VLMs for which the windows overlap
        """
        vlm_mz_values = np.asarray(vlm_mz_values)
        vlm_window_starts = vlm_mz_values * (1 - self.window_size_ppm)
        vlm_window_ends = vlm_mz_values * (1 + self.window_size_ppm)
        # Adjacent windows overlap when a window starts before its left
        # neighbour ends; both members of such a pair are rejected.
        inconsistencies = np.where(vlm_window_starts[1:] <= vlm_window_ends[: -1])[0]
        # Builtin `bool` instead of the removed `np.bool` alias.
        rejection_mask = np.zeros(len(vlm_mz_values), dtype=bool)
        rejection_mask[inconsistencies] = True
        rejection_mask[inconsistencies + 1] = True
        return vlm_mz_values[~rejection_mask]

    def _preprocess_spectra(self, spectra):
        """Drop peaks below the configured minimum intensity."""
        return ThresholdedPeakFiltering(threshold=self.minimum_peak_intensity, remove_mz_values=True).fit_transform(spectra)

    def _find_vlm_peaks(self, spectra):
        """Full VLM detection pipeline: intensity-filter the spectra, group
        candidate peaks, position each VLM, and drop overlapping ones."""
        spectra = self._preprocess_spectra(spectra)
        peak_groups = self._find_vlm_peak_groups(spectra)
        vlm_mz_values = self._compute_vlm_positions(peak_groups)
        return self._make_vlm_set_consistent(vlm_mz_values)

    def _apply_correction(self, spectrum):
        """
        Apply the VLM to a spectrum
        :param spectrum: A pymspec spectrum to correct
        :return: A corrected pymspec spectrum.
        """
        if len(self._vlm_mz) <= 2:
            raise ValueError("There must be at least 3 points to use virtual lock-mass")
        # Correction is done on a copy of the spectrum. The original spectrum will not be modified.
        found_vlm, observed_mz = self._find_vlock_mass_in_spectra(spectrum)  # Find the corresponding points
        correction_ratios = self._calculate_correction_ratios(found_vlm, observed_mz)  # Ratio at each VLM
        # Correct the points smaller than observed_mz[0]
        corrected_mz = self._correct_points_smaller_than(spectrum, observed_mz[0], correction_ratios[0])
        # Correct all points between observed_mz[0] and observed_mz[-1]
        index = 0
        while index < len(correction_ratios) - 1:
            corrected_mz += self._correct_point_between(spectrum, observed_mz[index], observed_mz[index + 1],
                                                        correction_ratios[index], correction_ratios[index + 1])
            index += 1
        # Correct the points greater than observed_mz[-1]
        corrected_mz += self._correct_points_greater_than(spectrum, observed_mz[-1], correction_ratios[-1])
        # Simple verification that we still have the same number of points...
        if len(corrected_mz) != len(spectrum.mz_values):
            raise ValueError("There should be the same number of mz than in the initial spectrum: %s vs %s"
                             % (len(corrected_mz), len(spectrum.mz_values)))
        # Create a copy and return
        spect_copy = copy_spectrum_with_new_mz_and_intensities(spectrum, np.array(corrected_mz),
                                                               spectrum.intensity_values)  # Use the same intensities
        return spect_copy

    def _calculate_correction_ratios(self, found_vlm, observed_mz):
        """
        Do some checks and return the correction for each combination of vlm and observed point.
        :param found_vlm: A list of VLM
        :param observed_mz: The mz from a spectrum corresponding to the VLM
        :return: A list of correction ratio
        """
        if len(observed_mz) <= 0 or len(found_vlm) <= 0:
            raise ValueError("There is no value in vlock_mass or observed_mz")
        if len(observed_mz) != len(found_vlm):
            raise ValueError("v_lock_mass and observed_mz have not the same amount of values")
        correction_ratios = []
        for i, v_mz in enumerate(found_vlm):
            o_mz = observed_mz[i]
            if v_mz <= 0 or o_mz <= 0:
                raise ValueError("Cannot calculate ratio for a null or negative mz")
            # Builtin `float` instead of the removed `np.float` alias.
            ratio = float(v_mz / o_mz)
            correction_ratios.append(ratio)
        return correction_ratios

    def _find_vlock_mass_in_spectra(self, spectrum):
        """
        Search each vlm in a spectrum and return the list of vlm found and their correspondance in the spectrum.
        :param spectrum: A pymspec spectrum
        :return: two lists: The vlm found in the spectrum and their correspondance
        """
        preprocessed_spect = ThresholdedPeakFiltering(threshold=self.minimum_peak_intensity,
                                                      remove_mz_values=True).fit_transform([spectrum])[0]
        observed_mz = []
        vlm_found = []
        number_skipped_points = 0
        for vlm in self._vlm_mz:
            try:
                # NOTE(review): the previous code reset its search lower bound
                # to 0 on every iteration, so each VLM searched the full peak
                # list; the equivalent lo=0 is kept here to preserve behavior.
                best_match, _ = take_closest_lo(preprocessed_spect.mz_values, vlm, lo=0)
                mz_difference = abs(best_match - vlm)  # Check if the vlm is in the window.
                if mz_difference > vlm * self.window_size_ppm:  # window_size is from center to side, not side to side.
                    raise ValueError("A VLM was not found in the appropriate window")
                observed_mz.append(best_match)
                vlm_found.append(vlm)
            except ValueError as error:
                if self.max_skipped_points is None:
                    pass  # If None, any number of VLMs may go unfound.
                else:
                    number_skipped_points += 1
                    if number_skipped_points > self.max_skipped_points:
                        raise error
        observed_mz = np.array(observed_mz)
        vlm_found = np.array(vlm_found)
        # Bug fix: np.around is not in-place; the rounded result must be
        # assigned. Previously this statement had no effect.
        observed_mz = np.around(observed_mz, decimals=4)
        return vlm_found, observed_mz

    def _correct_points_smaller_than(self, spectrum, mz, ratio):
        """
        Will apply the correction ratio to every points <mz.
        No modification to correction ratio
        :param spectrum: the spectrum to correct
        :param mz: the observed mz of the first virtual lock mass
        :param ratio: the correction ratio of the first virtual lock mass
        :return: the corrected mz values of the spectrum, only those inferior to the first virtual lock mass
        """
        if mz <= 0 or ratio <= 0:
            raise ValueError("Mz and ratio cannot be null or negative")
        mz_list = spectrum.mz_values
        if self.mode == 'flat':
            right = binary_search_for_right_range(mz_list, mz)
            mz_to_be_corrected = mz_list[:right]
            corrected_mz = mz_to_be_corrected * ratio
        else:
            raise NotImplementedError("Use flat mode.")
        return corrected_mz.tolist()

    def _correct_points_greater_than(self, spectrum, mz, ratio):
        """
        :param spectrum: the spectrum to correct
        :param mz: the observed mz of the last virtual lock mass
        :param ratio: the correction ratio of the last virtual lock mass
        :return: the corrected mz values of the spectrum, only those superior to the last virtual lock mass
        """
        if mz <= 0 or ratio <= 0:
            raise ValueError("Mz and ratio cannot be null or negative")
        mz_list = spectrum.mz_values
        if self.mode == 'flat':
            left = binary_search_for_right_range(mz_list, mz)
            mz_to_be_corrected = mz_list[left:]
            corrected_mz = mz_to_be_corrected * ratio
        else:
            raise NotImplementedError("Use flat mode.")
        return corrected_mz.tolist()

    def _correct_point_between(self, spectrum, mz1, mz2, ratio1, ratio2):
        """
        :param spectrum: the spectrum to correct
        :param mz1: an observed mz of a virtual lock mass (smaller than the 2nd)
        :param mz2: an observed mz of a virtual lock mass (greater than the first)
        :param ratio1: correction ratio of mz1
        :param ratio2: correction ratio of mz2
        :return: the corrected mz values from the spectrum that are between mz1 and mz2
        """
        if mz1 <= 0 or mz2 <= 0 or ratio1 <= 0 or ratio2 <= 0:
            raise ValueError("Mz and ratios cannot be null or negative")
        function = self._create_correction_function(mz1, mz2, ratio1, ratio2)
        mz_list = spectrum.mz_values
        right = binary_search_for_right_range(mz_list, mz2)
        left = binary_search_for_left_range(mz_list, mz1)
        mz_to_be_corrected = mz_list[left:right]
        # Interpolated per-point ratios between the two VLMs.
        ratios = [function(mz) for mz in mz_to_be_corrected]
        corrected_mz = mz_to_be_corrected * ratios
        return corrected_mz.tolist()

    def _create_correction_function(self, mz1, mz2, ratio1, ratio2):
        """
        Create the y = m*x + b function for the 2 points in parameter
        :param mz1: lowest mz
        :param mz2: highest mz
        :param ratio1: correction ratio at mz1
        :param ratio2: correction ratio at mz2
        :return: a numpy function that can correct the values between mz1 and mz2
        :raises: ValueError if an mz or ratio is <= 0
        """
        if mz1 <= 0 or mz2 <= 0 or ratio1 <= 0 or ratio2 <= 0:
            raise ValueError("Mz and ratios cannot be null or negative")
        if mz1 > mz2:
            raise ValueError("mz2 must be greater than mz1")
        if self.polynomial_degree == 1:
            # Linear interpolation between the two correction ratios.
            m = (ratio2 - ratio1) / (mz2 - mz1)
            b = ratio2 - (m * mz2)
            function = lambda x: m * x + b
        else:
            # Higher-degree fit through the two ratio points.
            x = np.array([mz1, mz2])
            y = np.array([ratio1, ratio2])
            z = np.polyfit(x, y, self.polynomial_degree)
            function = np.poly1d(z)
        return function

    def fit(self, spectra):
        """Detect the VLMs shared by `spectra` and store their m/z positions
        for later use by transform()."""
        self._vlm_mz = self._find_vlm_peaks(spectra)

    def transform(self, spectra):
        """Apply the fitted VLM correction to each spectrum.

        :param spectra: iterable of pymspec spectra
        :return: numpy array of corrected spectra
        :raises RuntimeError: if fit() has not been called first
        """
        if self._vlm_mz is None:
            raise RuntimeError("The VLM corrector must be fitted before applying a correction.")
        return np.asarray([self._apply_correction(spectrum) for spectrum in spectra])
| |
# -*- coding: utf-8 -*-
"""Provides base object for T1 data classes."""
from __future__ import absolute_import, division
from datetime import datetime, timedelta
from warnings import warn
from .config import PATHS
from .connection import Connection
from .errors import ClientError
from terminalone.utils import FixedOffset
from .vendor import six
class Entity(Connection):
    """Superclass for all the various T1 entities.

    Implements methods for data validation and saving to T1. Entity and its
    subclasses should not be instantiated directly; instead, an instance of
    T1 should instantiate these classes, passing in the proper session, etc.

    Subclasses are expected to provide ``_pull`` / ``_push`` (attribute name
    -> converter callable), ``collection`` and ``_relations``; none of those
    are defined here, so their exact contracts are assumed from usage.
    """
    # Attributes the API reports but that must never be sent back on POST.
    _readonly = {'id', 'build_date', 'created_on',
                 '_type', # _type is used because "type" is taken by User.
                 'updated_on', 'last_modified'}
    # Attributes that become read-only only when updating an existing
    # entity; subclasses override as needed.
    _readonly_update = set()
    def __init__(self, session, properties=None, **kwargs):
        """Passes session to underlying connection and validates properties passed in.

        Entity, or any class deriving from it, should never be instantiated directly.
        `T1` class should, with session information, instantiate the relevant
        subclass.

        :param session: requests.Session to be used
        :param properties: dict of entity properties
        :param kwargs: additional kwargs to pass to Connection
        """
        # __setattr__ is overridden below. So, to set self.properties as an empty
        # dict, we need to use the built-in __setattr__ method; thus, super()
        super(Entity, self).__init__(_create_session=False, **kwargs)
        super(Entity, self).__setattr__('session', session)
        if properties is None:
            super(Entity, self).__setattr__('properties', {})
            return
        # Coerce raw values through the subclass's _pull converters.
        # NOTE(review): this mutates the caller's dict in place -- confirm intended.
        for attr, val in six.iteritems(properties):
            if self._pull.get(attr) is not None:
                properties[attr] = self._pull[attr](val)
        super(Entity, self).__setattr__('properties', properties)
    def __repr__(self):
        # Debug-friendly repr listing every current property.
        return '{cname}({props})'.format(
            cname=type(self).__name__,
            props=', '.join(
                '{key}={value!r}'.format(key=key, value=value)
                for key, value in six.iteritems(self.properties)
            )
        )
    def __getitem__(self, attribute):
        """DEPRECATED way of retrieving properties like with dictionary"""
        warn('Accessing entity like a dictionary will be removed: '
             'please discontinue use.',
             DeprecationWarning, stacklevel=2)
        if attribute in self.properties:
            return self.properties[attribute]
        else:
            raise AttributeError(attribute)
    def __setitem__(self, attribute, value):
        """DEPRECATED way of setting properties like with dictionary"""
        warn('Accessing entity like a dictionary will be removed: '
             'please discontinue use.',
             DeprecationWarning, stacklevel=2)
        # NOTE(review): unlike __setattr__, an unknown attribute here raises
        # KeyError because _pull[attribute] is unguarded -- confirm intended.
        self.properties[attribute] = self._pull[attribute](value)
    def __getattr__(self, attribute):
        # Called only when normal attribute lookup fails: fall back to the
        # properties dict.
        if attribute in self.properties:
            return self.properties[attribute]
        else:
            raise AttributeError(attribute)
    def __setattr__(self, attribute, value):
        # Every assignment is routed into the properties dict, coerced
        # through the subclass's _pull converter when one exists.
        if self._pull.get(attribute) is not None:
            self.properties[attribute] = self._pull[attribute](value)
        else:
            self.properties[attribute] = value
    def __delattr__(self, attribute):
        # Deleting an attribute removes it from the properties dict.
        if attribute in self.properties:
            del self.properties[attribute]
        else:
            raise AttributeError(attribute)
    def __getstate__(self):
        """Custom pickling. TODO"""
        return super(Entity, self).__getstate__()
    def __setstate__(self, state):
        """Custom unpickling. TODO"""
        return super(Entity, self).__setstate__(state)
    @staticmethod
    def _int_to_bool(value):
        """Convert integer string {"0","1"} to its corresponding bool"""
        return bool(int(value))
    @staticmethod
    def _none_to_empty(val):
        """Convert None to empty string.

        Necessary for fields that are required POST but have no logical value.
        """
        if val is None:
            return ""
        return val
    @staticmethod
    def _enum(all_vars, default):
        """Check input against accepted set or return a default."""
        def get_value(test_value):
            if test_value in all_vars:
                return test_value
            else:
                return default
        return get_value
    @staticmethod
    def _default_empty(default):
        """Check an input against its falsy value or return a default."""
        def get_value(test_value):
            if test_value:
                return test_value
            else:
                return default
        return get_value
    @staticmethod
    def _strpt(dt_string):
        """Convert ISO string time to datetime.datetime. No-op on datetimes"""
        if isinstance(dt_string, datetime):
            return dt_string
        # A trailing numeric UTC offset ("+HHMM"/"-HHMM") may be present.
        if dt_string[-5] == '-' or dt_string[-5] == '+':
            offset_str = dt_string[-5:]
            dt_string = dt_string[:-5]
            # Total offset in minutes: hours * 60 + minutes.
            offset = int(offset_str[-4:-2]) * 60 + int(offset_str[-2:])
            if offset_str[0] == "-":
                offset = -offset
        else:
            offset = 0
        return datetime.strptime(dt_string, "%Y-%m-%dT%H:%M:%S").replace(tzinfo=FixedOffset(offset))
    @staticmethod
    def _strft(dt_obj, null_on_none=False):
        """Convert datetime.datetime to ISO string.

        :param null_on_none: bool Occasionally, we will actually want to send an
            empty string where a datetime would typically go. For instance, if a
            strategy has an end_date set, but then wants to change to use
            campaign end date, the POST will normally omit the end_date field
            (because you cannot send it with use_campaign_end).
            However, this will cause an error because there was an end_date set
            previously. So, we need to send an empty string to indicate that it
            should be nulled out. In cases like this, null_on_none should be set
            to True in the entity's _push dict using a partial to make it a
            single-argument function. See strategy.py
        :raise AttributeError: if not provided a datetime
        :return: str
        """
        try:
            return dt_obj.strftime("%Y-%m-%dT%H:%M:%S")
        except AttributeError:
            if dt_obj is None and null_on_none:
                return ""
            raise
    def _validate_read(self, data):
        """Convert XML strings to Python objects"""
        for key, value in six.iteritems(data):
            if key in self._pull:
                data[key] = self._pull[key](value)
        return data
    def _conds_for_removal(self, key, update, push_fn):
        """Determine if an attribute should be removed before POST.

        Attributes should be removed if we don't expect them or if they
        aren't to be written to. Because relations are included as attributes
        as well, remove these too.
        """
        return (key in self._readonly or
                key in self._relations or
                (update and key in self._readonly_update) or
                push_fn is False)
    def _validate_write(self, data):
        """Convert Python objects to XML values.

        If attribute should not be sent, remove it from the body.
        """
        # An entity with an id already exists server-side: this is an update.
        update = 'id' in self.properties
        if 'version' not in data and update:
            data['version'] = self.version
        # Iterate over a copy so entries can be deleted during the walk.
        for key, value in six.iteritems(data.copy()):
            push_fn = self._push.get(key, False)
            if self._conds_for_removal(key, update, push_fn):
                del data[key]
                continue
            if push_fn is not None:
                data[key] = self._push[key](value)
            else:
                data[key] = value
        return data
    def _construct_url(self, addl=None):
        """Construct URL for post.

        Collection, ID if present, additional values (like "history") if needed.
        """
        url = [self.collection, ]
        if self.properties.get('id'):
            url.append(str(self.id))
        if addl is not None:
            url.extend(addl)
        return '/'.join(url)
    def _update_self(self, entity):
        """Update own properties based on values returned by API."""
        for key, value in six.iteritems(entity):
            setattr(self, key, value)
    def is_property(self, prop):
        # True when `prop` is a recognized entity attribute (has a _pull entry).
        if prop in self._pull:
            return True
        return False
    def set(self, properties):
        """Set properties for object from given dict of properties.

        Essentially a merge.
        """
        for attr, value in six.iteritems(properties):
            setattr(self, attr, value)
    def save(self, data=None, url=None):
        """Save object to T1."""
        if url is None:
            url = self._construct_url()
        if data is not None:
            data = self._validate_write(data)
        else:
            data = self._validate_write(self.properties)
        entity, _ = super(Entity, self)._post(PATHS['mgmt'], url, data=data)
        # _post yields entities; adopt the first one's values as our own.
        self._update_self(next(entity))
    def update(self, *args, **kwargs):
        """Alias for save"""
        return self.save(*args, **kwargs)
    def history(self):
        """Retrieve changelog entry for entity."""
        if not self.properties.get('id'):
            raise ClientError('Entity ID not given')
        url = self._construct_url(addl=['history', ])
        history, _ = super(Entity, self)._get(PATHS['mgmt'], url)
        return history
class SubEntity(Entity):
    """Sub-entity, addressed as /collection/:id/sub-entity.

    URLs for these objects must include the parent collection and the
    parent's ID, so URL construction differs from Entity's.
    """
    def _construct_url(self, addl=None):
        """Build the path: parent/parent_id/collection[/id][/addl...]."""
        pieces = [self.parent, str(self.parent_id), self.collection]
        entity_id = self.properties.get('id')
        if entity_id:
            pieces.append(str(entity_id))
        if addl is not None:
            pieces.extend(addl)
        return '/'.join(pieces)
| |
"""
This module processes Python exceptions that relate to HTTP exceptions
by defining a set of exceptions, all subclasses of HTTPException.
Each exception, in addition to being a Python exception that can be
raised and caught, is also a WSGI application and ``webob.Response``
object.
This module defines exceptions according to RFC 2068 [1]_ : codes with
100-300 are not really errors; 400's are client errors, and 500's are
server errors. According to the WSGI specification [2]_ , the application
can call ``start_response`` more than once only under two conditions:
(a) the response has not yet been sent, or (b) if the second and
subsequent invocations of ``start_response`` have a valid ``exc_info``
argument obtained from ``sys.exc_info()``. The WSGI specification then
requires the server or gateway to handle the case where content has been
sent and then an exception was encountered.
Exception
HTTPException
HTTPOk
* 200 - :class:`HTTPOk`
* 201 - :class:`HTTPCreated`
* 202 - :class:`HTTPAccepted`
* 203 - :class:`HTTPNonAuthoritativeInformation`
* 204 - :class:`HTTPNoContent`
* 205 - :class:`HTTPResetContent`
* 206 - :class:`HTTPPartialContent`
HTTPRedirection
* 300 - :class:`HTTPMultipleChoices`
* 301 - :class:`HTTPMovedPermanently`
* 302 - :class:`HTTPFound`
* 303 - :class:`HTTPSeeOther`
* 304 - :class:`HTTPNotModified`
* 305 - :class:`HTTPUseProxy`
* 307 - :class:`HTTPTemporaryRedirect`
HTTPError
HTTPClientError
* 400 - :class:`HTTPBadRequest`
* 401 - :class:`HTTPUnauthorized`
* 402 - :class:`HTTPPaymentRequired`
* 403 - :class:`HTTPForbidden`
* 404 - :class:`HTTPNotFound`
* 405 - :class:`HTTPMethodNotAllowed`
* 406 - :class:`HTTPNotAcceptable`
* 407 - :class:`HTTPProxyAuthenticationRequired`
* 408 - :class:`HTTPRequestTimeout`
* 409 - :class:`HTTPConflict`
* 410 - :class:`HTTPGone`
* 411 - :class:`HTTPLengthRequired`
* 412 - :class:`HTTPPreconditionFailed`
* 413 - :class:`HTTPRequestEntityTooLarge`
* 414 - :class:`HTTPRequestURITooLong`
* 415 - :class:`HTTPUnsupportedMediaType`
* 416 - :class:`HTTPRequestRangeNotSatisfiable`
* 417 - :class:`HTTPExpectationFailed`
* 422 - :class:`HTTPUnprocessableEntity`
* 423 - :class:`HTTPLocked`
* 424 - :class:`HTTPFailedDependency`
* 428 - :class:`HTTPPreconditionRequired`
* 429 - :class:`HTTPTooManyRequests`
* 431 - :class:`HTTPRequestHeaderFieldsTooLarge`
* 451 - :class:`HTTPUnavailableForLegalReasons`
HTTPServerError
* 500 - :class:`HTTPInternalServerError`
* 501 - :class:`HTTPNotImplemented`
* 502 - :class:`HTTPBadGateway`
* 503 - :class:`HTTPServiceUnavailable`
* 504 - :class:`HTTPGatewayTimeout`
* 505 - :class:`HTTPVersionNotSupported`
* 511 - :class:`HTTPNetworkAuthenticationRequired`
Usage notes
-----------
The HTTPException class is complicated by 4 factors:
1. The content given to the exception may either be plain-text or
as html-text.
2. The template may want to have string-substitutions taken from
the current ``environ`` or values from incoming headers. This
is especially troublesome due to case sensitivity.
3. The final output may either be text/plain or text/html
mime-type as requested by the client application.
4. Each exception has a default explanation, but those who
raise exceptions may want to provide additional detail.
Subclass attributes and call parameters are designed to provide an easier path
through the complications.
Attributes:
``code``
the HTTP status code for the exception
``title``
remainder of the status line (stuff after the code)
``explanation``
a plain-text explanation of the error message that is
not subject to environment or header substitutions;
it is accessible in the template via %(explanation)s
``detail``
a plain-text message customization that is not subject
to environment or header substitutions; accessible in
the template via %(detail)s
``body_template``
a content fragment (in HTML) used for environment and
header substitution; the default template includes both
the explanation and further detail provided in the
message
Parameters:
``detail``
a plain-text override of the default ``detail``
``headers``
a list of (k,v) header pairs
``comment``
a plain-text additional information which is
usually stripped/hidden for end-users
``body_template``
a string.Template object containing a content fragment in HTML
that frames the explanation and further detail
To override the template (which is HTML content) or the plain-text
explanation, one must subclass the given exception; or customize it
after it has been created. This particular breakdown of a message
into explanation, detail and template allows both the creation of
plain-text and html messages for various clients as well as
error-free substitution of environment variables and headers.
The subclasses of :class:`~_HTTPMove`
(:class:`~HTTPMultipleChoices`, :class:`~HTTPMovedPermanently`,
:class:`~HTTPFound`, :class:`~HTTPSeeOther`, :class:`~HTTPUseProxy` and
:class:`~HTTPTemporaryRedirect`) are redirections that require a ``Location``
field. Reflecting this, these subclasses have two additional keyword arguments:
``location`` and ``add_slash``.
Parameters:
``location``
to set the location immediately
``add_slash``
set to True to redirect to the same URL as the request, except with a
``/`` appended
Relative URLs in the location will be resolved to absolute.
References:
.. [1] http://www.python.org/peps/pep-0333.html#error-handling
.. [2] http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5
"""
from string import Template
import re
import sys
from webob.compat import (
class_types,
text_,
text_type,
urlparse,
)
from webob.request import Request
from webob.response import Response
from webob.util import (
html_escape,
warn_deprecation,
)
# Matches any HTML/XML tag (non-greedy, across newlines) so markup can be
# stripped when rendering the plain-text body.
tag_re = re.compile(r'<.*?>', re.S)
# Matches <br> variants so they can be converted back into newlines before
# the remaining tags are stripped.
br_re = re.compile(r'<br.*?>', re.I|re.S)
# Matches HTML comment delimiters so hidden comments can be removed.
comment_re = re.compile(r'<!--|-->')
def no_escape(value):
    """Coerce *value* to a text string without any HTML escaping.

    ``None`` maps to the empty string; text passes through unchanged;
    other objects are converted via ``__unicode__`` when available, with
    byte strings decoded as UTF-8.
    """
    if value is None:
        return ''
    if isinstance(value, text_type):
        return value
    if hasattr(value, '__unicode__'):
        value = value.__unicode__()
    if isinstance(value, bytes):
        return text_(value, 'utf-8')
    return text_type(value)
def strip_tags(value):
    """Reduce an HTML fragment to plain text.

    Newlines are flattened to spaces first, then ``<br>`` tags are turned
    back into newlines, and finally HTML comments and remaining tags are
    removed.
    """
    value = value.replace('\n', ' ').replace('\r', '')
    value = br_re.sub('\n', value)
    value = comment_re.sub('', value)
    return tag_re.sub('', value)
class HTTPException(Exception):
    # Base for all HTTP exceptions: an Exception that also behaves as a WSGI
    # application by delegating to the response object it wraps.
    def __init__(self, message, wsgi_response):
        # message: text for the Exception; wsgi_response: a WSGI callable
        # (typically a Response) served when the exception is invoked.
        Exception.__init__(self, message)
        self.wsgi_response = wsgi_response
    def __call__(self, environ, start_response):
        # Serve the wrapped response as a WSGI application.
        return self.wsgi_response(environ, start_response)
class WSGIHTTPException(Response, HTTPException):
    """An HTTP exception that is itself a WSGI application/Response.

    Subclasses customize ``code``, ``title``, ``explanation`` and
    optionally ``body_template_obj``; the machinery here renders either an
    HTML or a plain-text body depending on the client's Accept header.
    """

    ## You should set in subclasses:
    # code = 200
    # title = 'OK'
    # explanation = 'why this happens'
    # body_template_obj = Template('response template')
    code = 500
    title = 'Internal Server Error'
    explanation = ''
    # Fragment substituted into the plain/html page templates below.
    body_template_obj = Template('''\
${explanation}<br /><br />
${detail}
${html_comment}
''')
    plain_template_obj = Template('''\
${status}
${body}''')
    html_template_obj = Template('''\
<html>
 <head>
  <title>${status}</title>
 </head>
 <body>
  <h1>${status}</h1>
  ${body}
 </body>
</html>''')

    ## Set this to True for responses that should have no request body
    empty_body = False

    def __init__(self, detail=None, headers=None, comment=None,
                 body_template=None, **kw):
        """Create the exception/response.

        ``detail``: plain-text message customization; ``headers``: extra
        (k, v) header pairs; ``comment``: extra info usually hidden from
        end users; ``body_template``: a template *string* overriding the
        class-level ``body_template_obj``.
        """
        Response.__init__(self,
                          status='%s %s' % (self.code, self.title),
                          **kw)
        Exception.__init__(self, detail)
        if headers:
            self.headers.extend(headers)
        self.detail = detail
        self.comment = comment
        if body_template is not None:
            self.body_template = body_template
            self.body_template_obj = Template(body_template)
        if self.empty_body:
            # Bodiless responses (e.g. 204/304) must not advertise content.
            del self.content_type
            del self.content_length

    def __str__(self):
        return self.detail or self.explanation

    def _make_body(self, environ, escape):
        # Render the body fragment, escaping every substituted value with
        # *escape* (html_escape for HTML output, no_escape for plain text).
        args = {
            'explanation': escape(self.explanation),
            'detail': escape(self.detail or ''),
            'comment': escape(self.comment or ''),
        }
        if self.comment:
            args['html_comment'] = '<!-- %s -->' % escape(self.comment)
        else:
            args['html_comment'] = ''
        if WSGIHTTPException.body_template_obj is not self.body_template_obj:
            # Custom template; add environ values and headers to args so
            # placeholders like ${REQUEST_METHOD} can be substituted.
            for k, v in environ.items():
                args[k] = escape(v)
            for k, v in self.headers.items():
                args[k.lower()] = escape(v)
        t_obj = self.body_template_obj
        return t_obj.substitute(args)

    def plain_body(self, environ):
        # Plain-text rendering: build unescaped body, then strip markup.
        body = self._make_body(environ, no_escape)
        body = strip_tags(body)
        return self.plain_template_obj.substitute(status=self.status,
                                                  title=self.title,
                                                  body=body)

    def html_body(self, environ):
        body = self._make_body(environ, html_escape)
        return self.html_template_obj.substitute(status=self.status,
                                                 body=body)

    def generate_response(self, environ, start_response):
        # Build a fresh Response whose body matches the client's Accept
        # header, reusing this exception's status and headers.
        if self.content_length is not None:
            del self.content_length
        headerlist = list(self.headerlist)
        accept = environ.get('HTTP_ACCEPT', '')
        # Parenthesized explicitly: this was written as
        # ``accept and 'html' in accept or '*/*' in accept``, which parses
        # as ``(accept and ...) or (...)``; with the '' default both
        # groupings behave the same, but the intent is clearer here.
        if accept and ('html' in accept or '*/*' in accept):
            content_type = 'text/html'
            body = self.html_body(environ)
        else:
            content_type = 'text/plain'
            body = self.plain_body(environ)
        extra_kw = {}
        if isinstance(body, text_type):
            extra_kw.update(charset='utf-8')
        resp = Response(body,
                        status=self.status,
                        headerlist=headerlist,
                        content_type=content_type,
                        **extra_kw
                        )
        # Re-assert content_type so it wins over any copied header.
        resp.content_type = content_type
        return resp(environ, start_response)

    def __call__(self, environ, start_response):
        is_head = environ['REQUEST_METHOD'] == 'HEAD'
        if self.body or self.empty_body or is_head:
            # A body was set explicitly (or none is allowed): serve as-is.
            app_iter = Response.__call__(self, environ, start_response)
        else:
            app_iter = self.generate_response(environ, start_response)
        if is_head:
            # HEAD responses carry headers only.
            app_iter = []
        return app_iter

    @property
    def wsgi_response(self):
        # The exception doubles as its own WSGI response.
        return self
class HTTPError(WSGIHTTPException):
    """
    base class for status codes in the 400's and 500's
    This is an exception which indicates that an error has occurred,
    and that any work in progress should not be committed. These
    typically have status codes in the 400's and 500's.
    """
class HTTPRedirection(WSGIHTTPException):
    """
    base class for 300's status code (redirections)
    This is an abstract base class for 3xx redirection. It indicates
    that further action needs to be taken by the user agent in order
    to fulfill the request. It does not necessarily signal an error
    condition.
    """
class HTTPOk(WSGIHTTPException):
    """
    Base class for the 200's status code (successful responses)
    code: 200, title: OK
    """
    # Overrides WSGIHTTPException's 500/'Internal Server Error' defaults;
    # also serves as the base for the other 2xx classes below.
    code = 200
    title = 'OK'
############################################################
## 2xx success
############################################################
class HTTPCreated(HTTPOk):
    """
    subclass of :class:`~HTTPOk`
    This indicates that request has been fulfilled and resulted in a new
    resource being created.
    code: 201, title: Created
    """
    code = 201
    title = 'Created'


class HTTPAccepted(HTTPOk):
    """
    subclass of :class:`~HTTPOk`
    This indicates that the request has been accepted for processing, but the
    processing has not been completed.
    code: 202, title: Accepted
    """
    code = 202
    title = 'Accepted'
    explanation = 'The request is accepted for processing.'


class HTTPNonAuthoritativeInformation(HTTPOk):
    """
    subclass of :class:`~HTTPOk`
    This indicates that the returned metainformation in the entity-header is
    not the definitive set as available from the origin server, but is
    gathered from a local or a third-party copy.
    code: 203, title: Non-Authoritative Information
    """
    code = 203
    title = 'Non-Authoritative Information'


class HTTPNoContent(HTTPOk):
    """
    subclass of :class:`~HTTPOk`
    This indicates that the server has fulfilled the request but does
    not need to return an entity-body, and might want to return updated
    metainformation.
    code: 204, title: No Content
    """
    code = 204
    title = 'No Content'
    # 204 responses must not carry a message body.
    empty_body = True


class HTTPResetContent(HTTPOk):
    """
    subclass of :class:`~HTTPOk`
    This indicates that the server has fulfilled the request and
    the user agent SHOULD reset the document view which caused the
    request to be sent.
    code: 205, title: Reset Content
    """
    code = 205
    title = 'Reset Content'
    # 205 responses must not carry a message body.
    empty_body = True


class HTTPPartialContent(HTTPOk):
    """
    subclass of :class:`~HTTPOk`
    This indicates that the server has fulfilled the partial GET
    request for the resource.
    code: 206, title: Partial Content
    """
    code = 206
    title = 'Partial Content'
############################################################
## 3xx redirection
############################################################
class _HTTPMove(HTTPRedirection):
    """
    redirections which require a Location field
    Since a 'Location' header is a required attribute of 301, 302, 303,
    305 and 307 (but not 304), this base class provides the mechanics to
    make this easy.
    You can provide a location keyword argument to set the location
    immediately. You may also give ``add_slash=True`` if you want to
    redirect to the same URL as the request, except with a ``/`` added
    to the end.
    Relative URLs in the location will be resolved to absolute.
    """
    explanation = 'The resource has been moved to'
    body_template_obj = Template('''\
${explanation} <a href="${location}">${location}</a>;
you should be redirected automatically.
${detail}
${html_comment}''')

    def __init__(self, detail=None, headers=None, comment=None,
                 body_template=None, location=None, add_slash=False):
        super(_HTTPMove, self).__init__(
            detail=detail, headers=headers, comment=comment,
            body_template=body_template)
        if location is not None:
            self.location = location
            # ``location`` and ``add_slash`` are mutually exclusive: the
            # slash redirect computes its own target URL in __call__.
            if add_slash:
                raise TypeError(
                    "You can only provide one of the arguments location "
                    "and add_slash")
        self.add_slash = add_slash

    def __call__(self, environ, start_response):
        req = Request(environ)
        if self.add_slash:
            # Target is the request's own URL with a trailing slash,
            # preserving any query string.
            url = req.path_url
            url += '/'
            if req.environ.get('QUERY_STRING'):
                url += '?' + req.environ['QUERY_STRING']
            self.location = url
        # Resolve a relative Location against the request URL.
        self.location = urlparse.urljoin(req.path_url, self.location)
        return super(_HTTPMove, self).__call__(
            environ, start_response)
class HTTPMultipleChoices(_HTTPMove):
    """
    subclass of :class:`~_HTTPMove`
    This indicates that the requested resource corresponds to any one
    of a set of representations, each with its own specific location,
    and agent-driven negotiation information is being provided so that
    the user can select a preferred representation and redirect its
    request to that location.
    code: 300, title: Multiple Choices
    """
    code = 300
    title = 'Multiple Choices'


class HTTPMovedPermanently(_HTTPMove):
    """
    subclass of :class:`~_HTTPMove`
    This indicates that the requested resource has been assigned a new
    permanent URI and any future references to this resource SHOULD use
    one of the returned URIs.
    code: 301, title: Moved Permanently
    """
    code = 301
    title = 'Moved Permanently'


class HTTPFound(_HTTPMove):
    """
    subclass of :class:`~_HTTPMove`
    This indicates that the requested resource resides temporarily under
    a different URI.
    code: 302, title: Found
    """
    code = 302
    title = 'Found'
    explanation = 'The resource was found at'


# This one is safe after a POST (the redirected location will be
# retrieved with GET):
class HTTPSeeOther(_HTTPMove):
    """
    subclass of :class:`~_HTTPMove`
    This indicates that the response to the request can be found under
    a different URI and SHOULD be retrieved using a GET method on that
    resource.
    code: 303, title: See Other
    """
    code = 303
    title = 'See Other'
class HTTPNotModified(HTTPRedirection):
    """
    subclass of :class:`~HTTPRedirection`
    This indicates that if the client has performed a conditional GET
    request and access is allowed, but the document has not been
    modified, the server SHOULD respond with this status code.
    code: 304, title: Not Modified
    """
    # TODO: this should include a date or etag header
    code = 304
    title = 'Not Modified'
    # 304 responses must not carry a message body.
    empty_body = True
class HTTPUseProxy(_HTTPMove):
    """
    subclass of :class:`~_HTTPMove`
    This indicates that the requested resource MUST be accessed through
    the proxy given by the Location field.
    code: 305, title: Use Proxy
    """
    # Not a move, but looks a little like one
    code = 305
    title = 'Use Proxy'
    explanation = (
        'The resource must be accessed through a proxy located at')


class HTTPTemporaryRedirect(_HTTPMove):
    """
    subclass of :class:`~_HTTPMove`
    This indicates that the requested resource resides temporarily
    under a different URI.
    code: 307, title: Temporary Redirect
    """
    code = 307
    title = 'Temporary Redirect'
############################################################
## 4xx client error
############################################################
class HTTPClientError(HTTPError):
    """
    base class for the 400's, where the client is in error
    This is an error condition in which the client is presumed to be
    in-error. This is an expected problem, and thus is not considered
    a bug. A server-side traceback is not warranted. Unless specialized,
    this is a '400 Bad Request'
    code: 400, title: Bad Request
    """
    code = 400
    title = 'Bad Request'
    # The \r\n and extra whitespace are collapsed by the module-level
    # explanation-normalization loop at the bottom of this file.
    explanation = ('The server could not comply with the request since\r\n'
                   'it is either malformed or otherwise incorrect.\r\n')
class HTTPBadRequest(HTTPClientError):
    # Inherits code 400 / title 'Bad Request' unchanged from HTTPClientError.
    pass
class HTTPUnauthorized(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the request requires user authentication.
    code: 401, title: Unauthorized
    """
    code = 401
    title = 'Unauthorized'
    explanation = (
        'This server could not verify that you are authorized to\r\n'
        'access the document you requested. Either you supplied the\r\n'
        'wrong credentials (e.g., bad password), or your browser\r\n'
        'does not understand how to supply the credentials required.\r\n')


class HTTPPaymentRequired(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    code: 402, title: Payment Required
    """
    code = 402
    title = 'Payment Required'
    explanation = ('Access was denied for financial reasons.')


class HTTPForbidden(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the server understood the request, but is
    refusing to fulfill it.
    code: 403, title: Forbidden
    """
    code = 403
    title = 'Forbidden'
    explanation = ('Access was denied to this resource.')


class HTTPNotFound(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the server did not find anything matching the
    Request-URI.
    code: 404, title: Not Found
    """
    code = 404
    title = 'Not Found'
    explanation = ('The resource could not be found.')


class HTTPMethodNotAllowed(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the method specified in the Request-Line is
    not allowed for the resource identified by the Request-URI.
    code: 405, title: Method Not Allowed
    """
    code = 405
    title = 'Method Not Allowed'
    # override template since we need an environment variable
    # (a custom body_template_obj makes environ values such as
    # ${REQUEST_METHOD} available for substitution in _make_body).
    body_template_obj = Template('''\
The method ${REQUEST_METHOD} is not allowed for this resource. <br /><br />
${detail}''')
class HTTPNotAcceptable(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates the resource identified by the request is only
    capable of generating response entities which have content
    characteristics not acceptable according to the accept headers
    sent in the request.
    code: 406, title: Not Acceptable
    """
    code = 406
    title = 'Not Acceptable'
    # Fix: the template was assigned to ``template``, an attribute nothing
    # reads; WSGIHTTPException._make_body renders ``body_template_obj``.
    # Using a custom template also makes environ variables such as
    # ${HTTP_ACCEPT} available for substitution.  Also closed the
    # unbalanced parenthesis in the message text.
    body_template_obj = Template('''\
The resource could not be generated that was acceptable to your browser
(content of type ${HTTP_ACCEPT}). <br /><br />
${detail}''')
class HTTPProxyAuthenticationRequired(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This is similar to 401, but indicates that the client must first
    authenticate itself with the proxy.
    code: 407, title: Proxy Authentication Required
    """
    code = 407
    title = 'Proxy Authentication Required'
    explanation = ('Authentication with a local proxy is needed.')


class HTTPRequestTimeout(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the client did not produce a request within
    the time that the server was prepared to wait.
    code: 408, title: Request Timeout
    """
    code = 408
    title = 'Request Timeout'
    explanation = ('The server has waited too long for the request to '
                   'be sent by the client.')


class HTTPConflict(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the request could not be completed due to a
    conflict with the current state of the resource.
    code: 409, title: Conflict
    """
    code = 409
    title = 'Conflict'
    explanation = ('There was a conflict when trying to complete '
                   'your request.')


class HTTPGone(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the requested resource is no longer available
    at the server and no forwarding address is known.
    code: 410, title: Gone
    """
    code = 410
    title = 'Gone'
    explanation = ('This resource is no longer available. No forwarding '
                   'address is given.')


class HTTPLengthRequired(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the server refuses to accept the request
    without a defined Content-Length.
    code: 411, title: Length Required
    """
    code = 411
    title = 'Length Required'
    explanation = ('Content-Length header required.')


class HTTPPreconditionFailed(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the precondition given in one or more of the
    request-header fields evaluated to false when it was tested on the
    server.
    code: 412, title: Precondition Failed
    """
    code = 412
    title = 'Precondition Failed'
    explanation = ('Request precondition failed.')


class HTTPRequestEntityTooLarge(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the server is refusing to process a request
    because the request entity is larger than the server is willing or
    able to process.
    code: 413, title: Request Entity Too Large
    """
    code = 413
    title = 'Request Entity Too Large'
    explanation = ('The body of your request was too large for this server.')


class HTTPRequestURITooLong(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the server is refusing to service the request
    because the Request-URI is longer than the server is willing to
    interpret.
    code: 414, title: Request-URI Too Long
    """
    code = 414
    title = 'Request-URI Too Long'
    explanation = ('The request URI was too long for this server.')
class HTTPUnsupportedMediaType(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the server is refusing to service the request
    because the entity of the request is in a format not supported by
    the requested resource for the requested method.
    code: 415, title: Unsupported Media Type
    """
    code = 415
    title = 'Unsupported Media Type'
    # Fix: the template was assigned to ``template_obj``, an attribute
    # nothing reads; WSGIHTTPException._make_body renders
    # ``body_template_obj``.  A custom template also makes environ
    # variables such as ${CONTENT_TYPE} available for substitution.
    body_template_obj = Template('''\
The request media type ${CONTENT_TYPE} is not supported by this server.
<br /><br />
${detail}''')
class HTTPRequestRangeNotSatisfiable(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    The server SHOULD return a response with this status code if a
    request included a Range request-header field, and none of the
    range-specifier values in this field overlap the current extent
    of the selected resource, and the request did not include an
    If-Range request-header field.
    code: 416, title: Request Range Not Satisfiable
    """
    code = 416
    title = 'Request Range Not Satisfiable'
    explanation = ('The Range requested is not available.')


class HTTPExpectationFailed(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the expectation given in an Expect
    request-header field could not be met by this server.
    code: 417, title: Expectation Failed
    """
    code = 417
    title = 'Expectation Failed'
    explanation = ('Expectation failed.')


class HTTPUnprocessableEntity(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the server is unable to process the contained
    instructions.
    code: 422, title: Unprocessable Entity
    """
    ## Note: from WebDAV
    code = 422
    title = 'Unprocessable Entity'
    explanation = 'Unable to process the contained instructions'


class HTTPLocked(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the resource is locked.
    code: 423, title: Locked
    """
    ## Note: from WebDAV
    code = 423
    title = 'Locked'
    explanation = ('The resource is locked')
class HTTPFailedDependency(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the method could not be performed because the
    requested action depended on another action and that action failed.
    code: 424, title: Failed Dependency
    """
    ## Note: from WebDAV
    code = 424
    title = 'Failed Dependency'
    # Fix: corrected the typo "dependended" in the user-visible message.
    explanation = (
        'The method could not be performed because the requested '
        'action depended on another action and that action failed')
class HTTPPreconditionRequired(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the origin server requires the request to be
    conditional. From RFC 6585, "Additional HTTP Status Codes".
    code: 428, title: Precondition Required
    """
    code = 428
    title = 'Precondition Required'
    explanation = ('This request is required to be conditional')


class HTTPTooManyRequests(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the client has sent too many requests in a
    given amount of time. Useful for rate limiting.
    From RFC 6585, "Additional HTTP Status Codes".
    code: 429, title: Too Many Requests
    """
    code = 429
    title = 'Too Many Requests'
    explanation = (
        'The client has sent too many requests in a given amount of time')


class HTTPRequestHeaderFieldsTooLarge(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the server is unwilling to process the request
    because its header fields are too large. The request may be resubmitted
    after reducing the size of the request header fields.
    From RFC 6585, "Additional HTTP Status Codes".
    code: 431, title: Request Header Fields Too Large
    """
    code = 431
    title = 'Request Header Fields Too Large'
    explanation = (
        'The request header fields were too large')


class HTTPUnavailableForLegalReasons(HTTPClientError):
    """
    subclass of :class:`~HTTPClientError`
    This indicates that the server is unable to process the request
    because of legal reasons, e.g. censorship or government-mandated
    blocked access.
    From the draft "A New HTTP Status Code for Legally-restricted Resources"
    by Tim Bray:
    http://tools.ietf.org/html/draft-tbray-http-legally-restricted-status-00
    code: 451, title: Unavailable For Legal Reasons
    """
    code = 451
    title = 'Unavailable For Legal Reasons'
    explanation = ('The resource is not available due to legal reasons.')
############################################################
## 5xx Server Error
############################################################
# Response status codes beginning with the digit "5" indicate cases in
# which the server is aware that it has erred or is incapable of
# performing the request. Except when responding to a HEAD request, the
# server SHOULD include an entity containing an explanation of the error
# situation, and whether it is a temporary or permanent condition. User
# agents SHOULD display any included entity to the user. These response
# codes are applicable to any request method.
class HTTPServerError(HTTPError):
    """
    base class for the 500's, where the server is in-error
    This is an error condition in which the server is presumed to be
    in-error. This is usually unexpected, and thus requires a traceback;
    ideally, opening a support ticket for the customer. Unless specialized,
    this is a '500 Internal Server Error'
    """
    code = 500
    title = 'Internal Server Error'
    # The \r\n and extra whitespace are collapsed by the module-level
    # explanation-normalization loop at the bottom of this file.
    explanation = (
        'The server has either erred or is incapable of performing\r\n'
        'the requested operation.\r\n')
class HTTPInternalServerError(HTTPServerError):
    # Inherits code 500 / title 'Internal Server Error' unchanged.
    pass
class HTTPNotImplemented(HTTPServerError):
    """
    subclass of :class:`~HTTPServerError`
    This indicates that the server does not support the functionality
    required to fulfill the request.
    code: 501, title: Not Implemented
    """
    code = 501
    title = 'Not Implemented'
    # Fix: the template was assigned to ``template``, an attribute nothing
    # reads; WSGIHTTPException._make_body renders ``body_template_obj``.
    # A custom template also makes environ variables such as
    # ${REQUEST_METHOD} available for substitution.
    body_template_obj = Template('''
The request method ${REQUEST_METHOD} is not implemented for this server. <br /><br />
${detail}''')
class HTTPBadGateway(HTTPServerError):
    """
    subclass of :class:`~HTTPServerError`
    This indicates that the server, while acting as a gateway or proxy,
    received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    code: 502, title: Bad Gateway
    """
    code = 502
    title = 'Bad Gateway'
    explanation = ('Bad gateway.')


class HTTPServiceUnavailable(HTTPServerError):
    """
    subclass of :class:`~HTTPServerError`
    This indicates that the server is currently unable to handle the
    request due to a temporary overloading or maintenance of the server.
    code: 503, title: Service Unavailable
    """
    code = 503
    title = 'Service Unavailable'
    explanation = ('The server is currently unavailable. '
                   'Please try again at a later time.')


class HTTPGatewayTimeout(HTTPServerError):
    """
    subclass of :class:`~HTTPServerError`
    This indicates that the server, while acting as a gateway or proxy,
    did not receive a timely response from the upstream server specified
    by the URI (e.g. HTTP, FTP, LDAP) or some other auxiliary server
    (e.g. DNS) it needed to access in attempting to complete the request.
    code: 504, title: Gateway Timeout
    """
    code = 504
    title = 'Gateway Timeout'
    explanation = ('The gateway has timed out.')


class HTTPVersionNotSupported(HTTPServerError):
    """
    subclass of :class:`~HTTPServerError`
    This indicates that the server does not support, or refuses to
    support, the HTTP protocol version that was used in the request
    message.
    code: 505, title: HTTP Version Not Supported
    """
    code = 505
    title = 'HTTP Version Not Supported'
    explanation = ('The HTTP version is not supported.')


class HTTPInsufficientStorage(HTTPServerError):
    """
    subclass of :class:`~HTTPServerError`
    This indicates that the server does not have enough space to save
    the resource.
    code: 507, title: Insufficient Storage
    """
    code = 507
    title = 'Insufficient Storage'
    explanation = ('There was not enough space to save the resource')


class HTTPNetworkAuthenticationRequired(HTTPServerError):
    """
    subclass of :class:`~HTTPServerError`
    This indicates that the client needs to authenticate to gain
    network access. From RFC 6585, "Additional HTTP Status Codes".
    code: 511, title: Network Authentication Required
    """
    code = 511
    title = 'Network Authentication Required'
    explanation = ('Network authentication is required')
class HTTPExceptionMiddleware(object):
    """
    Middleware that catches exceptions in the sub-application. This
    does not catch exceptions in the app_iter; only during the initial
    calling of the application.
    This should be put *very close* to applications that might raise
    these exceptions. This should not be applied globally; letting
    *expected* exceptions raise through the WSGI stack is dangerous.
    """
    def __init__(self, application):
        # application: the wrapped WSGI application.
        self.application = application
    def __call__(self, environ, start_response):
        try:
            return self.application(environ, start_response)
        except HTTPException:
            # Serve the caught exception itself as the WSGI response.
            parent_exc_info = sys.exc_info()
            def repl_start_response(status, headers, exc_info=None):
                # Forward the original exc_info to start_response unless
                # the exception response supplies its own.
                if exc_info is None:
                    exc_info = parent_exc_info
                return start_response(status, headers, exc_info)
            # parent_exc_info[1] is the HTTPException instance, which is
            # itself a WSGI application.
            return parent_exc_info[1](environ, repl_start_response)
# If Paste is installed, graft its same-named HTTPException classes onto
# ours as extra bases, so code catching paste.httpexceptions classes also
# catches these exceptions.
try:
    from paste import httpexceptions
except ImportError:   # pragma: no cover
    # Without Paste we don't need to do this fixup
    pass
else: # pragma: no cover
    for name in dir(httpexceptions):
        obj = globals().get(name)
        if (obj and isinstance(obj, type) and issubclass(obj, HTTPException)
            and obj is not HTTPException
            and obj is not WSGIHTTPException):
            # Append the Paste class of the same name as an extra base.
            obj.__bases__ = obj.__bases__ + (getattr(httpexceptions, name),)
    del name, obj, httpexceptions
__all__ = ['HTTPExceptionMiddleware', 'status_map']
# Maps status code -> exception class (e.g. status_map[404] is HTTPNotFound),
# built by scanning module globals for public HTTPException subclasses.
status_map={}
for name, value in list(globals().items()):
    if (isinstance(value, (type, class_types)) and
        issubclass(value, HTTPException)
        and not name.startswith('_')):
        __all__.append(name)
        if getattr(value, 'code', None):
            status_map[value.code]=value
        if hasattr(value, 'explanation'):
            # Collapse multi-line explanations into single-spaced text.
            value.explanation = ' '.join(value.explanation.strip().split())
del name, value
# ---- file-boundary marker (was a garbled "| |" separator) ----
import uuid
import six
import yaml
from .transformer import BaseTransformer
class ComposeTransformer(BaseTransformer):
    """
    A transformer for docker-compose
    To use this class:
    .. code-block:: python
        transformer = ComposeTransformer('./docker-compose.yml')
        normalized_keys = transformer.ingest_containers()
    """

    def _read_stream(self, stream):
        """Parse the YAML stream safely (no arbitrary object construction)."""
        return yaml.safe_load(stream=stream)

    def ingest_containers(self, containers=None):
        """
        Transform the YAML into a dict with normalized keys
        """
        # NOTE(review): assumes the top level maps container names to
        # definitions (compose v1 layout) -- confirm for newer formats.
        containers = containers or self.stream or {}
        output_containers = []
        for container_name, definition in six.iteritems(containers):
            container = definition.copy()
            container['name'] = container_name
            output_containers.append(container)
        return output_containers

    def emit_containers(self, containers, verbose=True):
        """Serialize base-schema containers back to a compose YAML string."""
        output = {}
        for container in containers:
            name_in_container = container.get('name')
            if not name_in_container:
                # No name recorded; invent one so the mapping key exists.
                name = str(uuid.uuid4())
            else:
                # NOTE(review): pop() mutates the caller's dict -- confirm
                # callers do not rely on 'name' staying present.
                name = container.pop('name')
            output[name] = container
        # Dump without YAML anchors/aliases for repeated structures.
        noalias_dumper = yaml.dumper.SafeDumper
        noalias_dumper.ignore_aliases = lambda self, data: True
        return yaml.dump(
            output,
            default_flow_style=False,
            Dumper=noalias_dumper
        )

    @staticmethod
    def validate(container):
        # No compose-specific validation is performed.
        return container

    @staticmethod
    def _parse_port_mapping(mapping):
        """Parse one compose port spec into a base-schema dict.

        Handles 1-4 colon-separated parts; returns None (implicitly) for
        shapes it does not recognize, e.g. two parts where the first
        contains a '.' (an IP with no container port).
        """
        parts = str(mapping).split(':')
        if len(parts) == 1:
            # "8080" -> container port only
            return {
                'container_port': int(parts[0])
            }
        if len(parts) == 2 and '.' not in mapping:
            # "80:8080" -> host:container
            return {
                'host_port': int(parts[0]),
                'container_port': int(parts[1])
            }
        if len(parts) == 3:
            if '.' in parts[0]:
                # "1.2.3.4:80:8080" -> host ip + host + container
                return {
                    'host_ip': parts[0],
                    'host_port': int(parts[1]),
                    'container_port': int(parts[2])
                }
            else:
                # "80:10.0.0.1:8080" -> host + container ip + container
                return {
                    'host_port': int(parts[0]),
                    'container_ip': parts[1],
                    'container_port': int(parts[2])
                }
        if len(parts) == 4:
            # "1.2.3.4:80:10.0.0.1:8080" -> fully specified
            return {
                'host_ip': parts[0],
                'host_port': int(parts[1]),
                'container_ip': parts[2],
                'container_port': int(parts[3])
            }

    def ingest_port_mappings(self, port_mappings):
        """
        Transform the docker-compose port mappings to base schema port_mappings
        :param port_mappings: The compose port mappings
        :type port_mappings: list
        :return: the base schema port_mappings
        :rtype: list of dict
        """
        return [self._parse_port_mapping(mapping) for mapping in port_mappings]

    @staticmethod
    def _emit_mapping(mapping):
        """Join the present mapping fields back into a colon-separated spec."""
        parts = []
        if mapping.get('host_ip'):
            parts.append(str(mapping['host_ip']))
        if mapping.get('host_port'):
            parts.append(str(mapping['host_port']))
        if mapping.get('container_ip'):
            parts.append(str(mapping['container_ip']))
        if mapping.get('container_port'):
            parts.append(str(mapping['container_port']))
        return ':'.join(parts)

    def emit_port_mappings(self, port_mappings):
        """
        :param port_mappings: the base schema port_mappings
        :type port_mappings: list of dict
        :return:
        :rtype: list of str
        """
        return [str(self._emit_mapping(mapping)) for mapping in port_mappings]

    def ingest_memory(self, memory):
        """
        Transform the memory into bytes
        :param memory: Compose memory definition. (1g, 24k)
        :type memory: str
        :return: The memory in bytes
        :rtype: int
        """
        def lshift(num, shift):
            return num << shift

        def rshift(num, shift):
            return num >> shift

        # Map unit suffix to the bit shift converting it to bytes;
        # 'b' shifts by 0, i.e. is passed through unchanged.
        bit_shift = {
            'g': {'func': lshift, 'shift': 30},
            'm': {'func': lshift, 'shift': 20},
            'k': {'func': lshift, 'shift': 10},
            'b': {'func': rshift, 'shift': 0}
        }
        # NOTE(review): assumes the value always ends in a unit letter;
        # a bare number like "256" would raise here -- confirm inputs.
        unit = memory[-1]
        number = int(memory[:-1])
        return bit_shift[unit]['func'](number, bit_shift[unit]['shift'])

    def emit_memory(self, memory):
        # Base schema stores bytes; compose expects a unit suffix.
        return '{}b'.format(memory)

    def ingest_cpu(self, cpu):
        return cpu

    def emit_cpu(self, cpu):
        return cpu

    def ingest_environment(self, environment):
        """Normalize list ("K=V") or dict environment specs to a str dict."""
        output = {}
        if type(environment) is list:
            for kv in environment:
                # Split on the first '=' only; values may contain '='.
                index = kv.find('=')
                output[str(kv[:index])] = str(kv[index + 1:])
        if type(environment) is dict:
            for key, value in six.iteritems(environment):
                output[str(key)] = str(value)
        return output

    def emit_environment(self, environment):
        return environment

    def ingest_command(self, command):
        return command

    def emit_command(self, command):
        return command

    def ingest_entrypoint(self, entrypoint):
        return entrypoint

    def emit_entrypoint(self, entrypoint):
        return entrypoint

    def ingest_volumes_from(self, volumes_from):
        return volumes_from

    def emit_volumes_from(self, volumes_from):
        return volumes_from

    @staticmethod
    def _ingest_volume(volume):
        """Parse one compose volume spec; returns None for unknown shapes."""
        parts = volume.split(':')
        if len(parts) == 1:
            return {
                'host': parts[0],
                'container': parts[0]
            }
        if len(parts) == 2 and parts[1] != 'ro':
            return {
                'host': parts[0],
                'container': parts[1]
            }
        if len(parts) == 2 and parts[1] == 'ro':
            return {
                'host': parts[0],
                'container': parts[0],
                'readonly': True
            }
        if len(parts) == 3 and parts[-1] == 'ro':
            return {
                'host': parts[0],
                'container': parts[1],
                'readonly': True
            }

    def ingest_volumes(self, volumes):
        # Parse each spec once (previously _ingest_volume ran twice per
        # entry) and drop unrecognized specs, which parse to None.
        ingested = (self._ingest_volume(volume) for volume in volumes)
        return [volume for volume in ingested if volume is not None]

    @staticmethod
    def _emit_volume(volume):
        """Join a base-schema volume dict back into a compose spec string."""
        volume_str = volume.get('host') + ':' + volume.get('container', ':')
        volume_str = volume_str.strip(':')
        if volume.get('readonly') and len(volume_str):
            volume_str += ':ro'
        return volume_str

    def emit_volumes(self, volumes):
        # Emit each volume once (previously _emit_volume ran twice per
        # entry) and drop entries that serialize to the empty string.
        emitted = (self._emit_volume(volume) for volume in volumes)
        return [volume_str for volume_str in emitted if len(volume_str)]
# ---- file-boundary marker (was a garbled "| |" separator) ----
"""Support for UPnP/IGD Sensors."""
from datetime import timedelta
from typing import Any, Mapping
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DATA_BYTES, DATA_RATE_KIBIBYTES_PER_SECOND
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
BYTES_RECEIVED,
BYTES_SENT,
CONFIG_ENTRY_SCAN_INTERVAL,
CONFIG_ENTRY_UDN,
DATA_PACKETS,
DATA_RATE_PACKETS_PER_SECOND,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
DOMAIN_COORDINATORS,
DOMAIN_DEVICES,
KIBIBYTE,
LOGGER as _LOGGER,
PACKETS_RECEIVED,
PACKETS_SENT,
TIMESTAMP,
)
from .device import Device
# Sensor descriptions keyed by the device value they read.  Each entry
# drives two entities: a raw counter (name/unit/unique_id) and a derived
# per-second rate (the derived_* keys, consumed by DerivedUpnpSensor).
SENSOR_TYPES = {
    BYTES_RECEIVED: {
        "device_value_key": BYTES_RECEIVED,
        "name": f"{DATA_BYTES} received",
        "unit": DATA_BYTES,
        "unique_id": BYTES_RECEIVED,
        "derived_name": f"{DATA_RATE_KIBIBYTES_PER_SECOND} received",
        "derived_unit": DATA_RATE_KIBIBYTES_PER_SECOND,
        "derived_unique_id": "KiB/sec_received",
    },
    BYTES_SENT: {
        "device_value_key": BYTES_SENT,
        "name": f"{DATA_BYTES} sent",
        "unit": DATA_BYTES,
        "unique_id": BYTES_SENT,
        "derived_name": f"{DATA_RATE_KIBIBYTES_PER_SECOND} sent",
        "derived_unit": DATA_RATE_KIBIBYTES_PER_SECOND,
        "derived_unique_id": "KiB/sec_sent",
    },
    PACKETS_RECEIVED: {
        "device_value_key": PACKETS_RECEIVED,
        "name": f"{DATA_PACKETS} received",
        "unit": DATA_PACKETS,
        "unique_id": PACKETS_RECEIVED,
        "derived_name": f"{DATA_RATE_PACKETS_PER_SECOND} received",
        "derived_unit": DATA_RATE_PACKETS_PER_SECOND,
        "derived_unique_id": "packets/sec_received",
    },
    PACKETS_SENT: {
        "device_value_key": PACKETS_SENT,
        "name": f"{DATA_PACKETS} sent",
        "unit": DATA_PACKETS,
        "unique_id": PACKETS_SENT,
        "derived_name": f"{DATA_RATE_PACKETS_PER_SECOND} sent",
        "derived_unit": DATA_RATE_PACKETS_PER_SECOND,
        "derived_unique_id": "packets/sec_sent",
    },
}
async def async_setup_platform(
    hass: HomeAssistantType, config, async_add_entities, discovery_info=None
) -> None:
    """Old way of setting up UPnP/IGD sensors."""
    # Legacy YAML setup is not supported: just log what was passed and do
    # nothing.  Entities are created via async_setup_entry instead.
    _LOGGER.debug(
        "async_setup_platform: config: %s, discovery: %s", config, discovery_info
    )
async def async_setup_entry(
    hass, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the UPnP/IGD sensors for a config entry."""
    entry_data = config_entry.data
    if CONFIG_ENTRY_UDN in entry_data:
        udn = entry_data[CONFIG_ENTRY_UDN]
    else:
        # No UDN stored on the entry: any known device will do.
        udn = next(iter(hass.data[DOMAIN][DOMAIN_DEVICES]))
    device: Device = hass.data[DOMAIN][DOMAIN_DEVICES][udn]

    interval_seconds = config_entry.options.get(
        CONFIG_ENTRY_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
    )
    update_interval = timedelta(seconds=interval_seconds)
    _LOGGER.debug("update_interval: %s", update_interval)
    _LOGGER.debug("Adding sensors")

    coordinator = DataUpdateCoordinator[Mapping[str, Any]](
        hass,
        _LOGGER,
        name=device.name,
        update_method=device.async_get_traffic_data,
        update_interval=update_interval,
    )
    await coordinator.async_refresh()
    hass.data[DOMAIN][DOMAIN_COORDINATORS][udn] = coordinator

    # One raw counter entity and one derived per-second entity per metric.
    sensors = [
        sensor_cls(coordinator, device, SENSOR_TYPES[value_key])
        for sensor_cls in (RawUpnpSensor, DerivedUpnpSensor)
        for value_key in (BYTES_RECEIVED, BYTES_SENT, PACKETS_RECEIVED, PACKETS_SENT)
    ]
    async_add_entities(sensors, True)
class UpnpSensor(CoordinatorEntity):
    """Base class for UPnP/IGD sensors."""

    def __init__(
        self,
        coordinator: DataUpdateCoordinator[Mapping[str, Any]],
        device: Device,
        sensor_type: Mapping[str, str],
        update_multiplier: int = 2,
    ) -> None:
        """Initialize the base sensor."""
        super().__init__(coordinator)
        self._device = device
        self._sensor_type = sensor_type
        self._update_counter_max = update_multiplier
        self._update_counter = 0

    @property
    def icon(self) -> str:
        """Icon to use in the frontend, if any."""
        return "mdi:server-network"

    @property
    def available(self) -> bool:
        """Return if entity is available."""
        if not self.coordinator.last_update_success:
            return False
        return self._sensor_type["device_value_key"] in self.coordinator.data

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return "{} {}".format(self._device.name, self._sensor_type["name"])

    @property
    def unique_id(self) -> str:
        """Return an unique ID."""
        return "{}_{}".format(self._device.udn, self._sensor_type["unique_id"])

    @property
    def unit_of_measurement(self) -> str:
        """Return the unit of measurement of this entity, if any."""
        return self._sensor_type["unit"]

    @property
    def device_info(self) -> Mapping[str, Any]:
        """Get device info."""
        device = self._device
        return {
            "connections": {(dr.CONNECTION_UPNP, device.udn)},
            "name": device.name,
            "manufacturer": device.manufacturer,
            "model": device.model_name,
        }
class RawUpnpSensor(UpnpSensor):
    """Representation of a UPnP/IGD sensor."""

    @property
    def state(self) -> str:
        """Return the current counter value as a decimal string."""
        value = self.coordinator.data[self._sensor_type["device_value_key"]]
        return None if value is None else format(value, "d")
class DerivedUpnpSensor(UpnpSensor):
    """Representation of a UNIT Sent/Received per second sensor."""

    def __init__(self, coordinator, device, sensor_type) -> None:
        """Initialize sensor."""
        super().__init__(coordinator, device, sensor_type)
        # Previous sample, used to compute the derivative.
        self._last_value = None
        self._last_timestamp = None

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return f"{self._device.name} {self._sensor_type['derived_name']}"

    @property
    def unique_id(self) -> str:
        """Return an unique ID."""
        return f"{self._device.udn}_{self._sensor_type['derived_unique_id']}"

    @property
    def unit_of_measurement(self) -> str:
        """Return the unit of measurement of this entity, if any."""
        return self._sensor_type["derived_unit"]

    def _has_overflowed(self, current_value) -> bool:
        """Check if the device counter wrapped around since the last sample."""
        return current_value < self._last_value

    @property
    def state(self) -> str:
        """Return the rate (units per second) since the previous sample."""
        # Can't calculate any derivative if we have only one value.
        device_value_key = self._sensor_type["device_value_key"]
        current_value = self.coordinator.data[device_value_key]
        if current_value is None:
            return None
        current_timestamp = self.coordinator.data[TIMESTAMP]
        if self._last_value is None or self._has_overflowed(current_value):
            self._last_value = current_value
            self._last_timestamp = current_timestamp
            return None

        # Calculate derivative.
        delta_value = current_value - self._last_value
        if self._sensor_type["unit"] == DATA_BYTES:
            delta_value /= KIBIBYTE
        # BUG FIX: use total_seconds() instead of .seconds — .seconds is only
        # the seconds *component* of the timedelta (it wraps at 24 hours,
        # ignores days, and truncates sub-second intervals), which produced
        # wrong rates or spurious division-by-zero for long/short intervals.
        delta_time = (current_timestamp - self._last_timestamp).total_seconds()
        if delta_time == 0:
            # Prevent division by 0.
            return None
        derived = delta_value / delta_time

        # Store current values for future use.
        self._last_value = current_value
        self._last_timestamp = current_timestamp

        return format(derived, ".1f")
| |
"""The tests for the Template select platform."""
import pytest
from homeassistant import setup
from homeassistant.components.input_select import (
ATTR_OPTION as INPUT_SELECT_ATTR_OPTION,
ATTR_OPTIONS as INPUT_SELECT_ATTR_OPTIONS,
DOMAIN as INPUT_SELECT_DOMAIN,
SERVICE_SELECT_OPTION as INPUT_SELECT_SERVICE_SELECT_OPTION,
SERVICE_SET_OPTIONS,
)
from homeassistant.components.select.const import (
ATTR_OPTION as SELECT_ATTR_OPTION,
ATTR_OPTIONS as SELECT_ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION as SELECT_SERVICE_SELECT_OPTION,
)
from homeassistant.const import CONF_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import Context
from homeassistant.helpers.entity_registry import async_get
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
)
_TEST_SELECT = "select.template_select"
# Input select entity that backs the template select's current option.
_OPTION_INPUT_SELECT = "input_select.option"
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # async_mock_service returns the list that captured calls are appended to.
    return async_mock_service(hass, "test", "automation")
async def test_missing_optional_config(hass, calls):
    """Test: missing optional template is ok."""
    # Only the required keys (state/select_option/options) are supplied;
    # optional keys such as "optimistic" and "unique_id" are omitted.
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "select": {
                        "state": "{{ 'a' }}",
                        "select_option": {"service": "script.select_option"},
                        "options": "{{ ['a', 'b'] }}",
                    }
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    _verify(hass, "a", ["a", "b"])
async def test_multiple_configs(hass, calls):
    """Test: multiple select entities get created."""
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "select": [
                        {
                            "state": "{{ 'a' }}",
                            "select_option": {"service": "script.select_option"},
                            "options": "{{ ['a', 'b'] }}",
                        },
                        {
                            "state": "{{ 'a' }}",
                            "select_option": {"service": "script.select_option"},
                            "options": "{{ ['a', 'b'] }}",
                        },
                    ]
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # The second, identically named entity gets a "_2" suffix.
    _verify(hass, "a", ["a", "b"])
    _verify(hass, "a", ["a", "b"], f"{_TEST_SELECT}_2")
async def test_missing_required_keys(hass, calls):
    """Test: missing required fields will fail."""
    # Missing "state":
    with assert_setup_component(0, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "select": {
                        "select_option": {"service": "script.select_option"},
                        "options": "{{ ['a', 'b'] }}",
                    }
                }
            },
        )
    # Missing "options":
    with assert_setup_component(0, "select"):
        assert await setup.async_setup_component(
            hass,
            "select",
            {
                "template": {
                    "select": {
                        "state": "{{ 'a' }}",
                        "select_option": {"service": "script.select_option"},
                    }
                }
            },
        )
    # Missing "select_option":
    with assert_setup_component(0, "select"):
        assert await setup.async_setup_component(
            hass,
            "select",
            {
                "template": {
                    "select": {
                        "state": "{{ 'a' }}",
                        "options": "{{ ['a', 'b'] }}",
                    }
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # None of the invalid configs may have produced an entity.
    assert hass.states.async_all() == []
async def test_templates_with_entities(hass, calls):
    """Test templates with values from other entities."""
    with assert_setup_component(1, "input_select"):
        assert await setup.async_setup_component(
            hass,
            "input_select",
            {
                "input_select": {
                    "option": {
                        "options": ["a", "b"],
                        "initial": "a",
                        "name": "Option",
                    },
                }
            },
        )
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "unique_id": "b",
                    "select": {
                        "state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
                        "options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
                        "select_option": {
                            "service": "input_select.select_option",
                            "data_template": {
                                "entity_id": _OPTION_INPUT_SELECT,
                                "option": "{{ option }}",
                            },
                        },
                        "optimistic": True,
                        "unique_id": "a",
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # Top-level and entity unique_ids are combined into "b-a".
    ent_reg = async_get(hass)
    entry = ent_reg.async_get(_TEST_SELECT)
    assert entry
    assert entry.unique_id == "b-a"
    _verify(hass, "a", ["a", "b"])
    # Changing the backing input_select option propagates to the select.
    await hass.services.async_call(
        INPUT_SELECT_DOMAIN,
        INPUT_SELECT_SERVICE_SELECT_OPTION,
        {CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
        blocking=True,
    )
    await hass.async_block_till_done()
    _verify(hass, "b", ["a", "b"])
    # Changing the backing option list propagates as well.
    await hass.services.async_call(
        INPUT_SELECT_DOMAIN,
        SERVICE_SET_OPTIONS,
        {
            CONF_ENTITY_ID: _OPTION_INPUT_SELECT,
            INPUT_SELECT_ATTR_OPTIONS: ["a", "b", "c"],
        },
        blocking=True,
    )
    await hass.async_block_till_done()
    _verify(hass, "a", ["a", "b", "c"])
    # Optimistic mode: selecting on the template select updates its state.
    await hass.services.async_call(
        SELECT_DOMAIN,
        SELECT_SERVICE_SELECT_OPTION,
        {CONF_ENTITY_ID: _TEST_SELECT, SELECT_ATTR_OPTION: "c"},
        blocking=True,
    )
    _verify(hass, "c", ["a", "b", "c"])
async def test_trigger_select(hass):
    """Test trigger based template select."""
    events = async_capture_events(hass, "test_number_event")
    assert await setup.async_setup_component(
        hass,
        "template",
        {
            "template": [
                {"invalid": "config"},
                # Config after invalid should still be set up
                {
                    "unique_id": "listening-test-event",
                    "trigger": {"platform": "event", "event_type": "test_event"},
                    "select": [
                        {
                            "name": "Hello Name",
                            "unique_id": "hello_name-id",
                            "state": "{{ trigger.event.data.beer }}",
                            "options": "{{ trigger.event.data.beers }}",
                            "select_option": {"event": "test_number_event"},
                            "optimistic": True,
                        },
                    ],
                },
            ],
        },
    )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # Before the trigger fires, the entity exists but has no state yet.
    state = hass.states.get("select.hello_name")
    assert state is not None
    assert state.state == STATE_UNKNOWN
    context = Context()
    hass.bus.async_fire(
        "test_event", {"beer": "duff", "beers": ["duff", "alamo"]}, context=context
    )
    await hass.async_block_till_done()
    # Firing the trigger populates state and options from the event data.
    state = hass.states.get("select.hello_name")
    assert state is not None
    assert state.state == "duff"
    assert state.attributes["options"] == ["duff", "alamo"]
    # select_option is wired to fire an event rather than call a service.
    await hass.services.async_call(
        SELECT_DOMAIN,
        SELECT_SERVICE_SELECT_OPTION,
        {CONF_ENTITY_ID: "select.hello_name", SELECT_ATTR_OPTION: "alamo"},
        blocking=True,
    )
    assert len(events) == 1
    assert events[0].event_type == "test_number_event"
def _verify(hass, expected_current_option, expected_options, entity_name=_TEST_SELECT):
    """Assert that the select entity reports the expected option and option list."""
    state = hass.states.get(entity_name)
    assert state.state == str(expected_current_option)
    assert state.attributes.get(SELECT_ATTR_OPTIONS) == expected_options
| |
from __future__ import division, absolute_import, print_function
import warnings
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
temppath,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
LD_INFO = np.finfo(np.longdouble)
# True when longdouble carries more precision than double on this platform.
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
# 1 + eps is the smallest longdouble > 1; if repr() does not round-trip it
# exactly, the platform's string->longdouble conversion is inaccurate.
_o = 1 + LD_INFO.eps
string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
del _o
def test_scalar_extraction():
    """Confirm that extracting a value doesn't convert to python float"""
    scalar = 1 + LD_INFO.eps
    arr = np.array([scalar] * 3)
    assert_equal(arr[1], scalar)
# Conversions string -> long double

# 0.1 not exactly representable in base 2 floating point.
repr_precision = len(repr(np.longdouble(0.1)))
# +2 from macro block starting around line 842 in scalartypes.c.src.
@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
                    reason="repr precision not enough to show eps")
def test_repr_roundtrip():
    """repr() of 1+eps must parse back to the identical longdouble."""
    # We will only see eps in repr if within printing precision.
    o = 1 + LD_INFO.eps
    assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
def test_unicode():
    # Construction from a unicode literal must not raise.
    np.longdouble(u"1.2")
def test_string():
    # Construction from a str literal must not raise.
    np.longdouble("1.2")
def test_bytes():
    # Construction from a bytes literal must not raise.
    np.longdouble(b"1.2")
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_repr_roundtrip_bytes():
    # repr() output encoded as ASCII bytes must parse to the same value.
    o = 1 + LD_INFO.eps
    assert_equal(np.longdouble(repr(o).encode("ascii")), o)
def test_bogus_string():
    """Strings that are not numeric literals must raise ValueError."""
    for bogus in ("spam", "1.0 flub"):
        assert_raises(ValueError, np.longdouble, bogus)
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_fromstring():
    # Five space-separated repr() values must parse back exactly.
    o = 1 + LD_INFO.eps
    s = (" " + repr(o))*5
    a = np.array([o]*5)
    assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
                 err_msg="reading '%s'" % s)
def test_fromstring_bogus():
    # Parsing stops at the first invalid token ("flop"); a deprecation
    # warning is emitted for the partial parse.
    with assert_warns(DeprecationWarning):
        assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
                     np.array([1., 2., 3.]))
def test_fromstring_empty():
    # No parsable values at all yields an empty array (with a warning).
    with assert_warns(DeprecationWarning):
        assert_equal(np.fromstring("xxxxx", sep="x"),
                     np.array([]))
def test_fromstring_missing():
    # Parsing stops after the first value once the separator pattern breaks.
    with assert_warns(DeprecationWarning):
        assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
                     np.array([1]))
class TestFileBased(object):
    """Round-trip 1+eps through the file/text based readers and writers."""
    ldbl = 1 + LD_INFO.eps
    # Five identical values, written one per line by the tests below.
    tgt = np.array([ldbl]*5)
    out = ''.join([repr(t) + '\n' for t in tgt])
    def test_fromfile_bogus(self):
        # Parsing stops at the first invalid token ("flop").
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write("1. 2. 3. flop 4.\n")
            with assert_warns(DeprecationWarning):
                res = np.fromfile(path, dtype=float, sep=" ")
            assert_equal(res, np.array([1., 2., 3.]))
    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_fromfile(self):
        # np.fromfile must reconstruct the exact longdouble values.
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write(self.out)
            res = np.fromfile(path, dtype=np.longdouble, sep="\n")
            assert_equal(res, self.tgt)
    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_genfromtxt(self):
        # np.genfromtxt must reconstruct the exact longdouble values.
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write(self.out)
            res = np.genfromtxt(path, dtype=np.longdouble)
            assert_equal(res, self.tgt)
    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_loadtxt(self):
        # np.loadtxt must reconstruct the exact longdouble values.
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write(self.out)
            res = np.loadtxt(path, dtype=np.longdouble)
            assert_equal(res, self.tgt)
    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_tofile_roundtrip(self):
        # tofile followed by fromfile must be lossless.
        with temppath() as path:
            self.tgt.tofile(path, sep=" ")
            res = np.fromfile(path, dtype=np.longdouble, sep=" ")
            assert_equal(res, self.tgt)
# Conversions long double -> string

def test_repr_exact():
    # repr must show enough digits to distinguish 1+eps from 1.
    o = 1 + LD_INFO.eps
    assert_(repr(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
                    reason="Need strtold_l")
def test_format():
    # str.format with 40 significant digits must distinguish 1+eps from 1.
    o = 1 + LD_INFO.eps
    assert_("{0:.40g}".format(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
                    reason="Need strtold_l")
def test_percent():
    # %-formatting with 40 significant digits must distinguish 1+eps from 1.
    o = 1 + LD_INFO.eps
    assert_("%.40g" % o != '1')
@pytest.mark.skipif(longdouble_longer_than_double,
                    reason="array repr problem")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
                    reason="Need strtold_l")
def test_array_repr():
    # Array repr must distinguish [1+eps] from [1].
    o = 1 + LD_INFO.eps
    a = np.array([o])
    b = np.array([1], dtype=np.longdouble)
    if not np.all(a != b):
        raise ValueError("precision loss creating arrays")
    assert_(repr(a) != repr(b))
#
# Locale tests: scalar types formatting should be independent of the locale
#
class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
    """Parsing/formatting must use '.' even under a comma-decimal locale."""
    def test_repr_roundtrip_foreign(self):
        o = 1.5
        assert_equal(o, np.longdouble(repr(o)))
    def test_fromstring_foreign_repr(self):
        f = 1.234
        a = np.fromstring(repr(f), dtype=float, sep=" ")
        assert_equal(a[0], f)
    def test_fromstring_best_effort_float(self):
        # "1,234" parses only up to the comma, yielding 1.0.
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
                         np.array([1.]))
    def test_fromstring_best_effort(self):
        # Same partial-parse behavior for the longdouble path.
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
                         np.array([1.]))
    def test_fromstring_foreign(self):
        s = "1.234"
        a = np.fromstring(s, dtype=np.longdouble, sep=" ")
        assert_equal(a[0], np.longdouble(s))
    def test_fromstring_foreign_sep(self):
        a = np.array([1, 2, 3, 4])
        b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
        assert_array_equal(a, b)
    def test_fromstring_foreign_value(self):
        # With space as separator, the comma terminates the first value.
        with assert_warns(DeprecationWarning):
            b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
            assert_array_equal(b[0], 1)
@pytest.mark.parametrize("int_val", [
    # cases discussed in gh-10723
    # and gh-9968
    2 ** 1024, 0])
def test_longdouble_from_int(int_val):
    # for issue gh-9968: int and str inputs must convert identically.
    str_val = str(int_val)
    # we'll expect a RuntimeWarning on platforms
    # with np.longdouble equivalent to np.double
    # for large integer input
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', RuntimeWarning)
        # can be inf==inf on some platforms
        assert np.longdouble(int_val) == np.longdouble(str_val)
        # we can't directly compare the int and
        # max longdouble value on all platforms
        if np.allclose(np.finfo(np.longdouble).max,
                       np.finfo(np.double).max) and w:
            assert w[0].category is RuntimeWarning
@pytest.mark.parametrize("bool_val", [
    True, False])
def test_longdouble_from_bool(bool_val):
    # bool input must behave exactly like the equivalent int (1 or 0).
    assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
| |
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request, url_for
from flask_login import UserMixin, AnonymousUserMixin
from app.exceptions import ValidationError
from . import db, login_manager
class Permission:
    """Bit-flag permission constants, OR-ed together into Role.permissions."""
    FOLLOW = 0x01
    COMMENT = 0x02
    WRITE_ARTICLES = 0x04
    MODERATE_COMMENTS = 0x08
    ADMINISTER = 0x80
class Role(db.Model):
    """User role carrying a permission bitmask (see Permission flags)."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # True for the single role assigned to newly registered users.
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')
    @staticmethod
    def insert_roles():
        """Create or update the built-in roles; safe to run repeatedly."""
        # Mapping: role name -> (permission bitmask, is-default flag).
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            db.session.add(role)
        db.session.commit()
    def __repr__(self):
        return '<Role %r>' % self.name
class Follow(db.Model):
    """Association object linking a follower to a followed user."""
    __tablename__ = 'follows'
    follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
class User(UserMixin, db.Model):
    """Registered account: credentials, profile, social graph and API tokens."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    # Cached MD5 of the email, used to build Gravatar URLs.
    avatar_hash = db.Column(db.String(32))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    followed = db.relationship('Follow',
                               foreign_keys=[Follow.follower_id],
                               backref=db.backref('follower', lazy='joined'),
                               lazy='dynamic',
                               cascade='all, delete-orphan')
    followers = db.relationship('Follow',
                                foreign_keys=[Follow.followed_id],
                                backref=db.backref('followed', lazy='joined'),
                                lazy='dynamic',
                                cascade='all, delete-orphan')
    comments = db.relationship('Comment', backref='author', lazy='dynamic')
    @staticmethod
    def generate_fake(count=100):
        """Populate the database with fake users for development/testing."""
        from sqlalchemy.exc import IntegrityError
        from random import seed
        import forgery_py
        seed()
        for i in range(count):
            u = User(email=forgery_py.internet.email_address(),
                     username=forgery_py.internet.user_name(True),
                     password=forgery_py.lorem_ipsum.word(),
                     confirmed=True,
                     name=forgery_py.name.full_name(),
                     location=forgery_py.address.city(),
                     about_me=forgery_py.lorem_ipsum.sentence(),
                     member_since=forgery_py.date.date(True))
            db.session.add(u)
            try:
                db.session.commit()
            except IntegrityError:
                # Random emails/usernames can collide with existing rows;
                # skip the duplicate and continue.
                db.session.rollback()
    @staticmethod
    def add_self_follows():
        """Backfill helper: make every existing user follow themselves."""
        for user in User.query.all():
            if not user.is_following(user):
                user.follow(user)
                db.session.add(user)
                db.session.commit()
    def __init__(self, **kwargs):
        """Assign a role (admin by email match, else default) and avatar hash."""
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email == current_app.config['CIRCULATE_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        if self.email is not None and self.avatar_hash is None:
            # Precompute the Gravatar hash so it isn't recomputed per request.
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
        # Users follow themselves so their own posts appear in followed_posts.
        self.followed.append(Follow(followed=self))
    @property
    def password(self):
        """Write-only attribute: only the hash is ever stored or readable."""
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def generate_confirmation_token(self, expiration=3600):
        """Return a signed account-confirmation token (default 1 hour)."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})
    def confirm(self, token):
        """Validate a confirmation token and mark this account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.  itsdangerous raises BadSignature /
        # SignatureExpired, both Exception subclasses.
        except Exception:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True
    def generate_reset_token(self, expiration=3600):
        """Return a signed password-reset token (default 1 hour)."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})
    def reset_password(self, token, new_password):
        """Validate a reset token and set the new password."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # was bare `except:` — see confirm()
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True
    def generate_email_change_token(self, new_email, expiration=3600):
        """Return a signed token authorizing a change to *new_email*."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})
    def change_email(self, token):
        """Validate an email-change token and apply the new address."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # was bare `except:` — see confirm()
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        # Keep the cached Gravatar hash in sync with the new address.
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True
    def can(self, permissions):
        """Return True when this user's role grants every requested bit."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions
    def is_administrator(self):
        return self.can(Permission.ADMINISTER)
    def ping(self):
        """Refresh last_seen; called on every authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)
    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL matching the current request's scheme."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        hash = self.avatar_hash or hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)
    def follow(self, user):
        """Start following *user* (no-op if already following)."""
        if not self.is_following(user):
            f = Follow(follower=self, followed=user)
            db.session.add(f)
    def unfollow(self, user):
        """Stop following *user* (no-op if not following)."""
        f = self.followed.filter_by(followed_id=user.id).first()
        if f:
            db.session.delete(f)
    def is_following(self, user):
        return self.followed.filter_by(
            followed_id=user.id).first() is not None
    def is_followed_by(self, user):
        return self.followers.filter_by(
            follower_id=user.id).first() is not None
    @property
    def followed_posts(self):
        """Query for posts authored by users this user follows."""
        return Post.query.join(Follow, Follow.followed_id == Post.author_id)\
            .filter(Follow.follower_id == self.id)
    def to_json(self):
        """Serialize the public profile for the JSON API."""
        json_user = {
            'url': url_for('api.get_user', id=self.id, _external=True),
            'username': self.username,
            'member_since': self.member_since,
            'last_seen': self.last_seen,
            'posts': url_for('api.get_user_posts', id=self.id, _external=True),
            'followed_posts': url_for('api.get_user_followed_posts',
                                      id=self.id, _external=True),
            'post_count': self.posts.count()
        }
        return json_user
    def generate_auth_token(self, expiration):
        """Return a signed API auth token embedding this user's id."""
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('ascii')
    @staticmethod
    def verify_auth_token(token):
        """Return the user for a valid API token, or None."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # was bare `except:` — see confirm()
            return None
        return User.query.get(data['id'])
    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated requests: grants no permissions at all."""
    def can(self, permissions):
        return False
    def is_administrator(self):
        return False
# Flask-Login uses this class when no user is logged in.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a user from the session-stored id."""
    return User.query.get(int(user_id))
class Post(db.Model):
    """Blog post; body_html is regenerated whenever body is assigned."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    # Sanitized HTML rendering of body, maintained by on_changed_body.
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')
    @staticmethod
    def generate_fake(count=100):
        """Create fake posts attributed to random existing users."""
        from random import seed, randint
        import forgery_py
        seed()
        user_count = User.query.count()
        # NOTE(review): randint(0, user_count - 1) raises ValueError when no
        # users exist — generate users before calling this.
        for i in range(count):
            u = User.query.offset(randint(0, user_count - 1)).first()
            p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
                     timestamp=forgery_py.date.date(True),
                     author=u)
            db.session.add(p)
            db.session.commit()
    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: render body markdown to sanitized HTML."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                        'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
                        'h1', 'h2', 'h3', 'p']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))
    def to_json(self):
        """Serialize the post for the JSON API."""
        json_post = {
            'url': url_for('api.get_post', id=self.id, _external=True),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'author': url_for('api.get_user', id=self.author_id,
                              _external=True),
            'comments': url_for('api.get_post_comments', id=self.id,
                                _external=True),
            'comment_count': self.comments.count()
        }
        return json_post
    @staticmethod
    def from_json(json_post):
        """Build a Post from API JSON; raises ValidationError without a body."""
        body = json_post.get('body')
        if body is None or body == '':
            raise ValidationError('post does not have a body')
        return Post(body=body)
# Re-render body_html whenever the body field is assigned.
db.event.listen(Post.body, 'set', Post.on_changed_body)
class Comment(db.Model):
    """Comment on a post; body_html is regenerated whenever body is assigned."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    # Sanitized HTML rendering of body, maintained by on_changed_body.
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # True when a moderator has hidden this comment.
    disabled = db.Column(db.Boolean)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: render body markdown to sanitized HTML."""
        # Comments allow a smaller tag whitelist than posts.
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
                        'strong']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))
    def to_json(self):
        """Serialize the comment for the JSON API."""
        json_comment = {
            'url': url_for('api.get_comment', id=self.id, _external=True),
            'post': url_for('api.get_post', id=self.post_id, _external=True),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'author': url_for('api.get_user', id=self.author_id,
                              _external=True),
        }
        return json_comment
    @staticmethod
    def from_json(json_comment):
        """Build a Comment from API JSON; raises ValidationError without a body."""
        body = json_comment.get('body')
        if body is None or body == '':
            raise ValidationError('comment does not have a body')
        return Comment(body=body)
# Re-render body_html whenever the body field is assigned.
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
| |
#!/usr/bin/env python
'''
Gimp plugin.
Transfer style (color and surface texture) from a source image to the active, target image.
Requires resynthesizer plug-in.
Author:
lloyd konneker, lkk
Version:
1.0 lkk 7/15/2010 Initial version. Released to Gimp Registry.
1.1 lkk 8/1/2010 Unreleased
1.2 lkk 8/10/2010
Change log:
_________________
1.1
Bug: Fixed test of mode variable, since it is a string, needs explicit test for == 1
Bug: Added remove Selection Mask copy channel in make_grayscale_map
1.2
Changes for new resynthesizer: no need to synchronize, remove alphas
Fixed improper adjustment of contrast of source: only adjust source map.
TODO
a quality setting that changes the parameters to resynth
License:
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
The GNU Public License is available at
http://www.gnu.org/copyleft/gpl.html
Users Guide
___________
What this plugin does:
Transfers artistic style from one image to another. Often the source is an artistic image and the target is a realistic, photo image. But you can also transfer between artistic images or between realistic images.
An artist might say this plugin "renders in the media and style from another image." A computer user might say it "renders in the theme of another image."
Transferring style means transferring small scale features (color and texture) to an image while retaining large scale features (objects.)
Map can mean "transfer and transform". This plugin gives limited control of the transform. That is, colors are usually mapped to similar colors (hues.) This plugin is not intended to do "false color" (but it might have that effect.)
Style can mean "color and surface." Texture mapping usually means just surface (pattern of brightness, e.g. a weave or grain.) This plugin can transfer both color and surface.
This plugin has more effect than just an overlay or screen or a map. A screen usually applies a texture uniformly across an image. This plugin transfers style in patches. The style in a region can come from any patch of the source, or be synthesized (mixed) from many patches of the source.
The transfer is not exactly a copy, again because of optional synthesis or mixing.
About the selection:
Usually you transfer between separate images, the target and source images. You can make a selection in either image, or both. If there is no selection, the plugin uses the entire layer.
The target is the active LAYER and you can choose the source LAYER. Note that the plugin doesn't use everything visible in an image, just one layer.
SPECIAL CASE: If the target and source layers are in the same image, the source style comes from the inverse of the selection in the source layer. Similarly, if the target and source layers are the same layer, the target is the selection and the style comes from the inverse of the selection, i.e. outside the selection. In this case, the effect is little if there is no difference in texture between the inside and outside of the selection, or a distort, if there is a difference.
About the settings:
"Percent transfer:" how much style to transfer. Less transfer means the effect retains the large scale objects of the original, but gives the image a grainy surface. More transfer means the effect leaves only a ghost of the large scale objects, and almost fully copies the style image (with less synthesis or mixing.)
"Map by:" whether color affects the style transfer, when both target and source are in color. If you choose "color and brightness", style colors are more apt to be transferred to areas with same colors. However, it is still possible that colors are radically transformed, if the surface (brightness pattern) is a better match. If you choose "brightness only", style colors are more apt to be radically transformed.
This setting has less effect if there are no color matches between source and target (e.g. one is all red and the other is all green) or if the target image is GRAY. This setting has NO effect if the source image or both images are GRAY.
About image modes:
You can transfer style between any combination of RGB and GRAY images. The plugin changes the mode of the target to the mode of the source as necessary.
Why this plugin:
This plugin is a front-end to the separate resynthesizer plugin. This plugin simplifies using the resynthesizer plugin. It automates many steps. It hides several complexities of the resynthesizer plugin: selection, modes, alpha channels, and settings.
Programming notes:
_________________
IN: The active image and layer.
The selection in the active image.
The selection in any layers chosen for source.
OUT: The active image, altered. The source is unaltered.
Target mode can be altered, but with the implied consent of the user.
The print stmts go to the console, info to advanced users and debuggers.
This plugin is mostly about UI and simplifications for user (the engine does the image processing):
making maps automatically
synchronization of alphas (note the new resynthesizer ignores alphas.)
synchronization of modes
abstracting the settings
contrast adjustment
'''
from gimpfu import *
from math import acos
gettext.install("resynthesizer", gimp.locale_directory, unicode=True)
# True if you want to display and retain working, temporary images
debug = False
def display_debug_image(image) :
    '''
    Show a working (temporary) image in a new display, but only when the
    module-level debug flag is set.

    Gimp raises RuntimeError from gimp_display_new when the run mode is not
    interactive; that is expected and silently ignored.
    '''
    if not debug :
        return
    try:
        pdb.gimp_display_new(image)
        pdb.gimp_displays_flush()
    except RuntimeError:
        pass  # non-interactive run-mode: no display available
def make_grayscale_map(image, drawable):
    '''
    Make a grayscale copy for a map.
    Maps must be same size as their parent image.
    If image is already grayscale, return it without copying.
    !!! In that case the caller receives the ORIGINAL image and must not
    delete it as if it were a temporary copy.
    Maps don't need a selection, since the resynthesizer looks at parent drawables for the selection.

    Returns (map_image, map_drawable).
    '''
    if pdb.gimp_image_base_type(image) == GRAY :
        return image, drawable

    # Save selection, copy entire image, and restore
    original_selection = pdb.gimp_selection_save(image)
    pdb.gimp_selection_all(image) # copy requires selection
    pdb.gimp_edit_copy(drawable)
    if original_selection:
        pdb.gimp_selection_load(original_selection) # restore selection in image
        pdb.gimp_image_remove_channel(image, original_selection) # cleanup the copied selection mask
        # !!! Note remove_channel not drawable_delete

    # Make a copy, greyscale
    temp_image = pdb.gimp_edit_paste_as_new()
    pdb.gimp_image_convert_grayscale(temp_image)
    display_debug_image(temp_image)
    temp_drawable = pdb.gimp_image_get_active_drawable(temp_image)
    return temp_image, temp_drawable
def synchronize_modes(target_image, source_image) :
    '''
    User-friendliness:
    If mode of target is not equal to mode of source, change modes.
    Resynthesizer requires target and source to be same mode.
    Assert target is RGB or GRAY (since is precondition of plugin.)
    UI decision: make this quiet, presume user intends mode change.
    But don't permanently change mode of source.
    Always upgrade GRAY to RGB, not downgrade RGB to GRAY.

    NOTE: the source_image passed here is expected to be a working copy,
    so converting it (the else branch) never alters the user's original.
    '''
    target_mode = pdb.gimp_image_base_type(target_image)
    source_mode = pdb.gimp_image_base_type(source_image)
    if target_mode != source_mode :
        # print("Map style: converted mode\n.")
        if target_mode == GRAY:
            # Upgrade the target in place (quiet, implied user consent).
            pdb.gimp_image_convert_rgb(target_image)
        else : # target is RGB and source is GRAY
            # Assert only convert a copy of source,
            # user NEVER intends original source be altered.
            pdb.gimp_image_convert_rgb(source_image)
'''
Not used
'''
"""
def synchronize_alphas(target_drawable, source_drawable) :
'''
User-friendliness:
If source has alpha and target doesn't, remove or add alpha to source.
Do this without user dialog since it is done on copies, and really, the alpha doesn't matter.
'''
if pdb.gimp_drawable_has_alpha(source_drawable) :
if not pdb.gimp_drawable_has_alpha(target_drawable) :
# Should never get here, since removed alpha from source_drawable copy earlier
print "Adding alpha channel to target image since style source image has alpha."
pdb.gimp_layer_add_alpha (target_drawable)
else: # source has no alpha
if pdb.gimp_drawable_has_alpha(target_drawable) :
print "Map style: Adding alpha channel to style source image copy since target image has alpha."
pdb.gimp_layer_add_alpha (source_drawable)
"""
def copy_selection_to_image(drawable) :
    '''
    If image has a selection, copy selection to new image, and prepare it for resynthesizer,
    else return a copy of the entire source image.
    This is called for the source image, where it helps performance to reduce size and flatten.

    Returns (image_copy, layer_copy); the caller owns the new image and is
    responsible for deleting it.
    '''
    image = pdb.gimp_drawable_get_image(drawable)

    # copy selection or whole image (gimp_edit_copy copies the selection if
    # one exists, otherwise the whole drawable)
    pdb.gimp_edit_copy(drawable)
    image_copy = pdb.gimp_edit_paste_as_new()
    # Activate layer, and remove alpha channel
    pdb.gimp_image_flatten(image_copy)
    layer_copy = pdb.gimp_image_get_active_layer(image_copy)
    # In earlier version, futzed with selection to deal with transparency
    display_debug_image(image_copy)
    return image_copy, layer_copy
def synchronize_contrast( drawable, source_drawable, percent_transfer) :
    '''
    Adjust contrast of source, to match target.
    Adjustment depends inversely on percent_transfer.
    Very crude histogram matching.

    drawable: the target (reference) drawable, read only.
    source_drawable: adjusted in place - pass only temporary working copies.
    percent_transfer: the user's "percent transfer" setting, 0-100.
    '''
    # histogram upper half: typical mean is 191 (3/4*255). Skew of mean towards 255 means high contrast.
    mean, deviation, median, pixels, count, percentile = pdb.gimp_histogram(drawable, HISTOGRAM_VALUE, 128, 255)
    source_mean, source_deviation, source_median, pixels, count, percentile = pdb.gimp_histogram(
        source_drawable, HISTOGRAM_VALUE, 128, 255)
    # if mean > source_mean: # target has more contrast than source
    # Adjust contrast of source.
    # Inversely proportional to percent transfer.
    # 2.5 is from experimentation with gimp_brightness_contrast which seems linear in its effect.
    # !!! 100.0 forces float division: under Python 2 an integer
    # percent_transfer would truncate to 0 or 1 and nullify the scaling.
    contrast_control = (mean - source_mean) * 2.5 * (1 - (percent_transfer / 100.0))
    # clamp to valid range (above formula is lazy, ad hoc)
    if contrast_control < -127: contrast_control = -127
    if contrast_control > 127: contrast_control = 127
    pdb.gimp_brightness_contrast(source_drawable, 0, contrast_control)
    # For experimentation, print new values
    source_mean, source_deviation, source_median, pixels, count, percentile = pdb.gimp_histogram(
        source_drawable, HISTOGRAM_VALUE, 128, 255)
    # print "Map style: Source contrast changed by ", contrast_control
    # print "Map style: Target and source upper half histogram means", mean, source_mean
def calculate_map_weight(percent_transfer) :
    '''
    This is a GUI design discussion.
    Transform percent_transfer to map_weight parameter to resynthesizer.
    For resynthesizer:
    map weight 0 means copy source to target, meaning ALL style.
    map weight 0.5 means just a grainy transfer of style (as little as is possible.)
    Transform from a linear percent GUI, because user more comfortable than with a ratio [.5, 0]
    which is backwards to the usual *less on the left*.
    By experiment, a sinusoid gives good results for linearizing the non-linear map_weight control.

    percent_transfer: 0-100; returns a float in [0, ~0.5].
    '''
    # !!! 100.0 forces float division: under Python 2 an integer
    # percent_transfer would truncate and break the formula.
    # (2*3.14 kept as-is to preserve the experimentally-tuned output.)
    return acos((percent_transfer/100.0)*2 -1)/(2*3.14)
def transfer_style(image, drawable, source_drawable, percent_transfer, map_mode ):
    '''
    Main body of plugin to transfer style from one image to another.

    image, drawable: the target; altered in place (inside an undo group).
    source_drawable: the style source; never altered.
    percent_transfer: how much style to transfer, 10-90.
    map_mode: 0 == "Color and brightness", 1 == "Brightness only".
    '''
    pdb.gimp_image_undo_group_start(image)

    # Get image of source drawable
    source_image = pdb.gimp_drawable_get_image(source_drawable)

    # The drawable chooser widget in Pygimp does not allow us to prefilter
    # INDEXED mode, so check here and give a warning.
    # These are the originals base types, and this plugin might change the base types
    original_source_base_type = pdb.gimp_image_base_type(source_image)
    original_target_base_type = pdb.gimp_image_base_type(image)
    if original_source_base_type == INDEXED :
        pdb.gimp_message(_("The style source cannot be of mode INDEXED"));
        return

    if image == source_image and drawable == source_drawable:
        # Source is the target: the new resynthesizer needs no selection and
        # the effect is similar to a blur. Modes and alphas already agree
        # (same layer), so no copy is needed.
        is_source_copy = False
    else: # target layer is not the source layer
        # Copy source always, for performance, and for possible mode change.
        is_source_copy = True
        source_image, source_drawable = copy_selection_to_image(source_drawable)
        # Futz with modes if necessary (only ever alters the copy).
        synchronize_modes(image, source_image)

    '''
    TODO For performance, if there is a selection in target, it would be better to copy
    selection to a new layer, and later merge it back (since resynthesizer engine reads
    entire target into memory. Low priority since rarely does user make a selection in target.
    '''

    # Decide ONCE whether separate grayscale map images are made, and remember
    # the decision: the cleanup below must mirror this exact condition.
    # Maps are grayscale at the user's option, or when the target was GRAY and
    # the source RGB (which gives a better result; note that if the target was
    # GRAY, synchronize_modes already upgraded it to RGB).
    making_gray_maps = (map_mode == 1 or
        (original_source_base_type == RGB and original_target_base_type == GRAY))
    if making_gray_maps :
        # Convert mode, but in new temp image and drawable
        target_map_image, target_map_drawable = make_grayscale_map(image, drawable)
        source_map_image, source_map_drawable = make_grayscale_map(source_image, source_drawable)

        target_map = target_map_drawable
        source_map = source_map_drawable
        # later, delete temp images

        # User control: adjust contrast of source_map as a function of percent transfer
        # Hard to explain why, but experimentation shows result more like user expectation.
        # !!! Don't change the original source, only a temporary map we created
        synchronize_contrast( drawable, source_map, percent_transfer)
    else :
        # !!! Maps ARE the target and source, not copies
        source_map = source_drawable
        target_map = drawable

    '''
    Parameters to resynthesizer:
    htile and vtile = 1 since it reduces artifacts around edge
    map_weight I linearize since easier on users than an exponential
    use_border = 1 since there might be a selection and context (outside target).
    9 neighbors (a 3x3 patch) and 200 tries for speed
    '''
    map_weight = calculate_map_weight(percent_transfer)

    # !!! This is for version of resynthesizer, with an uninverted selection
    pdb.plug_in_resynthesizer(image, drawable, 1, 1, 1, source_drawable.ID, source_map.ID, target_map.ID, map_weight, 0.117, 9, 200)

    # Clean up.
    # Delete working images: separate map images and copy of source image
    if not debug:
        if making_gray_maps:
            # Was "if map_mode == 1", which leaked the map images made for the
            # GRAY-target/RGB-source case. Also guard against deleting the
            # originals: make_grayscale_map returns its argument unchanged
            # when that image is already GRAY.
            if target_map_image != image:
                pdb.gimp_image_delete(target_map_image)
            if source_map_image != source_image:
                pdb.gimp_image_delete(source_map_image)
        if is_source_copy: # if created a copy earlier
            pdb.gimp_image_delete(source_image)
    pdb.gimp_image_undo_group_end(image)
    pdb.gimp_displays_flush()
# Register the plugin in GIMP's procedural database; it appears in the menu
# as Filters > Map > Style...
register(
    "python_fu_map_style",
    N_("Transfer style (color and surface) from a chosen source to the active layer. "),
    "Transforms image using art media and style from another image. Maps or synthesizes texture or theme from one image onto another. Requires separate resynthesizer plugin.",
    "Lloyd Konneker (bootch nc.rr.com)",
    "Copyright 2010 Lloyd Konneker",
    "2010",
    N_("Style..."),
    "RGB*, GRAY*", # image modes the plugin accepts (INDEXED checked at runtime)
    [
        (PF_IMAGE, "image", "Input image", None),
        (PF_DRAWABLE, "drawable", "Input drawable", None),
        (PF_DRAWABLE, "source_drawable", _("Source of style:"), None),
        # slider: default 0 is clamped into range (10, 90), step 10.0
        (PF_SLIDER, "percent_transfer", _("Percent transfer:"), 0, (10, 90, 10.0)),
        # radio: int values 0/1, consumed as map_mode in transfer_style
        (PF_RADIO, "map_mode", _("Map by:"), 0, ((_("Color and brightness"), 0),(_("Brightness only"),1)))
    ],
    [], # no return values
    transfer_style,
    menu="<Image>/Filters/Map",
    domain=("resynthesizer", gimp.locale_directory)
    )

main()
| |
# -*- test-case-name: twisted._threads.test.test_team -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of a L{Team} of workers; a thread-pool that can allocate work to
workers.
"""
from __future__ import absolute_import, division, print_function
from collections import deque
from zope.interface import implementer
from . import IWorker
from ._convenience import Quit
class Statistics(object):
    """
    An immutable-by-convention snapshot of a L{Team}'s activity.

    @ivar idleWorkerCount: How many workers are currently idle.
    @type idleWorkerCount: L{int}

    @ivar busyWorkerCount: How many workers are currently busy.
    @type busyWorkerCount: L{int}

    @ivar backloggedWorkCount: How many work items handed to L{Team.do} are
        still waiting because no worker was available to take them.
    @type backloggedWorkCount: L{int}
    """

    def __init__(self, idleWorkerCount, busyWorkerCount, backloggedWorkCount):
        """
        Record the three counters describing the team's state.
        """
        self.idleWorkerCount = idleWorkerCount
        self.busyWorkerCount = busyWorkerCount
        self.backloggedWorkCount = backloggedWorkCount
@implementer(IWorker)
class Team(object):
    """
    A composite L{IWorker} implementation.

    @ivar _quit: A L{Quit} flag indicating whether this L{Team} has been quit
        yet.  This may be set by an arbitrary thread since L{Team.quit} may be
        called from anywhere.

    @ivar _coordinator: the L{IExclusiveWorker} coordinating access to this
        L{Team}'s internal resources.

    @ivar _createWorker: a callable that will create new workers.

    @ivar _logException: a 0-argument callable called in an exception context
        when there is an unhandled error from a task passed to L{Team.do}

    @ivar _idle: a L{set} of idle workers.

    @ivar _busyCount: the number of workers currently busy.

    @ivar _pending: a C{deque} of tasks - that is, 0-argument callables passed
        to L{Team.do} - that are outstanding.

    @ivar _shouldQuitCoordinator: A flag indicating that the coordinator should
        be quit at the next available opportunity.  Unlike L{Team._quit}, this
        flag is only set by the coordinator.

    @ivar _toShrink: the number of workers to shrink this L{Team} by at the
        next available opportunity; set in the coordinator.
    """

    def __init__(self, coordinator, createWorker, logException):
        """
        @param coordinator: an L{IExclusiveWorker} which will coordinate access
            to resources on this L{Team}; that is to say, an
            L{IExclusiveWorker} whose C{do} method ensures that its given work
            will be executed in a mutually exclusive context, not in parallel
            with other work enqueued by C{do} (although possibly in parallel
            with the caller).

        @param createWorker: A 0-argument callable that will create an
            L{IWorker} to perform work.

        @param logException: A 0-argument callable called in an exception
            context when the work passed to C{do} raises an exception.
        """
        self._quit = Quit()
        self._coordinator = coordinator
        self._createWorker = createWorker
        self._logException = logException

        # Don't touch these except from the coordinator.
        self._idle = set()
        self._busyCount = 0
        self._pending = deque()
        self._shouldQuitCoordinator = False
        self._toShrink = 0

    def statistics(self):
        """
        Gather information on the current status of this L{Team}.

        @return: a L{Statistics} describing the current state of this L{Team}.
        """
        return Statistics(len(self._idle), self._busyCount, len(self._pending))

    def grow(self, n):
        """
        Increase the number of idle workers by C{n}.

        @param n: The number of new idle workers to create.
        @type n: L{int}
        """
        self._quit.check()
        # The decorator form submits this function to the coordinator
        # immediately; despite its name it creates up to n workers.
        @self._coordinator.do
        def createOneWorker():
            for x in range(n):
                worker = self._createWorker()
                if worker is None:
                    # Out of resources to create workers; stop growing early.
                    return
                self._recycleWorker(worker)

    def shrink(self, n=None):
        """
        Decrease the number of idle workers by C{n}.

        @param n: The number of idle workers to shut down, or L{None} (or
            unspecified) to shut down all workers.
        @type n: L{int} or L{None}
        """
        self._quit.check()
        self._coordinator.do(lambda: self._quitIdlers(n))

    def _quitIdlers(self, n=None):
        """
        The implementation of C{shrink}, performed by the coordinator worker.

        @param n: see L{Team.shrink}
        """
        if n is None:
            n = len(self._idle) + self._busyCount
        for x in range(n):
            if self._idle:
                self._idle.pop().quit()
            else:
                # No idle worker available now; arrange for a busy worker to
                # be stopped later, when it is recycled.
                self._toShrink += 1
        if self._shouldQuitCoordinator and self._busyCount == 0:
            self._coordinator.quit()

    def do(self, task):
        """
        Perform some work in a worker created by C{createWorker}.

        @param task: the callable to run
        """
        self._quit.check()
        self._coordinator.do(lambda: self._coordinateThisTask(task))

    def _coordinateThisTask(self, task):
        """
        Select a worker to dispatch to, either an idle one or a new one, and
        perform it.

        This method should run on the coordinator worker.

        @param task: the task to dispatch
        @type task: 0-argument callable
        """
        worker = (self._idle.pop() if self._idle
                  else self._createWorker())
        if worker is None:
            # The createWorker method may return None if we're out of resources
            # to create workers.
            self._pending.append(task)
            return
        self._busyCount += 1
        @worker.do
        def doWork():
            try:
                task()
            except:
                # Deliberately broad: any exception from the task is reported
                # via _logException (called while the exception is current).
                self._logException()

            # Back on the coordinator: mark this worker idle again and pick
            # up any backlogged work.
            @self._coordinator.do
            def idleAndPending():
                self._busyCount -= 1
                self._recycleWorker(worker)

    def _recycleWorker(self, worker):
        """
        Called only from coordinator.

        Recycle the given worker into the idle pool.

        @param worker: a worker created by C{createWorker} and now idle.
        @type worker: L{IWorker}
        """
        self._idle.add(worker)
        if self._pending:
            # Re-try the first enqueued thing.
            # (Explicitly do _not_ honor _quit.)
            self._coordinateThisTask(self._pending.popleft())
        elif self._shouldQuitCoordinator:
            self._quitIdlers()
        elif self._toShrink > 0:
            # A previous shrink() found no idle worker; retire this one now.
            self._toShrink -= 1
            self._idle.remove(worker)
            worker.quit()

    def quit(self):
        """
        Stop doing work and shut down all idle workers.
        """
        self._quit.set()
        # In case all the workers are idle when we do this.
        @self._coordinator.do
        def startFinishing():
            self._shouldQuitCoordinator = True
            self._quitIdlers()
| |
# This is a work in progress - see Demos/win32gui_menu.py
# win32gui_struct.py - helpers for working with various win32gui structures.
# As win32gui is "light-weight", it does not define objects for all possible
# win32 structures - in general, "buffer" objects are passed around - it is
# the callers responsibility to pack the buffer in the correct format.
#
# This module defines some helpers for the commonly used structures.
#
# In general, each structure has 3 functions:
#
# buffer, extras = PackSTRUCTURE(items, ...)
# item, ... = UnpackSTRUCTURE(buffer)
# buffer, extras = EmptySTRUCTURE(...)
#
# 'extras' is always items that must be held along with the buffer, as the
# buffer refers to these object's memory.
# For structures that support a 'mask', this mask is hidden from the user - if
# 'None' is passed, the mask flag will not be set, or on return, None will
# be returned for the value if the mask is not set.
#
# NOTE: I considered making these structures look like real classes, and
# support 'attributes' etc - however, ctypes already has a good structure
# mechanism - I think it makes more sense to support ctype structures
# at the win32gui level, then there will be no need for this module at all.
# XXX - the above makes sense in terms of what is built and passed to
# win32gui (ie, the Pack* functions) - but doesn't make as much sense for
# the Unpack* functions, where the aim is user convenience.
import sys
import win32gui
import win32con
import struct
import array
import commctrl
import pywintypes
# True when this interpreter is a 64-bit build; several structure formats
# below need extra alignment padding in that case.
is64bit = "64 bit" in sys.version

try:
    from collections import namedtuple
    def _MakeResult(names_str, values):
        # Build a namedtuple on the fly: the first word of names_str is the
        # type name, the remaining words are the field names.
        names = names_str.split()
        nt = namedtuple(names[0], names[1:])
        return nt(*values)
except ImportError:
    # no namedtuple support - just return the values as a normal tuple.
    def _MakeResult(names_str, values):
        return values

# NMHDR layout: (HWND hwndFrom, UINT_PTR idFrom, UINT code)
_nmhdr_fmt = "PPi"
if is64bit:
    # When the item past the NMHDR gets aligned (eg, when it is a struct)
    # we need this many bytes padding.
    _nmhdr_align_padding = "xxxx"
else:
    _nmhdr_align_padding = ""
# Encode a string suitable for passing in a win32gui related structure
# If win32gui is built with UNICODE defined (ie, py3k), then functions
# like InsertMenuItem are actually calling InsertMenuItemW etc, so all
# strings will need to be unicode.
if win32gui.UNICODE:
    def _make_text_buffer(text):
        # Return a mutable array.array("b") holding text NUL-terminated in
        # the interpreter's internal unicode encoding (UTF-16 on Windows).
        # XXX - at this stage win32gui.UNICODE is only True in py3k,
        # and in py3k is makes sense to reject bytes.
        if not isinstance(text, str):
            raise TypeError('MENUITEMINFO text must be unicode')
        data = (text+'\0').encode("unicode-internal")
        return array.array("b", data)
else:
    def _make_text_buffer(text):
        # Non-UNICODE build: coerce to mbcs-encoded bytes, NUL-terminated.
        if isinstance(text, str):
            text = text.encode("mbcs")
        return array.array("b", text+'\0')

# make an 'empty' buffer, ready for filling with cch characters.
def _make_empty_text_buffer(cch):
    return _make_text_buffer("\0" * cch)
if sys.version_info < (3,0):
    def _make_memory(ob):
        # Immutable byte snapshot of any buffer-able object (Python 2).
        return str(buffer(ob))

    def _make_bytes(sval):
        # Python 2 str already is bytes.
        return sval
else:
    def _make_memory(ob):
        # Immutable byte snapshot of any buffer-able object (Python 3).
        return bytes(memoryview(ob))

    def _make_bytes(sval):
        return sval.encode('ascii')
# Generic WM_NOTIFY unpacking
def UnpackWMNOTIFY(lparam):
    """Unpack the NMHDR pointed at by lparam -> (hwndFrom, idFrom, code)."""
    format = "PPi"
    buf = win32gui.PyGetMemory(lparam, struct.calcsize(format))
    return _MakeResult("WMNOTIFY hwndFrom idFrom code", struct.unpack(format, buf))

def UnpackNMITEMACTIVATE(lparam):
    """Unpack an NMITEMACTIVATE notification (list-view item activation)."""
    format = _nmhdr_fmt + _nmhdr_align_padding
    if is64bit:
        # the struct module doesn't handle this correctly as some of the items
        # are actually structs in structs, which get individually aligned.
        format = format + "iiiiiiixxxxP"
    else:
        format = format + "iiiiiiiP"
    buf = win32gui.PyMakeBuffer(struct.calcsize(format), lparam)
    return _MakeResult("NMITEMACTIVATE hwndFrom idFrom code iItem iSubItem uNewState uOldState uChanged actionx actiony lParam",
                       struct.unpack(format, buf))
# MENUITEMINFO struct
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/WinUI/WindowsUserInterface/Resources/Menus/MenuReference/MenuStructures/MENUITEMINFO.asp
# We use the struct module to pack and unpack strings as MENUITEMINFO
# structures.  We also have special handling for the 'fMask' item in that
# structure to avoid the caller needing to explicitly check validity
# (None is used if the mask excludes/should exclude the value)
_menuiteminfo_fmt = '5i5PiP'

def PackMENUITEMINFO(fType=None, fState=None, wID=None, hSubMenu=None,
                     hbmpChecked=None, hbmpUnchecked=None, dwItemData=None,
                     text=None, hbmpItem=None, dwTypeData=None):
    """Pack a MENUITEMINFO buffer.

    Arguments left as None are (mostly) excluded from fMask.  Returns
    (buffer, extras); 'extras' must be kept alive for as long as the buffer
    is in use, since the buffer points into their memory.
    """
    # 'extras' are objects the caller must keep a reference to (as their
    # memory is used) for the lifetime of the INFO item.
    extras = []
    # ack - dwItemData and dwTypeData were confused for a while...
    assert dwItemData is None or dwTypeData is None, \
           "sorry - these were confused - you probably want dwItemData"
    # if we are a long way past 209, then we can nuke the above...
    if dwTypeData is not None:
        import warnings
        warnings.warn("PackMENUITEMINFO: please use dwItemData instead of dwTypeData")
    if dwItemData is None:
        dwItemData = dwTypeData or 0
    fMask = 0
    if fType is None: fType = 0
    else: fMask |= win32con.MIIM_FTYPE
    if fState is None: fState = 0
    else: fMask |= win32con.MIIM_STATE
    if wID is None: wID = 0
    else: fMask |= win32con.MIIM_ID
    if hSubMenu is None: hSubMenu = 0
    else: fMask |= win32con.MIIM_SUBMENU
    if hbmpChecked is None:
        assert hbmpUnchecked is None, \
               "neither or both checkmark bmps must be given"
        hbmpChecked = hbmpUnchecked = 0
    else:
        assert hbmpUnchecked is not None, \
               "neither or both checkmark bmps must be given"
        fMask |= win32con.MIIM_CHECKMARKS
    # NOTE(review): dwItemData was defaulted above and can no longer be
    # None here, so this always takes the else branch - MIIM_DATA is
    # always set.  Confirm whether that is intended before changing.
    if dwItemData is None: dwItemData = 0
    else: fMask |= win32con.MIIM_DATA
    if hbmpItem is None: hbmpItem = 0
    else: fMask |= win32con.MIIM_BITMAP
    if text is not None:
        fMask |= win32con.MIIM_STRING
        str_buf = _make_text_buffer(text)
        cch = len(text)
        # We are taking address of strbuf - it must not die until windows
        # has finished with our structure.
        lptext = str_buf.buffer_info()[0]
        extras.append(str_buf)
    else:
        lptext = 0
        cch = 0
    # Create the struct.
    # 'P' format does not accept PyHANDLE's !
    item = struct.pack(
        _menuiteminfo_fmt,
        struct.calcsize(_menuiteminfo_fmt), # cbSize
        fMask,
        fType,
        fState,
        wID,
        int(hSubMenu),
        int(hbmpChecked),
        int(hbmpUnchecked),
        dwItemData,
        lptext,
        cch,
        int(hbmpItem)
        )
    # Now copy the string to a writable buffer, so that the result
    # could be passed to a 'Get' function
    return array.array("b", item), extras

def UnpackMENUITEMINFO(s):
    """Unpack a MENUITEMINFO buffer; fields excluded by fMask come back None."""
    (cb,
     fMask,
     fType,
     fState,
     wID,
     hSubMenu,
     hbmpChecked,
     hbmpUnchecked,
     dwItemData,
     lptext,
     cch,
     hbmpItem) = struct.unpack(_menuiteminfo_fmt, s)
    assert cb==len(s)
    # Note: '&' binds tighter than '==', so each line tests
    # (fMask & FLAG) == 0, i.e. "flag not set".
    if fMask & win32con.MIIM_FTYPE==0: fType = None
    if fMask & win32con.MIIM_STATE==0: fState = None
    if fMask & win32con.MIIM_ID==0: wID = None
    if fMask & win32con.MIIM_SUBMENU==0: hSubMenu = None
    if fMask & win32con.MIIM_CHECKMARKS==0: hbmpChecked = hbmpUnchecked = None
    if fMask & win32con.MIIM_DATA==0: dwItemData = None
    if fMask & win32con.MIIM_BITMAP==0: hbmpItem = None
    if fMask & win32con.MIIM_STRING:
        text = win32gui.PyGetString(lptext, cch)
    else:
        text = None
    return _MakeResult("MENUITEMINFO fType fState wID hSubMenu hbmpChecked "
                       "hbmpUnchecked dwItemData text hbmpItem",
                       (fType, fState, wID, hSubMenu, hbmpChecked, hbmpUnchecked, \
                        dwItemData, text, hbmpItem))

def EmptyMENUITEMINFO(mask = None, text_buf_size=512):
    """Build an empty MENUITEMINFO suitable for passing to a 'Get' function.

    Returns (buffer, extra); keep 'extra' alive while the buffer is in use.
    """
    # text_buf_size is number of *characters* - not necessarily no of bytes.
    extra = []
    if mask is None:
        mask = win32con.MIIM_BITMAP | win32con.MIIM_CHECKMARKS | \
               win32con.MIIM_DATA | win32con.MIIM_FTYPE | \
               win32con.MIIM_ID | win32con.MIIM_STATE | \
               win32con.MIIM_STRING | win32con.MIIM_SUBMENU
        # Note: No MIIM_TYPE - this screws win2k/98.

    if mask & win32con.MIIM_STRING:
        text_buffer = _make_empty_text_buffer(text_buf_size)
        extra.append(text_buffer)
        text_addr, _ = text_buffer.buffer_info()
    else:
        text_addr = text_buf_size = 0

    # Now copy the string to a writable buffer, so that the result
    # could be passed to a 'Get' function
    buf = struct.pack(
        _menuiteminfo_fmt,
        struct.calcsize(_menuiteminfo_fmt), # cbSize
        mask,
        0, #fType,
        0, #fState,
        0, #wID,
        0, #hSubMenu,
        0, #hbmpChecked,
        0, #hbmpUnchecked,
        0, #dwItemData,
        text_addr,
        text_buf_size,
        0, #hbmpItem
        )
    return array.array("b", buf), extra
# MENUINFO struct
# Layout: cbSize, fMask, dwStyle, cyMax, hbrBack, dwContextHelpID, dwMenuData
_menuinfo_fmt = 'iiiiPiP'

def PackMENUINFO(dwStyle = None, cyMax = None,
                 hbrBack = None, dwContextHelpID = None, dwMenuData = None,
                 fMask = 0):
    """Pack a MENUINFO buffer; None arguments are excluded from fMask."""
    if dwStyle is None: dwStyle = 0
    else: fMask |= win32con.MIM_STYLE
    if cyMax is None: cyMax = 0
    else: fMask |= win32con.MIM_MAXHEIGHT
    if hbrBack is None: hbrBack = 0
    else: fMask |= win32con.MIM_BACKGROUND
    if dwContextHelpID is None: dwContextHelpID = 0
    else: fMask |= win32con.MIM_HELPID
    if dwMenuData is None: dwMenuData = 0
    else: fMask |= win32con.MIM_MENUDATA
    # Create the struct.
    item = struct.pack(
        _menuinfo_fmt,
        struct.calcsize(_menuinfo_fmt), # cbSize
        fMask,
        dwStyle,
        cyMax,
        hbrBack,
        dwContextHelpID,
        dwMenuData)
    return array.array("b", item)

def UnpackMENUINFO(s):
    """Unpack a MENUINFO buffer; fields excluded by fMask come back None."""
    (cb,
     fMask,
     dwStyle,
     cyMax,
     hbrBack,
     dwContextHelpID,
     dwMenuData) = struct.unpack(_menuinfo_fmt, s)
    assert cb==len(s)
    # '&' binds tighter than '==': these test (fMask & FLAG) == 0.
    if fMask & win32con.MIM_STYLE==0: dwStyle = None
    if fMask & win32con.MIM_MAXHEIGHT==0: cyMax = None
    if fMask & win32con.MIM_BACKGROUND==0: hbrBack = None
    if fMask & win32con.MIM_HELPID==0: dwContextHelpID = None
    if fMask & win32con.MIM_MENUDATA==0: dwMenuData = None
    return _MakeResult("MENUINFO dwStyle cyMax hbrBack dwContextHelpID dwMenuData",
                       (dwStyle, cyMax, hbrBack, dwContextHelpID, dwMenuData))

def EmptyMENUINFO(mask = None):
    """Build an empty MENUINFO suitable for passing to a 'Get' function."""
    if mask is None:
        mask = win32con.MIM_STYLE | win32con.MIM_MAXHEIGHT| \
               win32con.MIM_BACKGROUND | win32con.MIM_HELPID | \
               win32con.MIM_MENUDATA
    buf = struct.pack(
        _menuinfo_fmt,
        struct.calcsize(_menuinfo_fmt), # cbSize
        mask,
        0, #dwStyle
        0, #cyMax
        0, #hbrBack,
        0, #dwContextHelpID,
        0, #dwMenuData,
        )
    return array.array("b", buf)
##########################################################################
#
# Tree View structure support - TVITEM, TVINSERTSTRUCT and TVDISPINFO
#
##########################################################################
# XXX - Note that the following implementation of TreeView structures is ripped
# XXX - from the SpamBayes project.  It may not quite work correctly yet - I
# XXX - intend checking them later - but having them is better than not at all!
# TVITEM layout: mask, hItem, state, stateMask, pszText, cchTextMax,
# iImage, iSelectedImage, cChildren, lParam
_tvitem_fmt = "iPiiPiiiiP"
# Helpers for the ugly win32 structure packing/unpacking
# XXX - Note that functions using _GetMaskAndVal run 3x faster if they are
# 'inlined' into the function - see PackLVITEM.  If the profiler points at
# _GetMaskAndVal(), you should nuke it (patches welcome once they have been
# tested)
def _GetMaskAndVal(val, default, mask, flag):
if val is None:
return mask, default
else:
if flag is not None:
mask |= flag
return mask, val
def PackTVINSERTSTRUCT(parent, insertAfter, tvitem):
    """Pack a TVINSERTSTRUCT; tvitem is the argument tuple for PackTVITEM."""
    tvitem_buf, extra = PackTVITEM(*tvitem)
    # NOTE(review): array.tostring() was removed in Python 3.9 (tobytes()
    # is the replacement) - confirm supported Python versions before use.
    tvitem_buf = tvitem_buf.tostring()
    format = "PP%ds" % len(tvitem_buf)
    return struct.pack(format, parent, insertAfter, tvitem_buf), extra

def PackTVITEM(hitem, state, stateMask, text, image, selimage, citems, param):
    """Pack a TVITEM buffer; None arguments are excluded from the mask.

    Returns (buffer, extra); keep 'extra' alive while the buffer is in use.
    """
    extra = [] # objects we must keep references to
    mask = 0
    mask, hitem = _GetMaskAndVal(hitem, 0, mask, commctrl.TVIF_HANDLE)
    mask, state = _GetMaskAndVal(state, 0, mask, commctrl.TVIF_STATE)
    if not mask & commctrl.TVIF_STATE:
        stateMask = 0
    mask, text = _GetMaskAndVal(text, None, mask, commctrl.TVIF_TEXT)
    mask, image = _GetMaskAndVal(image, 0, mask, commctrl.TVIF_IMAGE)
    mask, selimage = _GetMaskAndVal(selimage, 0, mask, commctrl.TVIF_SELECTEDIMAGE)
    mask, citems = _GetMaskAndVal(citems, 0, mask, commctrl.TVIF_CHILDREN)
    mask, param = _GetMaskAndVal(param, 0, mask, commctrl.TVIF_PARAM)
    if text is None:
        text_addr = text_len = 0
    else:
        text_buffer = _make_text_buffer(text)
        text_len = len(text)
        extra.append(text_buffer)
        text_addr, _ = text_buffer.buffer_info()
    buf = struct.pack(_tvitem_fmt,
                      mask, hitem,
                      state, stateMask,
                      text_addr, text_len, # text
                      image, selimage,
                      citems, param)
    return array.array("b", buf), extra

# Make a new buffer suitable for querying hitem's attributes.
def EmptyTVITEM(hitem, mask = None, text_buf_size=512):
    extra = [] # objects we must keep references to
    if mask is None:
        mask = commctrl.TVIF_HANDLE | commctrl.TVIF_STATE | commctrl.TVIF_TEXT | \
               commctrl.TVIF_IMAGE | commctrl.TVIF_SELECTEDIMAGE | \
               commctrl.TVIF_CHILDREN | commctrl.TVIF_PARAM

    if mask & commctrl.TVIF_TEXT:
        text_buffer = _make_empty_text_buffer(text_buf_size)
        extra.append(text_buffer)
        text_addr, _ = text_buffer.buffer_info()
    else:
        text_addr = text_buf_size = 0
    buf = struct.pack(_tvitem_fmt,
                      mask, hitem,
                      0, 0,
                      text_addr, text_buf_size, # text
                      0, 0,
                      0, 0)
    return array.array("b", buf), extra

def UnpackTVITEM(buffer):
    """Unpack a TVITEM buffer; fields excluded by the mask come back None."""
    item_mask, item_hItem, item_state, item_stateMask, \
        item_textptr, item_cchText, item_image, item_selimage, \
        item_cChildren, item_param = struct.unpack(_tvitem_fmt, buffer)
    # ensure only items listed by the mask are valid (except we assume the
    # handle is always valid - some notifications (eg, TVN_ENDLABELEDIT) set a
    # mask that doesn't include the handle, but the docs explicitly say it is.)
    if not (item_mask & commctrl.TVIF_TEXT): item_textptr = item_cchText = None
    if not (item_mask & commctrl.TVIF_CHILDREN): item_cChildren = None
    if not (item_mask & commctrl.TVIF_IMAGE): item_image = None
    if not (item_mask & commctrl.TVIF_PARAM): item_param = None
    if not (item_mask & commctrl.TVIF_SELECTEDIMAGE): item_selimage = None
    if not (item_mask & commctrl.TVIF_STATE): item_state = item_stateMask = None

    if item_textptr:
        text = win32gui.PyGetString(item_textptr)
    else:
        text = None

    return _MakeResult("TVITEM item_hItem item_state item_stateMask "
                       "text item_image item_selimage item_cChildren item_param",
                       (item_hItem, item_state, item_stateMask, text,
                        item_image, item_selimage, item_cChildren, item_param))

# Unpack the lparam from a "TVNOTIFY" message
def UnpackTVNOTIFY(lparam):
    item_size = struct.calcsize(_tvitem_fmt)
    format = _nmhdr_fmt + _nmhdr_align_padding
    if is64bit:
        # action int is followed by alignment padding before the structs.
        format = format + "ixxxx"
    else:
        format = format + "i"
    format = format + "%ds%ds" % (item_size, item_size)
    buf = win32gui.PyGetMemory(lparam, struct.calcsize(format))
    hwndFrom, id, code, action, buf_old, buf_new \
        = struct.unpack(format, buf)
    item_old = UnpackTVITEM(buf_old)
    item_new = UnpackTVITEM(buf_new)
    return _MakeResult("TVNOTIFY hwndFrom id code action item_old item_new",
                       (hwndFrom, id, code, action, item_old, item_new))

def UnpackTVDISPINFO(lparam):
    item_size = struct.calcsize(_tvitem_fmt)
    format = "PPi%ds" % (item_size,)
    buf = win32gui.PyGetMemory(lparam, struct.calcsize(format))
    hwndFrom, id, code, buf_item = struct.unpack(format, buf)
    item = UnpackTVITEM(buf_item)
    return _MakeResult("TVDISPINFO hwndFrom id code item",
                       (hwndFrom, id, code, item))
#
# List view items
_lvitem_fmt = "iiiiiPiiPi"
def PackLVITEM(item=None, subItem=None, state=None, stateMask=None, text=None, image=None, param=None, indent=None):
extra = [] # objects we must keep references to
mask = 0
# _GetMaskAndVal adds quite a bit of overhead to this function.
if item is None: item = 0 # No mask for item
if subItem is None: subItem = 0 # No mask for sibItem
if state is None:
state = 0
stateMask = 0
else:
mask |= commctrl.LVIF_STATE
if stateMask is None: stateMask = state
if image is None: image = 0
else: mask |= commctrl.LVIF_IMAGE
if param is None: param = 0
else: mask |= commctrl.LVIF_PARAM
if indent is None: indent = 0
else: mask |= commctrl.LVIF_INDENT
if text is None:
text_addr = text_len = 0
else:
mask |= commctrl.LVIF_TEXT
text_buffer = _make_text_buffer(text)
text_len = len(text)
extra.append(text_buffer)
text_addr, _ = text_buffer.buffer_info()
buf = struct.pack(_lvitem_fmt,
mask, item, subItem,
state, stateMask,
text_addr, text_len, # text
image, param, indent)
return array.array("b", buf), extra
def UnpackLVITEM(buffer):
    """Decode an LVITEM structure, returning None for any field whose
    LVIF_* bit is absent from the mask."""
    item_mask, item_item, item_subItem, \
        item_state, item_stateMask, \
        item_textptr, item_cchText, item_image, \
        item_param, item_indent = struct.unpack(_lvitem_fmt, buffer)
    # ensure only items listed by the mask are valid
    if not (item_mask & commctrl.LVIF_TEXT): item_textptr = item_cchText = None
    if not (item_mask & commctrl.LVIF_IMAGE): item_image = None
    if not (item_mask & commctrl.LVIF_PARAM): item_param = None
    if not (item_mask & commctrl.LVIF_INDENT): item_indent = None
    if not (item_mask & commctrl.LVIF_STATE): item_state = item_stateMask = None
    if item_textptr:
        # Follow the raw pointer to fetch the actual string.
        text = win32gui.PyGetString(item_textptr)
    else:
        text = None
    return _MakeResult("LVITEM item_item item_subItem item_state "
                       "item_stateMask text item_image item_param item_indent",
                       (item_item, item_subItem, item_state, item_stateMask,
                        text, item_image, item_param, item_indent))
# Unpack an "LVDISPINFO" message (the comment previously said LVNOTIFY)
def UnpackLVDISPINFO(lparam):
    """Decode an NMLVDISPINFO structure from a notification lparam."""
    embedded = "%ds" % (struct.calcsize(_lvitem_fmt),)
    fmt = _nmhdr_fmt + _nmhdr_align_padding + embedded
    raw = win32gui.PyGetMemory(lparam, struct.calcsize(fmt))
    hwnd_from, ctrl_id, notify_code, packed_item = struct.unpack(fmt, raw)
    return _MakeResult("LVDISPINFO hwndFrom id code item",
                       (hwnd_from, ctrl_id, notify_code,
                        UnpackLVITEM(packed_item)))
def UnpackLVNOTIFY(lparam):
    """Decode an NMLISTVIEW structure from a list-view notification lparam."""
    # NMHDR, then 7 ints: iItem, iSubItem, uNewState, uOldState, uChanged,
    # and the POINT (x, y).
    format = _nmhdr_fmt + _nmhdr_align_padding + "7i"
    if is64bit:
        format = format + "xxxx" # point needs padding.
    format = format + "P"
    buf = win32gui.PyGetMemory(lparam, struct.calcsize(format))
    hwndFrom, id, code, item, subitem, newstate, oldstate, \
        changed, pt_x, pt_y, lparam = struct.unpack(format, buf)
    return _MakeResult("UnpackLVNOTIFY hwndFrom id code item subitem "
                       "newstate oldstate changed pt lparam",
                       (hwndFrom, id, code, item, subitem, newstate, oldstate,
                        changed, (pt_x, pt_y), lparam))
# Make a new buffer suitable for querying an items attributes.
def EmptyLVITEM(item, subitem, mask = None, text_buf_size=512):
    """Build a writable LVITEM for the given item/subitem, suitable for a
    LVM_GETITEM query.

    'mask' defaults to all queryable fields.  Returns (buffer, extra);
    'extra' keeps the text buffer alive while the control writes into it.
    """
    extra = [] # objects we must keep references to
    if mask is None:
        mask = commctrl.LVIF_IMAGE | commctrl.LVIF_INDENT | commctrl.LVIF_TEXT | \
               commctrl.LVIF_PARAM | commctrl.LVIF_STATE
    if mask & commctrl.LVIF_TEXT:
        text_buffer = _make_empty_text_buffer(text_buf_size)
        extra.append(text_buffer)
        text_addr, _ = text_buffer.buffer_info()
    else:
        text_addr = text_buf_size = 0
    buf = struct.pack(_lvitem_fmt,
                      mask, item, subitem,
                      0, 0,
                      text_addr, text_buf_size, # text
                      0, 0, 0)
    return array.array("b", buf), extra
# List view column structure
# LVCOLUMN layout: mask, fmt, cx, pszText (ptr), cchTextMax, iSubItem,
# iImage, iOrder.
_lvcolumn_fmt = "iiiPiiii"
def PackLVCOLUMN(fmt=None, cx=None, text=None, subItem=None, image=None, order=None):
    """Pack an LVCOLUMN structure; only fields supplied get their LVCF_*
    mask bit set.

    Returns (buffer, extra); 'extra' keeps the text buffer alive while
    the LVCOLUMN is in use.
    """
    extra = [] # objects we must keep references to
    mask = 0
    mask, fmt = _GetMaskAndVal(fmt, 0, mask, commctrl.LVCF_FMT)
    mask, cx = _GetMaskAndVal(cx, 0, mask, commctrl.LVCF_WIDTH)
    mask, text = _GetMaskAndVal(text, None, mask, commctrl.LVCF_TEXT)
    mask, subItem = _GetMaskAndVal(subItem, 0, mask, commctrl.LVCF_SUBITEM)
    mask, image = _GetMaskAndVal(image, 0, mask, commctrl.LVCF_IMAGE)
    mask, order= _GetMaskAndVal(order, 0, mask, commctrl.LVCF_ORDER)
    if text is None:
        text_addr = text_len = 0
    else:
        text_buffer = _make_text_buffer(text)
        extra.append(text_buffer)
        text_addr, _ = text_buffer.buffer_info()
        text_len = len(text)
    buf = struct.pack(_lvcolumn_fmt,
                      mask, fmt, cx,
                      text_addr, text_len, # text
                      subItem, image, order)
    return array.array("b", buf), extra
def UnpackLVCOLUMN(lparam):
    """Decode an LVCOLUMN structure, returning None for every field whose
    LVCF_* bit is absent from the mask."""
    (mask, col_fmt, width, text_addr, text_size,
     sub_item, image_idx, col_order) = struct.unpack(_lvcolumn_fmt, lparam)
    # Invalidate any field the mask does not cover.
    if not mask & commctrl.LVCF_FMT:
        col_fmt = None
    if not mask & commctrl.LVCF_WIDTH:
        width = None
    if not mask & commctrl.LVCF_TEXT:
        text_addr = text_size = None
    if not mask & commctrl.LVCF_SUBITEM:
        sub_item = None
    if not mask & commctrl.LVCF_IMAGE:
        image_idx = None
    if not mask & commctrl.LVCF_ORDER:
        col_order = None
    text = win32gui.PyGetString(text_addr) if text_addr else None
    return _MakeResult("LVCOLUMN fmt cx text subItem image order",
                       (col_fmt, width, text, sub_item, image_idx, col_order))
# Make a new buffer suitable for querying an items attributes.
def EmptyLVCOLUMN(mask = None, text_buf_size=512):
    """Build a writable LVCOLUMN suitable for an LVM_GETCOLUMN query.

    'mask' defaults to all queryable fields.  Returns (buffer, extra);
    'extra' keeps the text buffer alive while the control writes into it.
    """
    extra = [] # objects we must keep references to
    if mask is None:
        mask = commctrl.LVCF_FMT | commctrl.LVCF_WIDTH | commctrl.LVCF_TEXT | \
               commctrl.LVCF_SUBITEM | commctrl.LVCF_IMAGE | commctrl.LVCF_ORDER
    if mask & commctrl.LVCF_TEXT:
        text_buffer = _make_empty_text_buffer(text_buf_size)
        extra.append(text_buffer)
        text_addr, _ = text_buffer.buffer_info()
    else:
        text_addr = text_buf_size = 0
    buf = struct.pack(_lvcolumn_fmt,
                      mask, 0, 0,
                      text_addr, text_buf_size, # text
                      0, 0, 0)
    return array.array("b", buf), extra
# List view hit-test.
def PackLVHITTEST(pt):
    """Pack an LVHITTESTINFO structure for the given (x, y) point.

    Returns a (buffer, extra) pair like the other Pack* helpers; this
    structure carries no text, so extra is always None.
    """
    x, y = pt[0], pt[1]
    packed = struct.pack("iiiii", x, y, 0, 0, 0)
    return array.array("b", packed), None
def UnpackLVHITTEST(buf):
    """Decode an LVHITTESTINFO structure into (pt, flags, item, subitem)."""
    hit_x, hit_y, hit_flags, hit_item, hit_subitem = struct.unpack("iiiii", buf)
    return _MakeResult("LVHITTEST pt flags item subitem",
                       ((hit_x, hit_y), hit_flags, hit_item, hit_subitem))
def PackHDITEM(cxy = None, text = None, hbm = None, fmt = None,
               param = None, image = None, order = None):
    """Pack an HDITEM structure for a header control.

    Only the fields supplied get their HDI_* mask bit set.  Returns
    (buffer, extra); 'extra' keeps the text buffer alive while the
    HDITEM is in use.
    """
    extra = [] # objects we must keep references to
    mask = 0
    mask, cxy = _GetMaskAndVal(cxy, 0, mask, commctrl.HDI_HEIGHT)
    # BUGFIX: this previously used commctrl.LVCF_TEXT, whose value equals
    # HDI_FORMAT - so supplying text set the wrong mask bit.  HDI_TEXT is
    # the correct header-control flag.
    mask, text = _GetMaskAndVal(text, None, mask, commctrl.HDI_TEXT)
    mask, hbm = _GetMaskAndVal(hbm, 0, mask, commctrl.HDI_BITMAP)
    mask, fmt = _GetMaskAndVal(fmt, 0, mask, commctrl.HDI_FORMAT)
    mask, param = _GetMaskAndVal(param, 0, mask, commctrl.HDI_LPARAM)
    mask, image = _GetMaskAndVal(image, 0, mask, commctrl.HDI_IMAGE)
    mask, order = _GetMaskAndVal(order, 0, mask, commctrl.HDI_ORDER)
    if text is None:
        text_addr = text_len = 0
    else:
        text_buffer = _make_text_buffer(text)
        extra.append(text_buffer)
        text_addr, _ = text_buffer.buffer_info()
        text_len = len(text)
    # HDITEM layout: mask, cxy, pszText, hbm, cchTextMax, fmt, lParam,
    # iImage, iOrder, plus two trailing ints (type/filter fields).
    format = "iiPPiiPiiii"
    buf = struct.pack(format,
                      mask, cxy, text_addr, hbm, text_len,
                      fmt, param, image, order, 0, 0)
    return array.array("b", buf), extra
# Device notification stuff
# Generic function for packing a DEV_BROADCAST_* structure - generally used
# by the other PackDEV_BROADCAST_* functions in this module.
def PackDEV_BROADCAST(devicetype, rest_fmt, rest_data, extra_data=_make_bytes('')):
    """Pack a DEV_BROADCAST_* structure: a DEV_BROADCAST_HDR (size,
    devicetype, reserved) followed by the type-specific fields in
    'rest_fmt'/'rest_data' and any trailing variable-length data."""
    # It seems a requirement is 4 byte alignment, even for the 'BYTE data[1]'
    # field (eg, that would make DEV_BROADCAST_HANDLE 41 bytes, but we must
    # be 44).
    # NOTE(review): when len(extra_data) is already a multiple of 4 this
    # appends 4 extra NUL bytes rather than 0 - presumably harmless padding,
    # but confirm against consumers before changing.
    extra_data += _make_bytes('\0' * (4-len(extra_data)%4))
    # "iii" is the DEV_BROADCAST_HDR: dbch_size, dbch_devicetype, reserved.
    format = "iii" + rest_fmt
    full_size = struct.calcsize(format) + len(extra_data)
    data = (full_size, devicetype, 0) + rest_data
    return struct.pack(format, *data) + extra_data
def PackDEV_BROADCAST_HANDLE(handle, hdevnotify=0, guid=_make_bytes("\0"*16), name_offset=0, data=_make_bytes("\0")):
    """Pack a DEV_BROADCAST_HANDLE structure: two handles, a 16-byte GUID
    and a name offset, with 'data' as the trailing variable-length bytes."""
    return PackDEV_BROADCAST(win32con.DBT_DEVTYP_HANDLE, "PP16sl",
                             (int(handle), int(hdevnotify), _make_memory(guid), name_offset),
                             data)
def PackDEV_BROADCAST_VOLUME(unitmask, flags):
    """Pack a DEV_BROADCAST_VOLUME structure (drive unit bitmask + flags)."""
    return PackDEV_BROADCAST(win32con.DBT_DEVTYP_VOLUME, "II",
                             (unitmask, flags))
def PackDEV_BROADCAST_DEVICEINTERFACE(classguid, name=""):
    """Pack a DEV_BROADCAST_DEVICEINTERFACE structure for the given device
    interface class GUID and optional device name."""
    if win32gui.UNICODE:
        # This really means "is py3k?" - so not accepting bytes is OK
        if not isinstance(name, str):
            raise TypeError("Must provide unicode for the name")
        # NOTE(review): the 'unicode-internal' codec is deprecated in
        # modern Python 3 - confirm this still works on target versions.
        name = name.encode('unicode-internal')
    else:
        # py2k was passed a unicode object - encode as mbcs.
        if isinstance(name, str):
            name = name.encode('mbcs')
    # 16 bytes for the IID followed by \0 term'd string.
    rest_fmt = "16s%ds" % len(name)
    # _make_memory(iid) hoops necessary to get the raw IID bytes.
    rest_data = (_make_memory(pywintypes.IID(classguid)), name)
    return PackDEV_BROADCAST(win32con.DBT_DEVTYP_DEVICEINTERFACE, rest_fmt, rest_data)
# An object returned by UnpackDEV_BROADCAST.
class DEV_BROADCAST_INFO:
    """Simple attribute bag describing a decoded DEV_BROADCAST_* structure."""
    def __init__(self, devicetype, **kw):
        self.devicetype = devicetype
        for key, value in kw.items():
            setattr(self, key, value)
    def __str__(self):
        return "DEV_BROADCAST_INFO:" + str(self.__dict__)
# Support for unpacking the 'lparam'
def UnpackDEV_BROADCAST(lparam):
    """Decode the lparam of a WM_DEVICECHANGE message into a
    DEV_BROADCAST_INFO, or None when lparam is 0.

    Raises NotImplementedError for device types other than HANDLE,
    DEVICEINTERFACE and VOLUME.
    """
    if lparam == 0:
        return None
    # First read just the DEV_BROADCAST_HDR to learn size and type.
    hdr_format = "iii"
    hdr_size = struct.calcsize(hdr_format)
    hdr_buf = win32gui.PyGetMemory(lparam, hdr_size)
    size, devtype, reserved = struct.unpack("iii", hdr_buf)
    # Due to x64 alignment issues, we need to use the full format string over
    # the entire buffer.  ie, on x64:
    # calcsize('iiiP') != calcsize('iii')+calcsize('P')
    buf = win32gui.PyGetMemory(lparam, size)
    # 'extra' and 'x' alias the same dict; 'x' is just a short name for
    # the per-type field assignments below.
    extra = x = {}
    if devtype == win32con.DBT_DEVTYP_HANDLE:
        # 2 handles, a GUID, a LONG and possibly an array following...
        fmt = hdr_format + "PP16sl"
        _, _, _, x['handle'], x['hdevnotify'], guid_bytes, x['nameoffset'] = \
            struct.unpack(fmt, buf[:struct.calcsize(fmt)])
        x['eventguid'] = pywintypes.IID(guid_bytes, True)
    elif devtype == win32con.DBT_DEVTYP_DEVICEINTERFACE:
        fmt = hdr_format + "16s"
        _, _, _, guid_bytes = struct.unpack(fmt, buf[:struct.calcsize(fmt)])
        x['classguid'] = pywintypes.IID(guid_bytes, True)
        # The device name is a NUL-terminated string following the GUID.
        x['name'] = win32gui.PyGetString(lparam + struct.calcsize(fmt))
    elif devtype == win32con.DBT_DEVTYP_VOLUME:
        # int mask and flags
        fmt = hdr_format + "II"
        _, _, _, x['unitmask'], x['flags'] = struct.unpack(fmt, buf[:struct.calcsize(fmt)])
    else:
        raise NotImplementedError("unknown device type %d" % (devtype,))
    return DEV_BROADCAST_INFO(devtype, **extra)
| |
from django.contrib.contenttypes.generic import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import ManyToManyRel, RelatedField
from django.db.models.related import RelatedObject
from django.db.models.fields.related import add_lazy_relation
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from taggit.models import TaggedItem, GenericTaggedItemBase
from taggit.utils import require_instance_manager
# Fallback definition of the 'all' builtin for very old Pythons (< 2.5)
# and old Django versions.
try:
    all
except NameError:
    # 2.4 compat
    try:
        from django.utils.itercompat import all
    except ImportError:
        # 1.1.X compat
        def all(iterable):
            # True iff every item in 'iterable' is truthy (vacuously True
            # for an empty iterable).
            for item in iterable:
                if not item:
                    return False
            return True
class TaggableRel(ManyToManyRel):
    """Minimal relation descriptor used by TaggableManager.

    Deliberately does not call ManyToManyRel.__init__; it only sets the
    attributes Django's related-field machinery inspects.
    """
    def __init__(self):
        self.through = None
        self.related_name = None
        self.symmetrical = True
        self.multiple = True
        self.limit_choices_to = {}
class TaggableManager(RelatedField):
    """Model field providing tagging via a descriptor protocol.

    Accessing the field on an instance returns a _TaggableManager bound
    to that instance; accessing it on the class returns one with
    instance=None.
    """
    help_text = _("A comma-separated list of tags.")
    verbose_name = _("Tags")
    def __init__(self, verbose_name=None,
                 help_text=None,
                 through=None,
                 blank=False,
                 transform_on_save=False):
        self.transform_on_save = transform_on_save
        self.through = through or TaggedItem
        self.rel = TaggableRel()
        self.verbose_name = verbose_name
        if help_text is not None:
            self.help_text = help_text
        self.blank = blank
        # Field-protocol attributes Django expects on any model field.
        self.editable = True
        self.unique = False
        self.creates_table = False
        self.db_column = None
        self.choices = None
        self.serialize = False
        self.null = True
        # Mimic models.Field ordering so this field sorts correctly.
        self.creation_counter = models.Field.creation_counter
        models.Field.creation_counter += 1
    def __get__(self, instance, model):
        """Descriptor access: return a manager bound to 'instance'."""
        if instance is not None and instance.pk is None:
            raise ValueError("%s objects need to have a primary key value "
                "before you can access their tags." % model.__name__)
        manager = _TaggableManager(
            through=self.through, model=model, instance=instance
        )
        return manager
    def contribute_to_class(self, cls, name):
        """Register the field on 'cls' and resolve a lazy 'through' model."""
        self.name = self.column = name
        self.model = cls
        cls._meta.add_field(self)
        setattr(cls, name, self)
        if not cls._meta.abstract:
            # A string 'through' is a lazy reference; resolve it when the
            # target model class is ready.
            if isinstance(self.through, basestring):
                def resolve_related_class(field, model, cls):
                    self.through = model
                    self.post_through_setup(cls)
                add_lazy_relation(
                    cls, self, self.through, resolve_related_class
                )
            else:
                self.post_through_setup(cls)
    def post_through_setup(self, cls):
        """Finish setup once the 'through' model class is known."""
        self.use_gfk = (
            self.through is None or issubclass(self.through, GenericTaggedItemBase)
        )
        self.rel.to = self.through._meta.get_field("tag").rel.to
        if self.use_gfk:
            tagged_items = GenericRelation(self.through)
            tagged_items.contribute_to_class(cls, "tagged_items")
    def save_form_data(self, instance, value):
        getattr(instance, self.name).set(*value)
    def formfield(self, form_class=TagField, **kwargs):
        defaults = {
            "label": self.verbose_name,
            "help_text": self.help_text,
            "required": not self.blank,
            "transform_on_save": self.transform_on_save
        }
        defaults.update(kwargs)
        return form_class(**defaults)
    def value_from_object(self, instance):
        if instance.pk:
            return self.through.objects.filter(**self.through.lookup_kwargs(instance))
        return self.through.objects.none()
    def related_query_name(self):
        return self.model._meta.module_name
    def m2m_target_field_name(self):
        return self.model._meta.pk.name
    def m2m_reverse_target_field_name(self):
        return self.rel.to._meta.pk.name
    def m2m_reverse_name(self):
        return self.through._meta.get_field_by_name("tag")[0].column
    def m2m_column_name(self):
        # With a generic FK the object id lives on the virtual field.
        if self.use_gfk:
            return self.through._meta.virtual_fields[0].fk_field
        return self.through._meta.get_field('content_object').column
    def db_type(self, connection=None):
        # No database column of its own.
        return None
    def m2m_db_table(self):
        return self.through._meta.db_table
    def extra_filters(self, pieces, pos, negate):
        """Restrict generic-FK joins to the content types of this model
        (and its subclasses)."""
        if negate or not self.use_gfk:
            return []
        prefix = "__".join(["tagged_items"] + pieces[:pos-2])
        cts = map(ContentType.objects.get_for_model, _get_subclasses(self.model))
        if len(cts) == 1:
            return [("%s__content_type" % prefix, cts[0])]
        return [("%s__content_type__in" % prefix, cts)]
    def bulk_related_objects(self, new_objs, using):
        return []
class _TaggableManager(models.Manager):
    """Manager bound to a single model instance (or the model class) that
    performs tag CRUD through the 'through' model."""
    def __init__(self, through, model, instance):
        self.through = through
        self.model = model
        self.instance = instance
    def get_query_set(self):
        return self.through.tags_for(self.model, self.instance)
    def _lookup_kwargs(self):
        # Filter kwargs identifying this instance's rows in 'through'.
        return self.through.lookup_kwargs(self.instance)
    @require_instance_manager
    def add(self, *tags):
        """Add tags (strings or tag objects), creating missing tags.

        New tag names that only differ from an existing tag by case are
        NOT created (case-insensitive de-duplication).
        """
        str_tags = set([
            t
            for t in tags
            if not isinstance(t, self.through.tag_model())
        ])
        tag_objs = set(tags) - str_tags
        # If str_tags has 0 elements Django actually optimizes that to not do a
        # query. Malcolm is very smart.
        existing = self.through.tag_model().objects.filter(
            name__in=str_tags
        )
        tag_objs.update(existing)
        existing_names = set(t.name for t in existing)
        existing_names_lower = set(t.name.lower() for t in existing)
        for new_name in str_tags - existing_names:
            # Only create the tag if no case-insensitive match exists.
            if len(set([new_name.lower()]) - existing_names_lower) > 0:
                tag_objs.add(self.through.tag_model().objects.create(name=new_name))
        for tag in tag_objs:
            self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
    @require_instance_manager
    def set(self, *tags):
        """Replace all tags on the instance with 'tags'."""
        self.clear()
        self.add(*tags)
    @require_instance_manager
    def remove(self, *tags):
        self.through.objects.filter(**self._lookup_kwargs()).filter(
            tag__name__in=tags).delete()
    @require_instance_manager
    def clear(self):
        self.through.objects.filter(**self._lookup_kwargs()).delete()
    def most_common(self):
        # Tags ordered by how many objects carry them, most used first.
        return self.get_query_set().annotate(
            num_times=models.Count(self.through.tag_relname())
        ).order_by('-num_times')
    @require_instance_manager
    def similar_objects(self, num=None, **filters):
        """Return objects sharing tags with this instance, most-shared
        first; each result carries a 'similar_tags' count."""
        lookup_kwargs = self._lookup_kwargs()
        lookup_keys = sorted(lookup_kwargs)
        qs = self.through.objects.values(*lookup_kwargs.keys())
        qs = qs.annotate(n=models.Count('pk'))
        qs = qs.exclude(**lookup_kwargs)
        subq = self.all()
        qs = qs.filter(tag__in=list(subq))
        qs = qs.order_by('-n')
        if filters is not None:
            qs = qs.filter(**filters)
        if num is not None:
            qs = qs[:num]
        # TODO: This all feels like a bit of a hack.
        items = {}
        if len(lookup_keys) == 1:
            # Can we do this without a second query by using a select_related()
            # somehow?
            f = self.through._meta.get_field_by_name(lookup_keys[0])[0]
            objs = f.rel.to._default_manager.filter(**{
                "%s__in" % f.rel.field_name: [r["content_object"] for r in qs]
            })
            for obj in objs:
                items[(getattr(obj, f.rel.field_name),)] = obj
        else:
            # Generic FK case: batch-fetch objects per content type.
            preload = {}
            for result in qs:
                preload.setdefault(result['content_type'], set())
                preload[result["content_type"]].add(result["object_id"])
            for ct, obj_ids in preload.iteritems():
                ct = ContentType.objects.get_for_id(ct)
                for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
                    items[(ct.pk, obj.pk)] = obj
        results = []
        for result in qs:
            obj = items[
                tuple(result[k] for k in lookup_keys)
            ]
            obj.similar_tags = result["n"]
            results.append(obj)
        return results
def _get_subclasses(model):
    """Return 'model' plus every model that inherits from it via a
    multi-table-inheritance parent link (recursively)."""
    found = [model]
    opts = model._meta
    for field_name in opts.get_all_field_names():
        candidate = opts.get_field_by_name(field_name)[0]
        if not isinstance(candidate, RelatedObject):
            continue
        if getattr(candidate.field.rel, "parent_link", None):
            found.extend(_get_subclasses(candidate.model))
    return found
| |
"""
Support for interfacing to the Logitech SqueezeBox API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.squeezebox/
"""
import logging
import asyncio
import urllib.parse
import json
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_ENQUEUE, SUPPORT_PLAY_MEDIA,
MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, PLATFORM_SCHEMA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_PLAY, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, STATE_IDLE, STATE_OFF,
STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN, CONF_PORT)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
# Default LMS web-interface port.
DEFAULT_PORT = 9000
# Seconds allowed for a JSON-RPC round trip.
TIMEOUT = 10
# Feature bitmask advertised by every SqueezeBox player.
SUPPORT_SQUEEZEBOX = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | \
    SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
    SUPPORT_SEEK | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA | \
    SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_USERNAME): cv.string,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the squeezebox platform."""
    import socket
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    # Discovery info takes precedence over static configuration.
    if discovery_info is not None:
        host = discovery_info.get("host")
        port = discovery_info.get("port")
    else:
        host = config.get(CONF_HOST)
        port = config.get(CONF_PORT)
    # In case the port is not discovered
    if port is None:
        port = DEFAULT_PORT
    # Get IP of host, to prevent duplication of same host (different DNS names)
    # NOTE(review): 'ipaddr' is only logged below; no dedup against it
    # actually happens in this code - confirm whether that was intended.
    try:
        ipaddr = socket.gethostbyname(host)
    except (OSError) as error:
        _LOGGER.error(
            "Could not communicate with %s:%d: %s", host, port, error)
        return False
    _LOGGER.debug("Creating LMS object for %s", ipaddr)
    lms = LogitechMediaServer(hass, host, port, username, password)
    players = yield from lms.create_players()
    async_add_devices(players)
    return True
class LogitechMediaServer(object):
    """Representation of a Logitech media server (LMS).

    Wraps the JSON-RPC interface exposed at /jsonrpc.js and creates one
    SqueezeBoxDevice per connected player.
    """
    def __init__(self, hass, host, port, username, password):
        """Initialize the Logitech device."""
        self.hass = hass
        self.host = host
        self.port = port
        self._username = username
        self._password = password
    @asyncio.coroutine
    def create_players(self):
        """Create a list of devices connected to LMS."""
        result = []
        data = yield from self.async_query('players', 'status')
        # async_query returns False on failure; previously this fell
        # through to data.get() and raised AttributeError.
        if data is False:
            return result
        for players in data.get('players_loop', []):
            player = SqueezeBoxDevice(
                self, players['playerid'], players['name'])
            yield from player.async_update()
            result.append(player)
        return result
    @asyncio.coroutine
    def async_query(self, *command, player=""):
        """Abstract out the JSON-RPC connection.

        Returns the decoded 'result' payload, or False on any error.
        """
        auth = None if self._username is None else aiohttp.BasicAuth(
            self._username, self._password)
        url = "http://{}:{}/jsonrpc.js".format(
            self.host, self.port)
        data = json.dumps({
            "id": "1",
            "method": "slim.request",
            "params": [player, command]
        })
        _LOGGER.debug("URL: %s Data: %s", url, data)
        try:
            websession = async_get_clientsession(self.hass)
            with async_timeout.timeout(TIMEOUT, loop=self.hass.loop):
                response = yield from websession.post(
                    url,
                    data=data,
                    auth=auth)
                if response.status != 200:
                    _LOGGER.error(
                        "Query failed, response code: %s Full message: %s",
                        response.status, response)
                    return False
                data = yield from response.json()
        except (asyncio.TimeoutError, aiohttp.ClientError) as error:
            _LOGGER.error("Failed communicating with LMS: %s", type(error))
            return False
        try:
            return data['result']
        except (KeyError, TypeError):
            # Subscripting the decoded JSON raises KeyError (dict without
            # 'result') or TypeError (non-dict) - the original
            # 'except AttributeError' could never fire here.
            _LOGGER.error("Received invalid response: %s", data)
            return False
class SqueezeBoxDevice(MediaPlayerDevice):
    """Representation of a SqueezeBox device."""
    def __init__(self, lms, player_id, name):
        """Initialize the SqueezeBox device."""
        super(SqueezeBoxDevice, self).__init__()
        self._lms = lms
        self._id = player_id
        # Raw LMS status dict, refreshed by async_update().
        self._status = {}
        self._name = name
        self._last_update = None
        _LOGGER.debug("Creating SqueezeBox object: %s, %s", name, player_id)
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def unique_id(self):
        """Return an unique ID."""
        return self._id
    @property
    def state(self):
        """Return the state of the device (mapped from LMS power/mode)."""
        if 'power' in self._status and self._status['power'] == 0:
            return STATE_OFF
        if 'mode' in self._status:
            if self._status['mode'] == 'pause':
                return STATE_PAUSED
            if self._status['mode'] == 'play':
                return STATE_PLAYING
            if self._status['mode'] == 'stop':
                return STATE_IDLE
        return STATE_UNKNOWN
    def async_query(self, *parameters):
        """Send a command to the LMS, targeted at this player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self._lms.async_query(
            *parameters, player=self._id)
    @asyncio.coroutine
    def async_update(self):
        """Retrieve the current state of the player."""
        # 'adKl' asks LMS for artist/duration/artwork/album fields.
        tags = 'adKl'
        response = yield from self.async_query(
            "status", "-", "1", "tags:{tags}"
            .format(tags=tags))
        if response is False:
            return
        self._status = {}
        # Merge playlist entry and remote metadata (if any) under the
        # top-level status; missing keys are simply skipped.
        try:
            self._status.update(response["playlist_loop"][0])
        except KeyError:
            pass
        try:
            self._status.update(response["remoteMeta"])
        except KeyError:
            pass
        self._status.update(response)
        self._last_update = utcnow()
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        if 'mixer volume' in self._status:
            return int(float(self._status['mixer volume'])) / 100.0
    @property
    def is_volume_muted(self):
        """Return true if volume is muted."""
        # LMS reports mute as a negative mixer volume.
        if 'mixer volume' in self._status:
            return str(self._status['mixer volume']).startswith('-')
    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        if 'current_title' in self._status:
            return self._status['current_title']
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC
    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        if 'duration' in self._status:
            return int(float(self._status['duration']))
    @property
    def media_position(self):
        """Position of current playing media in seconds."""
        if 'time' in self._status:
            return int(float(self._status['time']))
    @property
    def media_position_updated_at(self):
        """Last time status was updated."""
        return self._last_update
    @property
    def media_image_url(self):
        """Image url of current playing media."""
        if 'artwork_url' in self._status:
            media_url = self._status['artwork_url']
        elif 'id' in self._status:
            media_url = ('/music/{track_id}/cover.jpg').format(
                track_id=self._status['id'])
        else:
            media_url = ('/music/current/cover.jpg?player={player}').format(
                player=self._id)
        # pylint: disable=protected-access
        # NOTE(review): the LMS credentials are embedded in this URL in
        # clear text - anything that logs or exposes the entity picture
        # leaks them.  Consider whether this is acceptable.
        if self._lms._username:
            base_url = 'http://{username}:{password}@{server}:{port}/'.format(
                username=self._lms._username,
                password=self._lms._password,
                server=self._lms.host,
                port=self._lms.port)
        else:
            base_url = 'http://{server}:{port}/'.format(
                server=self._lms.host,
                port=self._lms.port)
        url = urllib.parse.urljoin(base_url, media_url)
        return url
    @property
    def media_title(self):
        """Title of current playing media."""
        if 'title' in self._status:
            return self._status['title']
        if 'current_title' in self._status:
            return self._status['current_title']
    @property
    def media_artist(self):
        """Artist of current playing media."""
        if 'artist' in self._status:
            return self._status['artist']
    @property
    def media_album_name(self):
        """Album of current playing media."""
        if 'album' in self._status:
            return self._status['album']
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_SQUEEZEBOX
    def async_turn_off(self):
        """Turn off media player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('power', '0')
    def async_volume_up(self):
        """Volume up media player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('mixer', 'volume', '+5')
    def async_volume_down(self):
        """Volume down media player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('mixer', 'volume', '-5')
    def async_set_volume_level(self, volume):
        """Set volume level, range 0..1.
        This method must be run in the event loop and returns a coroutine.
        """
        volume_percent = str(int(volume*100))
        return self.async_query('mixer', 'volume', volume_percent)
    def async_mute_volume(self, mute):
        """Mute (true) or unmute (false) media player.
        This method must be run in the event loop and returns a coroutine.
        """
        mute_numeric = '1' if mute else '0'
        return self.async_query('mixer', 'muting', mute_numeric)
    def async_media_play_pause(self):
        """Send play/pause toggle command to media player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('pause')
    def async_media_play(self):
        """Send play command to media player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('play')
    def async_media_pause(self):
        """Send pause command to media player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('pause', '1')
    def async_media_next_track(self):
        """Send next track command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('playlist', 'index', '+1')
    def async_media_previous_track(self):
        """Send previous track command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('playlist', 'index', '-1')
    def async_media_seek(self, position):
        """Send seek command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('time', position)
    def async_turn_on(self):
        """Turn the media player on.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('power', '1')
    def async_play_media(self, media_type, media_id, **kwargs):
        """
        Send the play_media command to the media player.
        If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the current playlist.
        This method must be run in the event loop and returns a coroutine.
        """
        if kwargs.get(ATTR_MEDIA_ENQUEUE):
            return self._add_uri_to_playlist(media_id)
        return self._play_uri(media_id)
    def _play_uri(self, media_id):
        """Replace the current play list with the uri."""
        return self.async_query('playlist', 'play', media_id)
    def _add_uri_to_playlist(self, media_id):
        """Add an item to the existing playlist."""
        return self.async_query('playlist', 'add', media_id)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self
"""MXNet symbol frontend."""
from __future__ import absolute_import as _abs
import json
import tvm
from .. import symbol as _sym
from .common import get_nnvm_op, required_attr, parse_tshape, parse_bool_str
__all__ = ['from_mxnet']
def _rename(new_name):
    """Return a converter that maps an MXNet op directly onto the nnvm op
    called 'new_name', passing inputs and attributes through unchanged."""
    def impl(inputs, attrs):
        op = get_nnvm_op(new_name)
        return op(*inputs, **attrs)
    return impl
def _pooling(inputs, attrs):
    """Convert MXNet Pooling to nnvm (global_)?(max|avg)_pool2d."""
    kernel = parse_tshape(required_attr(attrs, 'kernel', 'pooling'))
    if len(kernel) != 2:
        raise tvm.error.OpAttributeUnImplemented(
            'Non-2D kernels are not supported for Pool2D.')
    global_pool = 'global' if parse_bool_str(attrs, 'global_pool') else ''
    pool_type = required_attr(attrs, 'pool_type', 'pooling')
    if pool_type not in ['avg', 'max']:
        raise tvm.error.OpNotImplemented(
            'Only max and average pooling are supported in frontend MXNet.')
    # e.g. 'global' + 'max' -> 'global_max_pool2d'; '' + 'avg' -> 'avg_pool2d'.
    op_name, new_attrs = '_'.join([global_pool, pool_type, 'pool2d']).strip('_'), {}
    # new_attrs['layout'] = 'NCHW'
    if not global_pool:
        new_attrs['pool_size'] = kernel
        new_attrs['strides'] = attrs.get('stride', (1, 1))
        new_attrs['padding'] = attrs.get('pad', (0, 0))
        # MXNet 'full' pooling convention maps to ceil_mode.
        new_attrs['ceil_mode'] = (attrs.get('pooling_convention', 'valid') == 'full')
        if pool_type == 'avg':
            new_attrs['count_include_pad'] = attrs.get('count_include_pad', True)
    return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _batch_norm(inputs, attrs):
    """Convert MXNet BatchNorm to nnvm batch_norm."""
    if parse_bool_str(attrs, 'output_mean_var'):
        raise tvm.error.OpAttributeUnImplemented(
            'Attribute "output_mean_var" is not supported in operator batch_norm.')
    # if parse_bool_str(attrs, 'fix_gamma'):
    #     _warn_not_used('fix_gamma', 'batch_norm')
    if parse_bool_str(attrs, 'use_global_stats'):
        from warnings import warn
        warn(
            'Attribute "use_global_stats" is ignored in operator batch_norm.')
    # if parse_bool_str(attrs, 'momentum'):
    #     _warn_not_used('momentum', 'batch_norm')
    op_name, new_attrs = 'batch_norm', {}
    new_attrs['axis'] = attrs.get('axis', 1)
    new_attrs['epsilon'] = attrs.get('eps', 0.001)
    new_attrs['center'] = True
    # fix_gamma=True in MXNet means gamma is pinned to 1, i.e. no scaling.
    new_attrs['scale'] = not parse_bool_str(attrs, 'fix_gamma', default="False")
    return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _concat(inputs, attrs):
    """Convert MXNet Concat to nnvm concatenate (MXNet 'dim' -> 'axis')."""
    axis = attrs.get('dim', 1)
    return get_nnvm_op('concatenate')(*inputs, axis=axis)
def _conv2d(inputs, attrs):
    """Convert MXNet Convolution to nnvm conv2d."""
    kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d'))
    if len(kernel) != 2:
        # BUGFIX: the tvm.error class is OpAttributeUnImplemented (capital
        # "I", as used elsewhere in this file); the previous lowercase
        # spelling does not exist and raised AttributeError at raise time.
        raise tvm.error.OpAttributeUnImplemented(
            'Non-2D kernels are not supported for operator Conv2D.')
    layout = attrs.get('layout', 'NCHW')
    if layout not in ['NCHW', 'NHWC']:
        raise tvm.error.OpAttributeUnImplemented(
            'Layout {} is not supported in operator Conv2D.'.format(layout))
    if 'kernel_layout' in attrs:
        kernel_layout = attrs['kernel_layout']
    else:
        # Default weight layout follows the data layout convention.
        kernel_layout = 'HWIO' if layout == 'NHWC' else 'OIHW'
    op_name, new_attrs = 'conv2d', {}
    new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d')
    new_attrs['kernel_size'] = kernel
    new_attrs['strides'] = attrs.get('stride', (1, 1))
    new_attrs['padding'] = attrs.get('pad', (0, 0))
    new_attrs['dilation'] = attrs.get('dilate', (1, 1))
    new_attrs['groups'] = attrs.get('num_group', 1)
    new_attrs['layout'] = layout
    new_attrs['kernel_layout'] = kernel_layout
    # MXNet exposes 'no_bias'; nnvm wants 'use_bias' (inverted sense).
    new_attrs['use_bias'] = attrs.get('no_bias', 'False').strip() == 'False'
    return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _conv2d_transpose(inputs, attrs):
    """Convert MXNet Deconvolution to nnvm conv2d_transpose."""
    if 'target_shape' in attrs:
        # BUGFIX: correct class name is OpAttributeUnImplemented (capital
        # "I"); the lowercase variant does not exist in tvm.error.
        raise tvm.error.OpAttributeUnImplemented(
            'Attribute "target_shape" is not supported in operator Conv2D-transpose.')
    kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d_transpose'))
    if len(kernel) != 2:
        raise tvm.error.OpAttributeInvalid(
            'Non-2D kernels are not supported in Conv2D-transpose.')
    layout = attrs.get('layout', 'NCHW')
    if layout not in ['NCHW', 'NHWC']:
        # BUGFIX: .format(layout) was missing, so the message contained a
        # literal '{}' placeholder.
        raise tvm.error.OpAttributeUnImplemented(
            'Layout {} is not supported in operator Conv2D-transpose.'.format(layout))
    if 'kernel_layout' in attrs:
        kernel_layout = attrs['kernel_layout']
    else:
        # Default weight layout follows the data layout convention.
        kernel_layout = 'HWIO' if layout == 'NHWC' else 'OIHW'
    op_name, new_attrs = 'conv2d_transpose', {}
    new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d_transpose')
    new_attrs['kernel_size'] = kernel
    new_attrs['strides'] = attrs.get('stride', (1, 1))
    new_attrs['output_padding'] = attrs.get('adj', (0, 0))
    new_attrs['padding'] = attrs.get('pad', (0, 0))
    new_attrs['dilation'] = attrs.get('dilate', (1, 1))
    new_attrs['groups'] = attrs.get('num_group', 1)
    new_attrs['layout'] = layout
    new_attrs['kernel_layout'] = kernel_layout
    new_attrs['use_bias'] = not parse_bool_str(attrs, 'no_bias')
    return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _dense(inputs, attrs):
    """Convert MXNet FullyConnected to nnvm dense."""
    import mxnet as mx
    new_attrs = {
        'units': required_attr(attrs, 'num_hidden', 'dense'),
        'use_bias': not parse_bool_str(attrs, 'no_bias'),
    }
    # Probe whether this MXNet build understands the 'flatten' attribute;
    # old releases raise MXNetError for it.
    try:
        _ = mx.sym.FullyConnected(mx.sym.var('x'), num_hidden=1, flatten=True)
        has_flatten = True
    except mx.base.MXNetError:
        # no flatten attribute in old mxnet
        has_flatten = False
    if has_flatten and parse_bool_str(attrs, 'flatten', 'True'):
        inputs[0] = _sym.flatten(inputs[0])
    return get_nnvm_op('dense')(*inputs, **new_attrs)
def _dropout(inputs, attrs):
    """Convert MXNet Dropout to nnvm dropout."""
    rate = attrs.get('p', 0.5)
    return get_nnvm_op('dropout')(*inputs, rate=rate)
def _leaky_relu(inputs, attrs):
    """Convert MXNet LeakyReLU (leaky/prelu/elu/rrelu variants) to nnvm."""
    act_type = required_attr(attrs, 'act_type', 'leaky_relu')
    if act_type in ['leaky', 'prelu']:
        op_name, new_attrs = act_type, {}
        if act_type == 'leaky':
            new_attrs['alpha'] = attrs.get('slope', 0.25)
        sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
    elif act_type == 'elu':
        # elu(x) = -slope * relu(1 - exp(x)) + relu(x)
        slope = attrs.get('slope', 0.25)
        sym = -slope * _sym.relu(1 - _sym.exp(*inputs)) + _sym.relu(*inputs)
    elif act_type == 'rrelu':
        # At inference time rrelu degenerates to leaky_relu with the mean
        # of the sampling bounds as slope.
        lower_bound = float(required_attr(attrs, 'lower_bound', 'leaky_relu'))
        upper_bound = float(required_attr(attrs, 'upper_bound', 'leaky_relu'))
        slope = (lower_bound + upper_bound) / 2.0
        # NOTE(review): alpha is passed as str here but as a number in the
        # 'leaky' branch above -- confirm the op accepts both.
        op_name, new_attrs = 'leaky_relu', {'alpha': str(slope)}
        sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
    else:
        raise tvm.error.OpNotImplemented(
            'Operator {} is not supported in frontend MXNet.'.format(act_type))
    return sym
def _activations(inputs, attrs):
    """Convert MXNet Activation to the matching nnvm op."""
    act_type = required_attr(attrs, 'act_type', 'activations')
    if act_type == 'softrelu':
        # softrelu(x) = log(1 + exp(x))
        return _sym.log((1 + _sym.exp(*inputs)))
    if act_type in ('relu', 'sigmoid', 'tanh'):
        return get_nnvm_op(act_type)(*inputs)
    raise tvm.error.OpNotImplemented(
        'Operator {} is not supported in frontend MXNet.'.format(act_type))
def _reshape(inputs, attrs):
    """Convert MXNet Reshape to nnvm reshape."""
    if parse_bool_str(attrs, 'reverse'):
        raise tvm.error.OpAttributeUnimplemented(
            'Attribute "reverse" is not supported in operator Reshape.')
    shape = required_attr(attrs, 'shape', 'reshape')
    return get_nnvm_op('reshape')(*inputs, shape=shape)
def _slice(inputs, attrs):
    """Convert MXNet slice to nnvm strided_slice."""
    begin, end = attrs.get('begin', None), attrs.get('end', None)
    if begin is None or end is None:
        raise RuntimeError('begin and end are required params')
    if 'None' in begin or 'None' in end:
        raise RuntimeError('None in begin or end not supported yet...')
    kwargs = {'begin': begin, 'end': end}
    step = attrs.get('step', None)
    if step is not None:
        kwargs['stride'] = step
    return get_nnvm_op('strided_slice')(inputs[0], **kwargs)
def _split(inputs, attrs):
    """Convert MXNet split/SliceChannel to nnvm split (+ optional squeeze)."""
    axis = attrs.get('axis', 1)
    num_outputs = required_attr(attrs, 'num_outputs', 'split')
    outputs = get_nnvm_op('split')(*inputs,
                                   indices_or_sections=num_outputs,
                                   axis=axis)
    if parse_bool_str(attrs, 'squeeze_axis'):
        # Drop the now size-1 split axis from every output.
        squeezed = [get_nnvm_op('squeeze')(o, axis=axis) for o in outputs]
        outputs = _sym.Group(squeezed)
    return outputs
def _softmax_activation(inputs, attrs):
    """Convert MXNet SoftmaxActivation to nnvm softmax."""
    mode = attrs.get('mode', 'instance')
    axis = 0 if mode == 'instance' else 1
    return get_nnvm_op('softmax')(inputs[0], axis=axis)
def _softmax_output(inputs, attrs):
    """Convert MXNet SoftmaxOutput to nnvm softmax."""
    kwargs = {}
    if parse_bool_str(attrs, 'multi_output'):
        # multi-output mode applies softmax along the channel axis
        kwargs['axis'] = 1
    return get_nnvm_op('softmax')(inputs[0], **kwargs)
def _upsampling(inputs, attrs):
    """Convert MXNet UpSampling to nnvm upsampling.

    Uses required_attr so that a missing 'scale' raises a clear frontend
    error instead of ``int(None)`` failing with a bare TypeError.
    """
    scale = required_attr(attrs, 'scale', 'upsampling')
    return get_nnvm_op('upsampling')(inputs[0], scale=int(scale))
def _clip(inputs, attrs):
    """Convert MXNet clip to nnvm clip."""
    a_min = required_attr(attrs, 'a_min', 'clip')
    a_max = required_attr(attrs, 'a_max', 'clip')
    return get_nnvm_op('clip')(*inputs, a_min=a_min, a_max=a_max)
def _contrib_multibox_detection(inputs, attrs):
    """Convert MXNet _contrib_MultiBoxDetection into
    multibox_transform_loc followed by non_max_suppression.

    inputs holds [class probabilities, location predictions, anchors].
    """
    clip = parse_bool_str(attrs, 'clip', default='True')
    # The `or` fallbacks also replace explicit falsy values with defaults.
    threshold = attrs.get('threshold') or 0.01
    nms_threshold = attrs.get('nms_threshold') or 0.5
    force_suppress = parse_bool_str(attrs, 'force_suppress', default='False')
    # 'variances' arrives as a "(a, b, c, d)" string; parse to a float tuple.
    variances = tuple([float(x.strip()) for x in attrs.get('variances').strip('()').split(',')]) \
        if attrs.get('variances') is not None else (0.1, 0.1, 0.2, 0.2)
    nms_topk = attrs.get('nms_topk') or -1
    new_attrs0 = {'clip': clip, 'threshold': float(threshold), 'variances': variances}
    new_attrs1 = {'return_indices': False, 'iou_threshold': float(nms_threshold),
                  'force_suppress': force_suppress, 'top_k': int(nms_topk)}
    data, valid_count = get_nnvm_op('multibox_transform_loc')(inputs[0], inputs[1],
                                                              inputs[2], **new_attrs0)
    return get_nnvm_op('non_max_suppression')(data, valid_count, **new_attrs1)
def _elemwise_sum(inputs, _):
    """Convert MXNet add_n to nnvm elemwise_sum (attrs are ignored)."""
    return get_nnvm_op('elemwise_sum')(*inputs, num_args=len(inputs))
def _crop_like(inputs, attrs):
    """Convert MXNet Crop (crop_like form only) to nnvm slice_like."""
    raw_offsets = attrs.get('offsets')
    if raw_offsets is not None:
        offsets = tuple(float(x.strip())
                        for x in raw_offsets.strip('()').split(','))
    else:
        offsets = (0, 0)
    if offsets != (0, 0):
        raise tvm.error.OpAttributeInvalid(
            'crop_like offsets must equal (0,0).')
    if parse_bool_str(attrs, 'center_crop', default="False"):
        raise tvm.error.OpAttributeUnimplemented(
            'Center crop is not supported in operator crop_like.')
    if len(inputs) < 2:
        raise tvm.error.OpAttributeUnimplemented("Only support crop_like pattern.")
    # Slice the spatial (H, W) axes of inputs[0] to match inputs[1].
    return get_nnvm_op('slice_like')(inputs[0], inputs[1], axis=[2, 3])
def _expand_dims(inputs, attrs):
    """Convert MXNet expand_dims to nnvm expand_dims."""
    axis = required_attr(attrs, 'axis', 'expand_dims')
    return get_nnvm_op('expand_dims')(*inputs, axis=axis)
def _lrn(inputs, attrs):
    """Convert MXNet LRN to nnvm lrn (NCHW, normalized over channels)."""
    new_attrs = {
        'alpha': attrs.get('alpha', 0.0001),
        'beta': attrs.get('beta', 0.75),
        'bias': attrs.get('knorm', 2),
        # NCHW format: normalization runs along the channel axis.
        'axis': 1,
        'size': required_attr(attrs, 'nsize', 'lrn'),
    }
    return get_nnvm_op('lrn')(*inputs, **new_attrs)
def _minimum(inputs, attrs):
    """Convert MXNet _minimum to nnvm broadcast_min (attrs passed through)."""
    return get_nnvm_op('broadcast_min')(*inputs, **attrs)
def _maximum(inputs, attrs):
    """Convert MXNet _maximum to nnvm broadcast_max (attrs passed through)."""
    return get_nnvm_op('broadcast_max')(*inputs, **attrs)
def _ones(_, attrs):
    """Convert MXNet _ones to nnvm ones (no inputs; attrs passed through)."""
    return get_nnvm_op('ones')(**attrs)
def _zeros(_, attrs):
    """Convert MXNet _zeros to nnvm zeros (no inputs; attrs passed through)."""
    return get_nnvm_op('zeros')(**attrs)
def _argmax(inputs, attrs):
    """Convert MXNet argmax to nnvm argmax (index returned as float32)."""
    new_attrs = {
        'dtype': 'float32',
        'axis': attrs.get('axis', 0),
        'keepdims': parse_bool_str(attrs, 'keepdims', default="False"),
    }
    return get_nnvm_op('argmax')(*inputs, **new_attrs)
def _argmin(inputs, attrs):
    """Convert MXNet argmin to nnvm argmin (index returned as float32)."""
    new_attrs = {
        'dtype': 'float32',
        'axis': attrs.get('axis', 0),
        'keepdims': parse_bool_str(attrs, 'keepdims', default="False"),
    }
    return get_nnvm_op('argmin')(*inputs, **new_attrs)
# MXNet ops whose name and attributes map 1:1 onto nnvm ops.
_identity_list = ['__add_scalar__', '__add_symbol__', '__div_scalar__',
                  '__div_symbol__', '__mul_scalar__', '__mul_symbol__',
                  '__pow_scalar__', '__rdiv_scalar__', '__rpow_scalar__',
                  '__rsub_scalar__', '__sub_scalar__', '__sub_symbol__',
                  'broadcast_add', 'broadcast_div', 'broadcast_mul',
                  'broadcast_sub', 'broadcast_to', 'cast', 'elemwise_add',
                  'elemwise_div', 'elemwise_mul', 'elemwise_sub', 'exp',
                  'flatten', 'log', 'log_softmax', 'max', 'min', 'negative',
                  'ones_like', 'relu', 'sigmoid', 'slice_like', 'softmax',
                  'sum', 'tanh', 'transpose', 'zeros_like', 'gather_nd',
                  'reshape_like', 'where']

# MXNet op name -> conversion callable taking (inputs, attrs).
_convert_map = {
    '_copy'         : _rename('copy'),
    '_div_scalar'   : _rename('__div_scalar__'),
    '_minus_scalar' : _rename('__sub_scalar__'),
    '_mul_scalar'   : _rename('__mul_scalar__'),
    '_plus_scalar'  : _rename('__add_scalar__'),
    '_rdiv_scalar'  : _rename('__rdiv_scalar__'),
    '_rminus_scalar': _rename('__rsub_scalar__'),
    '_contrib_MultiBoxPrior' : _rename('multibox_prior'),
    '_contrib_MultiBoxDetection' : _contrib_multibox_detection,
    '_minimum'      : _minimum,
    '_maximum'      : _maximum,
    '_ones'         : _ones,
    '_zeros'        : _zeros,
    'argmax'        : _argmax,
    'argmin'        : _argmin,
    'Activation'    : _activations,
    'BatchNorm'     : _batch_norm,
    'BatchNorm_v1'  : _batch_norm,
    'Cast'          : _rename('cast'),
    'Concat'        : _concat,
    'Convolution'   : _conv2d,
    'Convolution_v1': _conv2d,
    'Crop'          : _crop_like,
    'Deconvolution' : _conv2d_transpose,
    'Dropout'       : _dropout,
    'Flatten'       : _rename('flatten'),
    'FullyConnected': _dense,
    'LeakyReLU'     : _leaky_relu,
    'Pooling'       : _pooling,
    'Pooling_v1'    : _pooling,
    'Reshape'       : _reshape,
    'slice'         : _slice,
    'SliceChannel'  : _split,
    'split'         : _split,
    'Softmax'       : _rename('softmax'),
    'SoftmaxActivation' : _softmax_activation,
    'SoftmaxOutput' : _softmax_output,
    'add_n'         : _elemwise_sum,
    'concat'        : _concat,
    'max_axis'      : _rename('max'),
    'min_axis'      : _rename('min'),
    'reshape'       : _reshape,
    'sum_axis'      : _rename('sum'),
    'UpSampling'    : _upsampling,
    'clip'          : _clip,
    'expand_dims'   : _expand_dims,
    'LRN'           : _lrn
}
def _convert_symbol(op_name, inputs, attrs,
                    identity_list=None,
                    convert_map=None):
    """Convert one mxnet op into the corresponding nnvm op.

    Parameters
    ----------
    op_name : str
        Operator name, such as Convolution, FullyConnected.
    inputs : list of nnvm.Symbol
        List of input symbols.
    attrs : dict
        Dict of operator attributes.
    identity_list : list
        Operators that need no conversion (defaults to _identity_list).
    convert_map : dict
        Op name -> conversion callable (defaults to _convert_map).

    Returns
    -------
    sym : nnvm.Symbol
        Converted nnvm Symbol.
    """
    identity_list = identity_list or _identity_list
    convert_map = convert_map or _convert_map
    if op_name in identity_list:
        # Same name and attributes on both sides: call the nnvm op directly.
        return get_nnvm_op(op_name)(*inputs, **attrs)
    if op_name in convert_map:
        return convert_map[op_name](inputs, attrs)
    raise tvm.error.OpNotImplemented(
        'Operator {} is not supported in frontend MXNet.'.format(op_name))
def _as_list(arr):
"""Force being a list, ignore if already is."""
if isinstance(arr, list):
return arr
return [arr]
def _topo_sort(symbol):
"""Sort all symbols in the mxnet graph in topological order.
Parameters
----------
symbol : mxnet.sym.Symbol
Returns:
-------
list
List of mxnet symbol
"""
queue = []
symbol_map = {}
deps = {}
dep_cnts = {}
for s in symbol:
symbol_map[s.attr('name')] = s
queue.append(s)
while queue:
sym = queue.pop(0)
name = sym.attr('name')
childs = sym.get_children()
if childs is None:
dep_cnts[name] = 0
else:
dep_cnts[name] = len({c.attr('name') for c in childs})
for child in childs:
child_name = child.attr('name')
if child_name not in deps:
deps[child_name] = set()
deps[child_name].add(name)
if child_name not in symbol_map:
symbol_map[child_name] = child
queue.append(child)
order = []
while dep_cnts:
remove = []
for name in dep_cnts:
if dep_cnts[name] == 0:
order.append(symbol_map[name])
remove.append(name)
if name in deps:
for other in deps[name]:
dep_cnts[other] -= 1
for name in remove:
del dep_cnts[name]
return order
def _from_mxnet_impl(symbol, graph):
    """Convert mxnet symbol to nnvm implementation.
    Reconstruct a nnvm symbol by traversing the mxnet symbol.

    Parameters
    ----------
    symbol : mxnet.sym.Symbol
        Incompatible symbol from mxnet, sharing similar graph structure.
        The op_name and attrs inside are not always compatible.
    graph : dict
        Reusable nodes are stored in graph.

    Returns
    -------
    nnvm.sym.Symbol
        Converted symbol
    """
    def get_node(sym):
        # Look up the already-converted node for `sym`; None if unseen.
        name = sym.attr('name')
        if name not in graph:
            return None
        # For multi-output nodes, pick the output this symbol refers to
        # (head index recovered from the symbol's JSON representation).
        output_index = json.loads(sym.tojson())['heads'][0][1]
        return graph[name][output_index]

    assert symbol is not None
    # Traverse all symbols in topological order
    for sym in _topo_sort(symbol):
        name = sym.attr('name')
        attr = sym.list_attr()
        op_name = sym.attr('op_name')
        childs = sym.get_children()
        if childs is not None:
            childs = [get_node(child) for child in childs]
            # Flatten grouped child nodes into one input list.
            childs = [x for y in childs for x in _as_list(y)]
            node = _convert_symbol(op_name, childs, attr)
        elif op_name != 'null':
            node = _convert_symbol(op_name, [], attr)
        else:
            # 'null' op marks a graph input / parameter.
            node = _sym.Variable(name=name, **attr)
        graph[name] = node
    nodes = []
    for sym in symbol:
        node = get_node(sym)
        assert node is not None
        nodes.append(node)
    if len(nodes) > 1:
        return _sym.Group(nodes)
    return nodes[0]
def from_mxnet(symbol, arg_params=None, aux_params=None):
    """Convert from MXNet's model into compatible NNVM format.

    Parameters
    ----------
    symbol : mxnet.Symbol or mxnet.gluon.HybridBlock
        MXNet symbol
    arg_params : dict of str to mx.NDArray
        The argument parameters in mxnet
    aux_params : dict of str to mx.NDArray
        The auxiliary parameters in mxnet

    Returns
    -------
    sym : nnvm.Symbol
        Compatible nnvm symbol
    params : dict of str to tvm.NDArray
        The parameter dict to be used by nnvm
    """
    try:
        import mxnet as mx
    except ImportError as e:
        raise ImportError('{}. MXNet is required to parse symbols.'.format(e))
    if isinstance(symbol, mx.sym.Symbol):
        sym = _from_mxnet_impl(symbol, {})
        params = {}
        arg_params = arg_params if arg_params else {}
        aux_params = aux_params if aux_params else {}
        # Copy both argument and auxiliary tensors into tvm NDArrays.
        for k, v in arg_params.items():
            params[k] = tvm.nd.array(v.asnumpy())
        for k, v in aux_params.items():
            params[k] = tvm.nd.array(v.asnumpy())
    elif isinstance(symbol, mx.gluon.HybridBlock):
        # Trace the block once with a symbolic 'data' input to get a Symbol.
        data = mx.sym.Variable('data')
        sym = symbol(data)
        sym = _from_mxnet_impl(sym, {})
        params = {}
        for k, v in symbol.collect_params().items():
            params[k] = tvm.nd.array(v.data().asnumpy())
    elif isinstance(symbol, mx.gluon.Block):
        raise NotImplementedError("Only Hybrid Blocks are supported now.")
    else:
        msg = "mxnet.Symbol or gluon.HybridBlock expected, got {}".format(type(symbol))
        raise ValueError(msg)
    if isinstance(sym, list):
        sym = _sym.Group(sym)
    return sym, params
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import parse_deps
import os
srcdir = os.path.join(os.path.dirname(__file__), "../src")
class JSStripTests(unittest.TestCase):
    """Tests for parse_deps' JS tokenizer and comment stripper."""

    def test_tokenize_0(self):
        tokens = list(parse_deps._tokenize_js(""))
        self.assertEquals([], tokens)

    def test_tokenize_nl(self):
        tokens = list(parse_deps._tokenize_js("\n"))
        self.assertEquals(["\n"], tokens)

    def test_tokenize_slashslash_comment(self):
        tokens = list(parse_deps._tokenize_js("A // foo"))
        self.assertEquals(["A ", "//", " foo"], tokens)

    def test_tokenize_slashslash_comment_then_newline2(self):
        tokens = list(parse_deps._tokenize_js("""A // foo
bar"""
                                              ))
        self.assertEquals(["A ", "//", " foo", "\n", "bar"], tokens)

    def test_tokenize_cstyle_comment_single_line(self):
        # BUG FIX: this test and the multi-line one below were both named
        # test_tokenize_cstyle_comment, so this one was shadowed and never ran.
        tokens = list(parse_deps._tokenize_js("""A /* foo */"""))
        self.assertEquals(["A ", "/*", " foo ", "*/"], tokens)

    def test_tokenize_cstyle_comment_multi_line(self):
        tokens = list(parse_deps._tokenize_js("""A /* foo
*bar
*/"""))
        self.assertEquals(["A ", "/*", " foo", "\n", "*bar", "\n", "*/"], tokens)

    def test_strip_comments(self):
        self.assertEquals("A ", parse_deps._strip_js_comments("A // foo"))
        self.assertEquals("A b", parse_deps._strip_js_comments("A /* foo */ b"))
        self.assertEquals("A b", parse_deps._strip_js_comments("""A /* foo
*/ b"""))
class ParseTests(unittest.TestCase):
    """Tests for parse_deps.Module.parse_definition_."""

    def test_parse_definition_1(self):
        text = """// blahblahblah
base.require('dependency1');
base.require('dependency2');
base.requireStylesheet('myStylesheet');
"""
        module = parse_deps.Module("myModule")
        module.parse_definition_(text)
        self.assertEquals(["myStylesheet"], module.style_sheet_names);
        self.assertEquals(["dependency1", "dependency2"],
                          module.dependent_module_names);

    def test_parse_definition_missing_semis(self):
        # Same as above but with some statements missing their semicolons.
        text = """// blahblahblah
base.require('dependency1')
base.require('dependency2');
base.requireStylesheet('myStylesheet')
"""
        module = parse_deps.Module("myModule")
        module.parse_definition_(text)
        self.assertEquals(["myStylesheet"], module.style_sheet_names);
        self.assertEquals(["dependency1", "dependency2"],
                          module.dependent_module_names);

    def test_parse_definition_with_deps_and_stylesheet_swapped(self):
        text = """// blahblahblah
base.require('dependency1');
base.requireStylesheet('myStylesheet');
base.require('dependency2');
"""
        module = parse_deps.Module("myModule")
        module.parse_definition_(text)
        self.assertEquals(["myStylesheet"], module.style_sheet_names);
        self.assertEquals(["dependency1", "dependency2"],
                          module.dependent_module_names);

    def test_parse_empty_definition(self):
        text = """// blahblahblah
"""
        module = parse_deps.Module("myModule")
        module.parse_definition_(text, decl_required = False)
        self.assertEquals([], module.style_sheet_names);
        self.assertEquals([], module.dependent_module_names);

    def test_parse_definition_3(self):
        # A commented-out require must not be picked up as a dependency.
        text = """// blahblahblah
base.require('dependency1');
//base.require('dependency2');
"""
        module = parse_deps.Module("myModule")
        module.parse_definition_(text)
        self.assertEquals([], module.style_sheet_names);
        self.assertEquals(["dependency1"], module.dependent_module_names);

    def test_parse_definition_4(self):
        # Realistic file header with license, docstring and many requires.
        text = """// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'use strict';
/**
* @fileoverview TimelineView visualizes TRACE_EVENT events using the
* tracing.Timeline component and adds in selection summary and control buttons.
*/
base.requireStylesheet('timeline_view')
base.require('timeline');
base.require('timeline_analysis');
base.require('overlay');
base.require('trace_event_importer');
base.require('linux_perf_importer');
base.exportsTo('tracing', function() {"""
        module = parse_deps.Module("timeline_view")
        module.parse_definition_(text)
        self.assertEquals(["timeline_view"], module.style_sheet_names);
        self.assertEquals(["timeline",
                           "timeline_analysis",
                           "overlay",
                           "trace_event_importer",
                           "linux_perf_importer"], module.dependent_module_names);

    def test_parse_definition_with_definition_in_comments(self):
        # Requires inside a block comment must be ignored.
        text = """// SomeComment
/*
* All subclasses should depend on linux_perf_parser, e.g.
*
* base.require('linux_perf_parser');
* base.exportTo('tracing', function() { });
*
*/
base.require('dependency1');
base.require('dependency2');
"""
        module = parse_deps.Module("myModule")
        module.parse_definition_(text)
        self.assertEquals([], module.style_sheet_names);
        self.assertEquals(["dependency1", "dependency2"],
                          module.dependent_module_names);

    def test_parse_dependency_with_slashes(self):
        # Path-style module names are rejected.
        text = """base.require("foo/dependency1")
"""
        module = parse_deps.Module("myModule")
        self.assertRaises(parse_deps.DepsException,
                          lambda: module.parse_definition_(text))

    def test_parse_dependency_with_dots(self):
        # Dotted module names are allowed.
        text = """base.require("foo.dependency1")
"""
        module = parse_deps.Module("myModule")
        module.parse_definition_(text)
        self.assertEquals([], module.style_sheet_names);
        self.assertEquals(["foo.dependency1"],
                          module.dependent_module_names);
class ResourceFinderStub(object):
    """In-memory stand-in for parse_deps.ResourceFinder used by tests."""

    def __init__(self):
        self.modules = {}

    def add_module(self, name, filename, contents):
        """Register a fake module under *name*."""
        self.modules[name] = {"filename": filename,
                              "contents": contents}

    def find_and_load_module(self, current_module, requested_module_name):
        """Return (filename, contents) for a registered module, else None."""
        record = self.modules.get(requested_module_name)
        if record is None:
            return None
        return (record["filename"], record["contents"])
# Fixture module bodies for FlattenTests: x depends on y and z, y on z.
# NOTE(review): x uses base.exportTo while y/z use base.exportsTo -- looks
# inconsistent; confirm which spelling parse_deps actually recognizes.
x_contents = """
base.require('y');
base.require('z');
base.exportTo("xyz", function() { });
"""

y_contents = """
base.require('z');
base.exportsTo("xyz", function() { });
"""

z_contents = """
base.exportsTo("xyz", function() { });
"""
class FlattenTests(unittest.TestCase):
    """Tests dependency resolution and load-sequence computation."""

    def test_module(self):
        resource_finder = ResourceFinderStub()
        resource_finder.add_module("y", "y.js", y_contents);
        resource_finder.add_module("z", "z.js", z_contents);
        x_module = parse_deps.Module("x")
        x_module.load_and_parse("x.js", x_contents)
        all_resources = {}
        x_module.resolve(all_resources, resource_finder)
        self.assertEquals([all_resources["scripts"]["y"],
                           all_resources["scripts"]["z"]],
                          x_module.dependent_modules)
        # z must load before y (y requires z), and x comes last.
        already_loaded_set = set()
        load_sequence = []
        x_module.compute_load_sequence_recursive(load_sequence, already_loaded_set)
        self.assertEquals([all_resources["scripts"]["z"],
                           all_resources["scripts"]["y"],
                           x_module],
                          load_sequence)
class ResourceFinderTest(unittest.TestCase):
    """Tests for parse_deps.ResourceFinder against the real src tree."""

    def test_basic(self):
        resource_finder = parse_deps.ResourceFinder(srcdir)
        module = parse_deps.Module("unittest")
        module.load_and_parse(os.path.join(srcdir, "unittest.js"))
        filename, contents = resource_finder.find_and_load_module(module, "base")
        self.assertTrue(os.path.samefile(filename, os.path.join(srcdir, "base.js")))
        expected_contents = ''
        with open(os.path.join(srcdir, "base.js")) as f:
            expected_contents = f.read()
        self.assertEquals(contents, expected_contents)

    def test_dependency_in_subdir(self):
        # Dotted module names map to subdirectories of srcdir.
        resource_finder = parse_deps.ResourceFinder(srcdir)
        module = parse_deps.Module("unittest")
        module.load_and_parse(os.path.join(srcdir, "unittest.js"))
        filename, contents = resource_finder.find_and_load_module(
            module, "tracks.timeline_track")
        self.assertTrue(os.path.samefile(filename, os.path.join(srcdir, "tracks/timeline_track.js")))
        expected_contents = ''
        with open(os.path.join(srcdir, "tracks/timeline_track.js")) as f:
            expected_contents = f.read()
        self.assertEquals(contents, expected_contents)
class CalcLoadSequenceTest(unittest.TestCase):
    """Tests for parse_deps.calc_load_sequence."""

    def test_one_toplevel_nodeps(self):
        load_sequence = parse_deps.calc_load_sequence(
            [os.path.join(srcdir, "unittest.js")], srcdir)
        name_sequence = [x.name for x in load_sequence]
        self.assertEquals(["unittest"], name_sequence)

    # Tests that we resolve deps between toplevels.
    # NOTE(review): still an empty stub -- needs an implementation.
    def test_calc_load_sequence_two_toplevels(self):
        pass
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
#
# $Id: _psbsd.py 1498 2012-07-24 21:41:28Z g.rodola $
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""FreeBSD platform implementation."""
import errno
import os
import sys
import _psutil_bsd
import _psutil_posix
from psutil import _psposix
from psutil.error import AccessDenied, NoSuchProcess, TimeoutExpired
from psutil._compat import namedtuple
from psutil._common import *
__extra__all__ = []

# --- constants

# CPU count, boot time (epoch seconds) and total physical memory are read
# once at import time from the C extension / posix helpers.
NUM_CPUS = _psutil_bsd.get_num_cpus()
BOOT_TIME = _psutil_bsd.get_system_boot_time()
TOTAL_PHYMEM = _psutil_bsd.get_virtual_mem()[0]
_TERMINAL_MAP = _psposix._get_terminal_map()  # tty device number -> name
_PAGESIZE = os.sysconf("SC_PAGE_SIZE")
_cputimes_ntuple = namedtuple('cputimes', 'user nice system idle irq')
# --- public functions

# Return type of virtual_memory(): cross-platform fields first, then the
# FreeBSD-specific page counters.
nt_virtmem_info = namedtuple('vmem', ' '.join([
    # all platforms
    'total', 'available', 'percent', 'used', 'free',
    # FreeBSD specific
    'active',
    'inactive',
    'buffers',
    'cached',
    'shared',
    'wired']))
def virtual_memory():
    """System virtual memory as a namedtuple."""
    mem = _psutil_bsd.get_virtual_mem()
    total, free, active, inactive, wired, cached, buffers, shared = mem
    # inactive + cached + free pages are reclaimable ("available");
    # active + wired + cached count as used.
    avail = inactive + cached + free
    used = active + wired + cached
    percent = usage_percent((total - avail), total, _round=1)
    return nt_virtmem_info(total, avail, percent, used, free,
                           active, inactive, buffers, cached, shared, wired)
def swap_memory():
    """System swap memory as (total, used, free, sin, sout) namedtuple."""
    # The C layer reports page counts; convert everything to bytes.
    pages = _psutil_bsd.get_swap_mem()
    total, used, free, sin, sout = [p * _PAGESIZE for p in pages]
    percent = usage_percent(used, total, _round=1)
    return nt_swapmeminfo(total, used, free, percent, sin, sout)
def get_system_cpu_times():
    """Return system-wide CPU times as a named tuple."""
    return _cputimes_ntuple(*_psutil_bsd.get_system_cpu_times())
def get_system_per_cpu_times():
    """Return CPU times as a list of named tuples, one per logical CPU."""
    return [_cputimes_ntuple(*cpu_t)
            for cpu_t in _psutil_bsd.get_system_per_cpu_times()]
# XXX
# Ok, this is very dirty.
# On FreeBSD < 8 we cannot gather per-cpu information, see:
# http://code.google.com/p/psutil/issues/detail?id=226
# If NUM_CPUS > 1, on first call we return single cpu times to avoid a
# crash at psutil import time.
# Next calls will fail with NotImplementedError
if not hasattr(_psutil_bsd, "get_system_per_cpu_times"):
    def get_system_per_cpu_times():
        """Fallback: single-CPU result (once), then NotImplementedError."""
        if NUM_CPUS == 1:
            # BUG FIX: previously returned the function object itself
            # ([get_system_cpu_times]) instead of calling it.
            return [get_system_cpu_times()]
        if get_system_per_cpu_times.__called__:
            raise NotImplementedError("supported only starting from FreeBSD 8")
        get_system_per_cpu_times.__called__ = True
        return [get_system_cpu_times()]
    get_system_per_cpu_times.__called__ = False
def disk_partitions(all=False):
    """Return mounted partitions as nt_partition tuples.

    With all=False, entries whose device is not an absolute, existing
    path (pseudo/virtual filesystems) are filtered out.
    """
    retlist = []
    for device, mountpoint, fstype, opts in _psutil_bsd.get_disk_partitions():
        if device == 'none':
            device = ''
        if not all:
            if not os.path.isabs(device) or not os.path.exists(device):
                continue
        retlist.append(nt_partition(device, mountpoint, fstype, opts))
    return retlist
def get_system_users():
    """Return currently connected users as nt_user tuples."""
    retlist = []
    for user, tty, hostname, tstamp in _psutil_bsd.get_system_users():
        if tty == '~':
            # utmp entries for reboot/shutdown events, not real users
            continue
        retlist.append(nt_user(user, tty or None, hostname, tstamp))
    return retlist
# Thin aliases over the C extension / posix helpers.
get_pid_list = _psutil_bsd.get_pid_list
pid_exists = _psposix.pid_exists
get_disk_usage = _psposix.get_disk_usage
network_io_counters = _psutil_bsd.get_network_io_counters
disk_io_counters = _psutil_bsd.get_disk_io_counters
def wrap_exceptions(method):
    """Decorator translating OSError from the C layer into psutil errors.

    ESRCH becomes NoSuchProcess, EPERM/EACCES become AccessDenied; any
    other OSError propagates unchanged.
    """
    def wrapper(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except OSError:
            # sys.exc_info() keeps this compatible with Python 2.4 syntax.
            err = sys.exc_info()[1]
            if err.errno == errno.ESRCH:
                raise NoSuchProcess(self.pid, self._process_name)
            elif err.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._process_name)
            else:
                raise
    return wrapper
# Map kernel process-state codes (exposed by the C extension) to psutil's
# portable STATUS_* constants.
_status_map = {
    _psutil_bsd.SSTOP : STATUS_STOPPED,
    _psutil_bsd.SSLEEP : STATUS_SLEEPING,
    _psutil_bsd.SRUN : STATUS_RUNNING,
    _psutil_bsd.SIDL : STATUS_IDLE,
    _psutil_bsd.SWAIT : STATUS_WAITING,
    _psutil_bsd.SLOCK : STATUS_LOCKED,
    _psutil_bsd.SZOMB : STATUS_ZOMBIE,
}
class Process(object):
    """Wrapper class around underlying C implementation."""

    __slots__ = ["pid", "_process_name"]

    def __init__(self, pid):
        self.pid = pid
        self._process_name = None

    @wrap_exceptions
    def get_process_name(self):
        """Return process name as a string of limited len (15)."""
        return _psutil_bsd.get_process_name(self.pid)

    @wrap_exceptions
    def get_process_exe(self):
        """Return process executable pathname."""
        return _psutil_bsd.get_process_exe(self.pid)

    @wrap_exceptions
    def get_process_cmdline(self):
        """Return process cmdline as a list of arguments."""
        return _psutil_bsd.get_process_cmdline(self.pid)

    @wrap_exceptions
    def get_process_terminal(self):
        """Return the terminal device the process is attached to, or None."""
        tty_nr = _psutil_bsd.get_process_tty_nr(self.pid)
        try:
            return _TERMINAL_MAP[tty_nr]
        except KeyError:
            return None

    @wrap_exceptions
    def get_process_ppid(self):
        """Return process parent pid."""
        return _psutil_bsd.get_process_ppid(self.pid)

    # XXX - available on FreeBSD >= 8 only
    if hasattr(_psutil_bsd, "get_process_cwd"):
        @wrap_exceptions
        def get_process_cwd(self):
            """Return process current working directory."""
            # sometimes we get an empty string, in which case we turn
            # it into None
            return _psutil_bsd.get_process_cwd(self.pid) or None

    @wrap_exceptions
    def get_process_uids(self):
        """Return real, effective and saved user ids."""
        real, effective, saved = _psutil_bsd.get_process_uids(self.pid)
        return nt_uids(real, effective, saved)

    @wrap_exceptions
    def get_process_gids(self):
        """Return real, effective and saved group ids."""
        real, effective, saved = _psutil_bsd.get_process_gids(self.pid)
        return nt_gids(real, effective, saved)

    @wrap_exceptions
    def get_cpu_times(self):
        """Return a tuple containing process user/kernel time."""
        user, system = _psutil_bsd.get_process_cpu_times(self.pid)
        return nt_cputimes(user, system)

    @wrap_exceptions
    def get_memory_info(self):
        """Return a tuple with the process' RSS and VMS size."""
        rss, vms = _psutil_bsd.get_process_memory_info(self.pid)[:2]
        return nt_meminfo(rss, vms)

    # Extended memory info adds text/data/stack segment sizes.
    _nt_ext_mem = namedtuple('meminfo', 'rss vms text data stack')

    @wrap_exceptions
    def get_ext_memory_info(self):
        """Return extended memory metrics (rss, vms, text, data, stack)."""
        return self._nt_ext_mem(*_psutil_bsd.get_process_memory_info(self.pid))

    @wrap_exceptions
    def get_process_create_time(self):
        """Return the start time of the process as a number of seconds since
        the epoch."""
        return _psutil_bsd.get_process_create_time(self.pid)

    @wrap_exceptions
    def get_process_num_threads(self):
        """Return the number of threads belonging to the process."""
        return _psutil_bsd.get_process_num_threads(self.pid)

    @wrap_exceptions
    def get_num_ctx_switches(self):
        """Return the process' context switch counters."""
        return nt_ctxsw(*_psutil_bsd.get_process_num_ctx_switches(self.pid))

    @wrap_exceptions
    def get_num_fds(self):
        """Return the number of file descriptors opened by this process."""
        return _psutil_bsd.get_process_num_fds(self.pid)

    @wrap_exceptions
    def get_process_threads(self):
        """Return this process' threads as a list of nt_thread tuples."""
        rawlist = _psutil_bsd.get_process_threads(self.pid)
        retlist = []
        for thread_id, utime, stime in rawlist:
            ntuple = nt_thread(thread_id, utime, stime)
            retlist.append(ntuple)
        return retlist

    @wrap_exceptions
    def get_open_files(self):
        """Return files opened by process as a list of namedtuples."""
        # XXX - C implementation available on FreeBSD >= 8 only
        # else fallback on lsof parser
        if hasattr(_psutil_bsd, "get_process_open_files"):
            rawlist = _psutil_bsd.get_process_open_files(self.pid)
            return [nt_openfile(path, fd) for path, fd in rawlist]
        else:
            lsof = _psposix.LsofParser(self.pid, self._process_name)
            return lsof.get_process_open_files()

    @wrap_exceptions
    def get_connections(self, kind='inet'):
        """Return network connections opened by a process as a list of
        namedtuples.
        """
        if kind not in conn_tmap:
            raise ValueError("invalid %r kind argument; choose between %s"
                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
        families, types = conn_tmap[kind]
        ret = _psutil_bsd.get_process_connections(self.pid, families, types)
        return [nt_connection(*conn) for conn in ret]

    @wrap_exceptions
    def process_wait(self, timeout=None):
        """Wait for process termination, re-raising posix timeouts with
        this process' pid/name attached."""
        try:
            return _psposix.wait_pid(self.pid, timeout)
        except TimeoutExpired:
            raise TimeoutExpired(self.pid, self._process_name)

    @wrap_exceptions
    def get_process_nice(self):
        """Return the process niceness (priority)."""
        return _psutil_posix.getpriority(self.pid)

    @wrap_exceptions
    def set_process_nice(self, value):
        """Set the process niceness (priority)."""
        return _psutil_posix.setpriority(self.pid, value)

    @wrap_exceptions
    def get_process_status(self):
        """Return the process status as one of the STATUS_* constants."""
        code = _psutil_bsd.get_process_status(self.pid)
        if code in _status_map:
            return _status_map[code]
        # unknown kernel state code
        return constant(-1, "?")

    @wrap_exceptions
    def get_process_io_counters(self):
        """Return read/write counters (operation counts and bytes)."""
        rc, wc, rb, wb = _psutil_bsd.get_process_io_counters(self.pid)
        return nt_io(rc, wc, rb, wb)

    # Return shapes for get_memory_maps() consumers.
    nt_mmap_grouped = namedtuple('mmap',
                                 'path rss, private, ref_count, shadow_count')
    nt_mmap_ext = namedtuple('mmap',
                             'addr, perms path rss, private, ref_count, shadow_count')

    @wrap_exceptions
    def get_memory_maps(self):
        """Return the process' memory mappings."""
        return _psutil_bsd.get_process_memory_maps(self.pid)

    # FreeBSD < 8 does not support kinfo_getfile() and kinfo_getvmmap()
    if not hasattr(_psutil_bsd, 'get_process_open_files'):
        def _not_implemented(self):
            raise NotImplementedError("supported only starting from FreeBSD 8")
        get_open_files = _not_implemented
        get_process_cwd = _not_implemented
        get_memory_maps = _not_implemented
        get_num_fds = _not_implemented
| |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import inspect
import discord
import functools
from .errors import *
from .cooldowns import Cooldown, BucketType, CooldownMapping
from .view import quoted_word
from . import converter as converters
# Public API surface of the commands extension.
__all__ = [ 'Command', 'Group', 'GroupMixin', 'command', 'group',
            'has_role', 'has_permissions', 'has_any_role', 'check',
            'bot_has_role', 'bot_has_permissions', 'bot_has_any_role',
            'cooldown', 'guild_only', 'is_owner', 'is_nsfw', ]
def wrap_callback(coro):
    """Wrap *coro* so that any non-command exception it raises surfaces
    as :exc:`.CommandInvokeError`, while :exc:`.CommandError` subclasses
    propagate untouched and cancellation is swallowed.
    """
    @functools.wraps(coro)
    @asyncio.coroutine
    def wrapped(*args, **kwargs):
        try:
            result = yield from coro(*args, **kwargs)
        except CommandError:
            # Command-specific errors are already meaningful; re-raise as-is.
            raise
        except asyncio.CancelledError:
            # Cancellation is not an error condition.
            return
        except Exception as exc:
            raise CommandInvokeError(exc) from exc
        else:
            return result
    return wrapped
def hooked_wrapped_callback(command, ctx, coro):
    """Like :func:`wrap_callback`, but additionally marks ``ctx.command_failed``
    on any failure and guarantees the command's post-invoke hooks run.
    """
    @functools.wraps(coro)
    @asyncio.coroutine
    def wrapped(*args, **kwargs):
        try:
            result = yield from coro(*args, **kwargs)
        except CommandError:
            ctx.command_failed = True
            raise
        except asyncio.CancelledError:
            ctx.command_failed = True
            return
        except Exception as exc:
            ctx.command_failed = True
            raise CommandInvokeError(exc) from exc
        finally:
            # Post-invoke hooks run no matter how the callback exited.
            yield from command.call_after_hooks(ctx)
        return result
    return wrapped
def _convert_to_bool(argument):
lowered = argument.lower()
if lowered in ('yes', 'y', 'true', 't', '1', 'enable', 'on'):
return True
elif lowered in ('no', 'n', 'false', 'f', '0', 'disable', 'off'):
return False
else:
raise BadArgument(lowered + ' is not a recognised boolean option')
class Command:
    """A class that implements the protocol for a bot text command.
    These are not created manually, instead they are created via the
    decorator or functional interface.
    Attributes
    -----------
    name: str
        The name of the command.
    callback: coroutine
        The coroutine that is executed when the command is called.
    help: str
        The long help text for the command.
    brief: str
        The short help text for the command. If this is not specified
        then the first line of the long help text is used instead.
    usage: str
        A replacement for arguments in the default help text.
    aliases: list
        The list of aliases the command can be invoked under.
    enabled: bool
        A boolean that indicates if the command is currently enabled.
        If the command is invoked while it is disabled, then
        :exc:`.DisabledCommand` is raised to the :func:`.on_command_error`
        event. Defaults to ``True``.
    parent: Optional[command]
        The parent command that this command belongs to. ``None`` if there
        isn't one.
    checks
        A list of predicates that verifies if the command could be executed
        with the given :class:`.Context` as the sole parameter. If an exception
        is necessary to be thrown to signal failure, then one derived from
        :exc:`.CommandError` should be used. Note that if the checks fail then
        :exc:`.CheckFailure` exception is raised to the :func:`.on_command_error`
        event.
    description: str
        The message prefixed into the default help command.
    hidden: bool
        If ``True``\, the default help command does not show this in the
        help output.
    rest_is_raw: bool
        If ``False`` and a keyword-only argument is provided then the keyword
        only argument is stripped and handled as if it was a regular argument
        that handles :exc:`.MissingRequiredArgument` and default values in a
        regular matter rather than passing the rest completely raw. If ``True``
        then the keyword-only argument will pass in the rest of the arguments
        in a completely raw matter. Defaults to ``False``.
    ignore_extra: bool
        If ``True``\, ignores extraneous strings passed to a command if all its
        requirements are met (e.g. ``?foo a b c`` when only expecting ``a``
        and ``b``). Otherwise :func:`.on_command_error` and local error handlers
        are called with :exc:`.TooManyArguments`. Defaults to ``True``.
    """
    def __init__(self, name, callback, **kwargs):
        # Validate the name *before* any assignment so a failed construction
        # never leaves a partially-initialised instance behind.
        if not isinstance(name, str):
            raise TypeError('Name of a command must be a string.')
        self.name = name
        self.callback = callback
        self.enabled = kwargs.get('enabled', True)
        self.help = kwargs.get('help')
        self.brief = kwargs.get('brief')
        self.usage = kwargs.get('usage')
        self.rest_is_raw = kwargs.get('rest_is_raw', False)
        self.aliases = kwargs.get('aliases', [])
        self.description = inspect.cleandoc(kwargs.get('description', ''))
        self.hidden = kwargs.get('hidden', False)
        signature = inspect.signature(callback)
        self.params = signature.parameters.copy()
        self.checks = kwargs.get('checks', [])
        self.module = callback.__module__
        self.ignore_extra = kwargs.get('ignore_extra', True)
        self.instance = None
        self.parent = None
        self._buckets = CooldownMapping(kwargs.get('cooldown'))
        self._before_invoke = None
        self._after_invoke = None

    @asyncio.coroutine
    def dispatch_error(self, ctx, error):
        """Run the local error handler, the cog-level error handler, and
        finally dispatch the global ``command_error`` event.
        """
        ctx.command_failed = True
        cog = self.instance
        try:
            coro = self.on_error
        except AttributeError:
            # No local handler registered via @command.error.
            pass
        else:
            injected = wrap_callback(coro)
            if cog is not None:
                yield from injected(cog, ctx, error)
            else:
                yield from injected(ctx, error)
        try:
            # Name-mangled cog-level handler; getattr on a None cog raises
            # AttributeError and is deliberately ignored.
            local = getattr(cog, '_{0.__class__.__name__}__error'.format(cog))
        except AttributeError:
            pass
        else:
            wrapped = wrap_callback(local)
            yield from wrapped(ctx, error)
        finally:
            # The global event always fires, even if a handler above raised.
            ctx.bot.dispatch('command_error', ctx, error)

    def __get__(self, instance, owner):
        # Descriptor protocol: remember the cog instance when accessed
        # through it, so callbacks receive `self` correctly.
        if instance is not None:
            self.instance = instance
        return self

    @asyncio.coroutine
    def do_conversion(self, ctx, converter, argument):
        """Convert a string *argument* using *converter*, which may be a
        plain callable, a :class:`.Converter` subclass/instance, or a
        discord model class with a registered converter.
        """
        if converter is bool:
            return _convert_to_bool(argument)
        # Map discord model classes (e.g. discord.Member) to their
        # corresponding converter in the converters module.
        if converter.__module__.startswith('discord.') and not converter.__module__.endswith('converter'):
            converter = getattr(converters, converter.__name__ + 'Converter')
        if inspect.isclass(converter):
            if issubclass(converter, converters.Converter):
                instance = converter()
                ret = yield from instance.convert(ctx, argument)
                return ret
            else:
                method = getattr(converter, 'convert', None)
                if method is not None and inspect.ismethod(method):
                    ret = yield from method(ctx, argument)
                    return ret
        elif isinstance(converter, converters.Converter):
            ret = yield from converter.convert(ctx, argument)
            return ret
        # Fallback: treat the converter as a plain callable (e.g. int).
        return converter(argument)

    def _get_converter(self, param):
        # Resolve the converter for a parameter: explicit annotation first,
        # then the type of the default value, finally str.
        converter = param.annotation
        if converter is param.empty:
            if param.default is not param.empty:
                converter = str if param.default is None else type(param.default)
            else:
                converter = str
        return converter

    @asyncio.coroutine
    def transform(self, ctx, param):
        """Consume and convert the next argument from the view for *param*."""
        required = param.default is param.empty
        converter = self._get_converter(param)
        # A keyword-only parameter means "consume the rest of the input",
        # unless rest_is_raw asked for the raw remainder instead.
        consume_rest_is_special = param.kind == param.KEYWORD_ONLY and not self.rest_is_raw
        view = ctx.view
        view.skip_ws()
        if view.eof:
            if param.kind == param.VAR_POSITIONAL:
                raise RuntimeError() # break the loop
            if required:
                raise MissingRequiredArgument(param)
            return param.default
        if consume_rest_is_special:
            argument = view.read_rest().strip()
        else:
            argument = quoted_word(view)
        try:
            return (yield from self.do_conversion(ctx, converter, argument))
        except CommandError as e:
            raise e
        except Exception as e:
            try:
                name = converter.__name__
            except AttributeError:
                name = converter.__class__.__name__
            raise BadArgument('Converting to "{}" failed for parameter "{}".'.format(name, param.name)) from e

    @property
    def clean_params(self):
        """Retrieves the parameter OrderedDict without the context or self parameters.
        Useful for inspecting signature.
        """
        result = self.params.copy()
        if self.instance is not None:
            # first parameter is self
            result.popitem(last=False)
        try:
            # first/second parameter is context
            result.popitem(last=False)
        except Exception as e:
            raise ValueError('Missing context parameter') from None
        return result

    @property
    def full_parent_name(self):
        """Retrieves the fully qualified parent command name.
        This the base command name required to execute it. For example,
        in ``?one two three`` the parent name would be ``one two``.
        """
        entries = []
        command = self
        while command.parent is not None:
            command = command.parent
            entries.append(command.name)
        return ' '.join(reversed(entries))

    @property
    def root_parent(self):
        """Retrieves the root parent of this command.
        If the command has no parents then it returns ``None``.
        For example in commands ``?a b c test``, the root parent is
        ``a``.
        """
        entries = []
        command = self
        while command.parent is not None:
            command = command.parent
            entries.append(command)
        if len(entries) == 0:
            return None
        return entries[-1]

    @property
    def qualified_name(self):
        """Retrieves the fully qualified command name.
        This is the full parent name with the command name as well.
        For example, in ``?one two three`` the qualified name would be
        ``one two three``.
        """
        parent = self.full_parent_name
        if parent:
            return parent + ' ' + self.name
        else:
            return self.name

    def __str__(self):
        return self.qualified_name

    @asyncio.coroutine
    def _parse_arguments(self, ctx):
        """Populate ``ctx.args``/``ctx.kwargs`` from the string view,
        honoring positional, consume-rest, and var-positional parameters.
        """
        ctx.args = [ctx] if self.instance is None else [self.instance, ctx]
        ctx.kwargs = {}
        args = ctx.args
        kwargs = ctx.kwargs
        view = ctx.view
        iterator = iter(self.params.items())
        if self.instance is not None:
            # we have 'self' as the first parameter so just advance
            # the iterator and resume parsing
            try:
                next(iterator)
            except StopIteration:
                fmt = 'Callback for {0.name} command is missing "self" parameter.'
                raise discord.ClientException(fmt.format(self))
        # next we have the 'ctx' as the next parameter
        try:
            next(iterator)
        except StopIteration:
            fmt = 'Callback for {0.name} command is missing "ctx" parameter.'
            raise discord.ClientException(fmt.format(self))
        for name, param in iterator:
            if param.kind == param.POSITIONAL_OR_KEYWORD:
                transformed = yield from self.transform(ctx, param)
                args.append(transformed)
            elif param.kind == param.KEYWORD_ONLY:
                # kwarg only param denotes "consume rest" semantics
                if self.rest_is_raw:
                    converter = self._get_converter(param)
                    argument = view.read_rest()
                    kwargs[name] = yield from self.do_conversion(ctx, converter, argument)
                else:
                    kwargs[name] = yield from self.transform(ctx, param)
                break
            elif param.kind == param.VAR_POSITIONAL:
                while not view.eof:
                    try:
                        transformed = yield from self.transform(ctx, param)
                        args.append(transformed)
                    except RuntimeError:
                        # Raised by transform() at EOF to terminate *args.
                        break
        if not self.ignore_extra:
            if not view.eof:
                raise TooManyArguments('Too many arguments passed to ' + self.qualified_name)

    @asyncio.coroutine
    def _verify_checks(self, ctx):
        # Raises DisabledCommand / CheckFailure when invocation must not proceed.
        if not self.enabled:
            raise DisabledCommand('{0.name} command is disabled'.format(self))
        if not (yield from self.can_run(ctx)):
            raise CheckFailure('The check functions for command {0.qualified_name} failed.'.format(self))

    @asyncio.coroutine
    def call_before_hooks(self, ctx):
        # now that we're done preparing we can call the pre-command hooks
        # first, call the command local hook:
        cog = self.instance
        if self._before_invoke is not None:
            if cog is None:
                yield from self._before_invoke(ctx)
            else:
                yield from self._before_invoke(cog, ctx)
        # call the cog local hook if applicable:
        try:
            hook = getattr(cog, '_{0.__class__.__name__}__before_invoke'.format(cog))
        except AttributeError:
            pass
        else:
            yield from hook(ctx)
        # call the bot global hook if necessary
        hook = ctx.bot._before_invoke
        if hook is not None:
            yield from hook(ctx)

    @asyncio.coroutine
    def call_after_hooks(self, ctx):
        # Mirror image of call_before_hooks: local, then cog, then global.
        cog = self.instance
        if self._after_invoke is not None:
            if cog is None:
                yield from self._after_invoke(ctx)
            else:
                yield from self._after_invoke(cog, ctx)
        try:
            hook = getattr(cog, '_{0.__class__.__name__}__after_invoke'.format(cog))
        except AttributeError:
            pass
        else:
            yield from hook(ctx)
        hook = ctx.bot._after_invoke
        if hook is not None:
            yield from hook(ctx)

    @asyncio.coroutine
    def prepare(self, ctx):
        """Run checks, rate limits, argument parsing and pre-invoke hooks."""
        ctx.command = self
        yield from self._verify_checks(ctx)
        if self._buckets.valid:
            bucket = self._buckets.get_bucket(ctx)
            retry_after = bucket.is_rate_limited()
            if retry_after:
                raise CommandOnCooldown(bucket, retry_after)
        yield from self._parse_arguments(ctx)
        yield from self.call_before_hooks(ctx)

    def reset_cooldown(self, ctx):
        """Resets the cooldown on this command.
        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context to reset the cooldown under.
        """
        if self._buckets.valid:
            bucket = self._buckets.get_bucket(ctx)
            bucket.reset()

    @asyncio.coroutine
    def invoke(self, ctx):
        yield from self.prepare(ctx)
        # terminate the invoked_subcommand chain.
        # since we're in a regular command (and not a group) then
        # the invoked subcommand is None.
        ctx.invoked_subcommand = None
        injected = hooked_wrapped_callback(self, ctx, self.callback)
        yield from injected(*ctx.args, **ctx.kwargs)

    @asyncio.coroutine
    def reinvoke(self, ctx, *, call_hooks=False):
        # Re-run the command bypassing checks and cooldowns; hooks are
        # optional so help/retry machinery can skip them.
        ctx.command = self
        yield from self._parse_arguments(ctx)
        if call_hooks:
            yield from self.call_before_hooks(ctx)
        ctx.invoked_subcommand = None
        try:
            yield from self.callback(*ctx.args, **ctx.kwargs)
        except:
            ctx.command_failed = True
            raise
        finally:
            if call_hooks:
                yield from self.call_after_hooks(ctx)

    def error(self, coro):
        """A decorator that registers a coroutine as a local error handler.
        A local error handler is an :func:`.on_command_error` event limited to
        a single command. However, the :func:`.on_command_error` is still
        invoked afterwards as the catch-all.
        Parameters
        -----------
        coro
            The coroutine to register as the local error handler.
        Raises
        -------
        discord.ClientException
            The coroutine is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            raise discord.ClientException('The error handler must be a coroutine.')
        self.on_error = coro
        return coro

    def before_invoke(self, coro):
        """A decorator that registers a coroutine as a pre-invoke hook.
        A pre-invoke hook is called directly before the command is
        called. This makes it a useful function to set up database
        connections or any type of set up required.
        This pre-invoke hook takes a sole parameter, a :class:`.Context`.
        See :meth:`.Bot.before_invoke` for more info.
        Parameters
        -----------
        coro
            The coroutine to register as the pre-invoke hook.
        Raises
        -------
        :exc:`.ClientException`
            The coroutine is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            # Fixed copy-paste: this message used to claim "error handler".
            raise discord.ClientException('The pre-invoke hook must be a coroutine.')
        self._before_invoke = coro
        return coro

    def after_invoke(self, coro):
        """A decorator that registers a coroutine as a post-invoke hook.
        A post-invoke hook is called directly after the command is
        called. This makes it a useful function to clean-up database
        connections or any type of clean up required.
        This post-invoke hook takes a sole parameter, a :class:`.Context`.
        See :meth:`.Bot.after_invoke` for more info.
        Parameters
        -----------
        coro
            The coroutine to register as the post-invoke hook.
        Raises
        -------
        :exc:`.ClientException`
            The coroutine is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            # Fixed copy-paste: this message used to claim "error handler".
            raise discord.ClientException('The post-invoke hook must be a coroutine.')
        self._after_invoke = coro
        return coro

    @property
    def cog_name(self):
        """The name of the cog this command belongs to. None otherwise."""
        return type(self.instance).__name__ if self.instance is not None else None

    @property
    def short_doc(self):
        """Gets the "short" documentation of a command.
        By default, this is the :attr:`brief` attribute.
        If that lookup leads to an empty string then the first line of the
        :attr:`help` attribute is used instead.
        """
        if self.brief:
            return self.brief
        if self.help:
            return self.help.split('\n', 1)[0]
        return ''

    @property
    def signature(self):
        """Returns a POSIX-like signature useful for help command output."""
        result = []
        parent = self.full_parent_name
        if len(self.aliases) > 0:
            aliases = '|'.join(self.aliases)
            fmt = '[%s|%s]' % (self.name, aliases)
            if parent:
                fmt = parent + ' ' + fmt
            result.append(fmt)
        else:
            name = self.name if not parent else parent + ' ' + self.name
            result.append(name)
        if self.usage:
            # An explicit usage string replaces the generated parameter list.
            result.append(self.usage)
            return ' '.join(result)
        params = self.clean_params
        if not params:
            return ' '.join(result)
        for name, param in params.items():
            if param.default is not param.empty:
                # We don't want None or '' to trigger the [name=value] case and instead it should
                # do [name] since [name=None] or [name=] are not exactly useful for the user.
                should_print = param.default if isinstance(param.default, str) else param.default is not None
                if should_print:
                    result.append('[%s=%s]' % (name, param.default))
                else:
                    result.append('[%s]' % name)
            elif param.kind == param.VAR_POSITIONAL:
                result.append('[%s...]' % name)
            else:
                result.append('<%s>' % name)
        return ' '.join(result)

    @asyncio.coroutine
    def can_run(self, ctx):
        """|coro|
        Checks if the command can be executed by checking all the predicates
        inside the :attr:`.checks` attribute.
        Parameters
        -----------
        ctx: :class:`.Context`
            The ctx of the command currently being invoked.
        Returns
        --------
        bool
            A boolean indicating if the command can be invoked.
        """
        original = ctx.command
        ctx.command = self
        try:
            if not (yield from ctx.bot.can_run(ctx)):
                raise CheckFailure('The global check functions for command {0.qualified_name} failed.'.format(self))
            cog = self.instance
            if cog is not None:
                try:
                    local_check = getattr(cog, '_{0.__class__.__name__}__local_check'.format(cog))
                except AttributeError:
                    pass
                else:
                    ret = yield from discord.utils.maybe_coroutine(local_check, ctx)
                    if not ret:
                        return False
            predicates = self.checks
            if not predicates:
                # since we have no checks, then we just return True.
                return True
            return (yield from discord.utils.async_all(predicate(ctx) for predicate in predicates))
        finally:
            # Always restore the previously-running command on the context.
            ctx.command = original
class GroupMixin:
    """Mixin providing the command registration and lookup machinery shared
    by :class:`.Group`-like classes.

    Attributes
    -----------
    all_commands: dict
        A mapping of command name to :class:`.Command` or superclass
        objects. Aliases map to the same object as the primary name.
    """
    def __init__(self, **kwargs):
        self.all_commands = {}
        super().__init__(**kwargs)

    @property
    def commands(self):
        """Set[:class:`.Command`]: the unique registered commands, with aliases collapsed."""
        return set(self.all_commands.values())

    def recursively_remove_all_commands(self):
        # Iterate over a snapshot because remove_command mutates the mapping.
        for registered in list(self.all_commands.values()):
            if isinstance(registered, GroupMixin):
                registered.recursively_remove_all_commands()
            self.remove_command(registered.name)

    def add_command(self, command):
        """Register *command* and all of its aliases in the internal list.

        Usually invoked indirectly via the :meth:`~.GroupMixin.command` or
        :meth:`~.GroupMixin.group` shortcut decorators.

        Raises
        -------
        :exc:`.ClientException`
            The command name or one of its aliases is already registered.
        TypeError
            *command* is not a :class:`.Command` subclass instance.
        """
        if not isinstance(command, Command):
            raise TypeError('The command passed must be a subclass of Command')
        if isinstance(self, Command):
            # Registering onto a Group makes this its subcommand.
            command.parent = self
        if command.name in self.all_commands:
            raise discord.ClientException('Command {0.name} is already registered.'.format(command))
        self.all_commands[command.name] = command
        for alias in command.aliases:
            if alias in self.all_commands:
                raise discord.ClientException('The alias {} is already an existing command or alias.'.format(alias))
            self.all_commands[alias] = command

    def remove_command(self, name):
        """Unregister the command (or alias) called *name*.

        Removing a primary name also drops every alias; removing an alias
        leaves the command itself registered. Returns the removed
        :class:`.Command`, or ``None`` when *name* is unknown.
        """
        removed = self.all_commands.pop(name, None)
        if removed is None:
            return None
        if name in removed.aliases:
            # Only an alias was removed; keep the command and its other names.
            return removed
        # Primary name removed: drop every alias as well.
        for alias in removed.aliases:
            self.all_commands.pop(alias, None)
        return removed

    def walk_commands(self):
        """Recursively yield every command and subcommand registered here."""
        for registered in list(self.all_commands.values()):
            yield registered
            if isinstance(registered, GroupMixin):
                yield from registered.walk_commands()

    def get_command(self, name):
        """Look up a command (or alias) by *name*.

        *name* may be fully qualified (e.g. ``'foo bar'`` retrieves the
        subcommand ``bar`` of group ``foo``). Returns ``None`` when no
        matching command exists.
        """
        parts = name.split()
        found = self.all_commands.get(parts[0])
        if not isinstance(found, GroupMixin):
            return found
        for part in parts[1:]:
            try:
                found = found.all_commands[part]
            except (AttributeError, KeyError):
                return None
        return found

    def command(self, *args, **kwargs):
        """Shortcut decorator: :func:`.command` followed by :meth:`~.GroupMixin.add_command`."""
        def decorator(func):
            created = command(*args, **kwargs)(func)
            self.add_command(created)
            return created
        return decorator

    def group(self, *args, **kwargs):
        """Shortcut decorator: :func:`.group` followed by :meth:`~.GroupMixin.add_command`."""
        def decorator(func):
            created = group(*args, **kwargs)(func)
            self.add_command(created)
            return created
        return decorator
class Group(GroupMixin, Command):
    """A class that implements a grouping protocol for commands to be
    executed as subcommands.
    This class is a subclass of :class:`.Command` and thus all options
    valid in :class:`.Command` are valid in here as well.
    Attributes
    -----------
    invoke_without_command: bool
        Indicates if the group callback should begin parsing and
        invocation only if no subcommand was found. Useful for
        making it an error handling function to tell the user that
        no subcommand was found or to have different functionality
        in case no subcommand was found. If this is ``False``, then
        the group callback will always be invoked first. This means
        that the checks and the parsing dictated by its parameters
        will be executed. Defaults to ``False``.
    """
    def __init__(self, **attrs):
        self.invoke_without_command = attrs.pop('invoke_without_command', False)
        super().__init__(**attrs)

    @asyncio.coroutine
    def invoke(self, ctx):
        # When invoke_without_command is False the group's own callback
        # runs first (checks + argument parsing included).
        early_invoke = not self.invoke_without_command
        if early_invoke:
            yield from self.prepare(ctx)
        # Peek at the next word to decide whether a subcommand follows.
        view = ctx.view
        previous = view.index
        view.skip_ws()
        trigger = view.get_word()
        if trigger:
            ctx.subcommand_passed = trigger
            ctx.invoked_subcommand = self.all_commands.get(trigger, None)
        if early_invoke:
            injected = hooked_wrapped_callback(self, ctx, self.callback)
            yield from injected(*ctx.args, **ctx.kwargs)
        if trigger and ctx.invoked_subcommand:
            # A known subcommand follows: hand off the rest of the view.
            ctx.invoked_with = trigger
            yield from ctx.invoked_subcommand.invoke(ctx)
        elif not early_invoke:
            # undo the trigger parsing
            view.index = previous
            view.previous = previous
            yield from super().invoke(ctx)

    @asyncio.coroutine
    def reinvoke(self, ctx, *, call_hooks=False):
        # Same flow as invoke(), but bypassing checks/cooldowns and with
        # optional hooks (mirrors Command.reinvoke).
        early_invoke = not self.invoke_without_command
        if early_invoke:
            ctx.command = self
            yield from self._parse_arguments(ctx)
            if call_hooks:
                yield from self.call_before_hooks(ctx)
        view = ctx.view
        previous = view.index
        view.skip_ws()
        trigger = view.get_word()
        if trigger:
            ctx.subcommand_passed = trigger
            ctx.invoked_subcommand = self.all_commands.get(trigger, None)
        if early_invoke:
            try:
                yield from self.callback(*ctx.args, **ctx.kwargs)
            except:
                ctx.command_failed = True
                raise
            finally:
                if call_hooks:
                    yield from self.call_after_hooks(ctx)
        if trigger and ctx.invoked_subcommand:
            ctx.invoked_with = trigger
            yield from ctx.invoked_subcommand.reinvoke(ctx, call_hooks=call_hooks)
        elif not early_invoke:
            # undo the trigger parsing
            view.index = previous
            view.previous = previous
            yield from super().reinvoke(ctx, call_hooks=call_hooks)
# Decorators
def command(name=None, cls=None, **attrs):
    """Decorator that turns a coroutine into a :class:`.Command` (or *cls*).

    The ``help`` attribute defaults to the function docstring, cleaned up
    with ``inspect.cleandoc``; a ``bytes`` docstring is decoded as utf-8.
    Checks and cooldowns attached by the :func:`.check`/:func:`.cooldown`
    decorators are collected off the function. You cannot supply your own
    checks through this decorator.

    Parameters
    -----------
    name: str
        The name to create the command with; defaults to the function name.
    cls
        The class to construct with; defaults to :class:`.Command`.
    attrs
        Keyword arguments forwarded to the ``cls`` constructor.

    Raises
    -------
    TypeError
        If the function is not a coroutine or is already a command.
    """
    if cls is None:
        cls = Command

    def decorator(func):
        if isinstance(func, Command):
            raise TypeError('Callback is already a command.')
        if not asyncio.iscoroutinefunction(func):
            raise TypeError('Callback must be a coroutine.')

        # Harvest checks attached by @check (stored newest-first, so reverse).
        try:
            checks = func.__commands_checks__
            checks.reverse()
            del func.__commands_checks__
        except AttributeError:
            checks = []

        # Harvest the cooldown attached by @cooldown, if any.
        try:
            cooldown = func.__commands_cooldown__
            del func.__commands_cooldown__
        except AttributeError:
            cooldown = None

        help_doc = attrs.get('help')
        if help_doc is None:
            help_doc = inspect.getdoc(func)
            if isinstance(help_doc, bytes):
                help_doc = help_doc.decode('utf-8')
        else:
            help_doc = inspect.cleandoc(help_doc)
        attrs['help'] = help_doc

        return cls(name=name or func.__name__, callback=func,
                   checks=checks, cooldown=cooldown, **attrs)
    return decorator
def group(name=None, **attrs):
    """Decorator that turns a coroutine into a :class:`.Group`.

    Identical to :func:`.command` except that the constructed class is
    :class:`.Group` rather than :class:`.Command`.
    """
    return command(name=name, cls=Group, **attrs)
def check(predicate):
    """Decorator that adds *predicate* to a command's :attr:`.Command.checks`.

    The predicate receives a single :class:`.Context` argument. A falsy
    return value raises :exc:`.CheckFailure` at invocation time and is sent
    to :func:`.on_command_error`; exceptions raised inside the predicate
    should derive from :exc:`.CommandError` to be routed the same way,
    otherwise they propagate. Predicates may be regular functions or
    coroutines.

    Example: restricting a command to a single user::

        def check_if_it_is_me(ctx):
            return ctx.message.author.id == 85309593344815104

        @bot.command()
        @commands.check(check_if_it_is_me)
        async def only_for_me(ctx):
            await ctx.send('I know you!')

    or packaged as a reusable decorator::

        def is_me():
            def predicate(ctx):
                return ctx.message.author.id == 85309593344815104
            return commands.check(predicate)
    """
    def decorator(func):
        if isinstance(func, Command):
            # Already a Command: append directly to its check list.
            func.checks.append(predicate)
            return func
        # Plain coroutine: stash the check for the @command decorator to pick up.
        try:
            func.__commands_checks__.append(predicate)
        except AttributeError:
            func.__commands_checks__ = [predicate]
        return func
    return decorator
def has_role(name):
    """:func:`.check` requiring the invoking member to have a role whose
    name is exactly *name* (case sensitive, no normalisation).

    Evaluates to ``False`` in private-message contexts.
    """
    def predicate(ctx):
        if not isinstance(ctx.channel, discord.abc.GuildChannel):
            return False
        return discord.utils.get(ctx.author.roles, name=name) is not None
    return check(predicate)
def has_any_role(*names):
    """:func:`.check` requiring the invoking member to have **at least one**
    of the exact role *names*. Like :func:`.has_role`, matching is exact and
    the check evaluates to ``False`` in private-message contexts.

    Example::

        @bot.command()
        @commands.has_any_role('Library Devs', 'Moderators')
        async def cool(ctx):
            await ctx.send('You are cool indeed')
    """
    def predicate(ctx):
        if not isinstance(ctx.channel, discord.abc.GuildChannel):
            return False
        roles = ctx.author.roles
        return any(discord.utils.get(roles, name=wanted) is not None
                   for wanted in names)
    return check(predicate)
def has_permissions(**perms):
    """:func:`.check` requiring the invoker to have every permission passed
    as a keyword argument, named exactly as the :class:`.discord.Permissions`
    properties.

    Example::

        @bot.command()
        @commands.has_permissions(manage_messages=True)
        async def test(ctx):
            await ctx.send('You can manage messages.')
    """
    def predicate(ctx):
        resolved = ctx.channel.permissions_for(ctx.author)
        for perm, expected in perms.items():
            # Unknown permission names resolve to None and therefore fail.
            if getattr(resolved, perm, None) != expected:
                return False
        return True
    return check(predicate)
def bot_has_role(name):
    """Like :func:`.has_role`, except the check is made against the bot's
    own roles rather than the invoker's.
    """
    def predicate(ctx):
        channel = ctx.channel
        if not isinstance(channel, discord.abc.GuildChannel):
            return False
        return discord.utils.get(channel.guild.me.roles, name=name) is not None
    return check(predicate)
def bot_has_any_role(*names):
    """Like :func:`.has_any_role`, except the check is made against the
    bot's own roles rather than the invoker's.
    """
    def predicate(ctx):
        channel = ctx.channel
        if not isinstance(channel, discord.abc.GuildChannel):
            return False
        bot_roles = channel.guild.me.roles
        return any(discord.utils.get(bot_roles, name=wanted) is not None
                   for wanted in names)
    return check(predicate)
def bot_has_permissions(**perms):
    """Like :func:`.has_permissions`, except the permissions are resolved
    for the bot itself rather than the invoker.
    """
    def predicate(ctx):
        guild = ctx.guild
        # Outside a guild the bot user itself is the permission subject.
        target = ctx.bot.user if guild is None else guild.me
        resolved = ctx.channel.permissions_for(target)
        for perm, expected in perms.items():
            if getattr(resolved, perm, None) != expected:
                return False
        return True
    return check(predicate)
def guild_only():
    """:func:`.check` restricting a command to guild contexts — no private
    messages allowed.

    On failure it raises :exc:`.NoPrivateMessage`, a :exc:`.CheckFailure`
    subclass.
    """
    def predicate(ctx):
        if ctx.guild is not None:
            return True
        raise NoPrivateMessage('This command cannot be used in private messages.')
    return check(predicate)
def is_owner():
    """:func:`.check` requiring the invoker to be the bot's owner, as
    determined by :meth:`.Bot.is_owner`.

    On failure it raises :exc:`.NotOwner`, a :exc:`.CheckFailure` subclass.
    """
    @asyncio.coroutine
    def predicate(ctx):
        owner = yield from ctx.bot.is_owner(ctx.author)
        if not owner:
            raise NotOwner('You do not own this bot.')
        return True
    return check(predicate)
def is_nsfw():
    """:func:`.check` requiring the invocation channel to be an NSFW
    guild text channel.
    """
    def predicate(ctx):
        channel = ctx.channel
        return isinstance(channel, discord.TextChannel) and channel.is_nsfw()
    return check(predicate)
def cooldown(rate, per, type=BucketType.default):
    """Decorator adding a cooldown to a :class:`.Command` or its subclasses.

    A command may be used at most *rate* times every *per* seconds, bucketed
    by *type*, a ``BucketType``:

    - ``BucketType.default`` — one global bucket.
    - ``BucketType.user`` — one bucket per user.
    - ``BucketType.guild`` — one bucket per guild.
    - ``BucketType.channel`` — one bucket per channel.

    A triggered cooldown raises :exc:`.CommandOnCooldown` to
    :func:`.on_command_error` and the local error handler. A command can
    only have a single cooldown.

    Parameters
    ------------
    rate: int
        Number of uses allowed before the cooldown triggers.
    per: float
        Seconds to wait once the cooldown has triggered.
    type: ``BucketType``
        The bucketing strategy for the cooldown.
    """
    def decorator(func):
        spec = Cooldown(rate, per, type)
        if isinstance(func, Command):
            # Already a Command: install the mapping directly.
            func._buckets = CooldownMapping(spec)
        else:
            # Plain coroutine: stash for the @command decorator to pick up.
            func.__commands_cooldown__ = spec
        return func
    return decorator
| |
# -*- coding: utf-8 -*-
""" Configuration management """
import sys
from dynamic_dynamodb.config import config_file_parser
from dynamic_dynamodb.config import command_line_parser
try:
from collections import OrderedDict as ordereddict
except ImportError:
from ordereddict import OrderedDict as ordereddict
# Default values for every supported option, grouped by config section.
# These are the fallbacks used when neither the config file nor the
# command line provides a value.
# NOTE(review): the four '*-alarm-threshold' keys use hyphens while all
# other keys use underscores — presumably intentional (matching the config
# file spelling); confirm before normalizing.
DEFAULT_OPTIONS = {
    'global': {
        # Command line only
        'config': None,
        'daemon': False,
        'instance': 'default',
        'dry_run': False,
        'pid_file_dir': '/tmp',
        'run_once': False,
        # [global]
        'region': 'us-east-1',
        'aws_access_key_id': None,
        'aws_secret_access_key': None,
        'check_interval': 300,
        'circuit_breaker_url': None,
        'circuit_breaker_timeout': 10000.00
    },
    'logging': {
        # [logging]
        'log_file': None,
        'log_level': 'info',
        'log_config_file': None
    },
    # Per-table scaling defaults. The 'gsi' section below mirrors this
    # one (minus the decrease_consumed_* options), so keep them in sync.
    'table': {
        'reads-upper-alarm-threshold': 0,
        'reads-lower-alarm-threshold': 0,
        'writes-upper-alarm-threshold': 0,
        'writes-lower-alarm-threshold': 0,
        'enable_reads_autoscaling': True,
        'enable_writes_autoscaling': True,
        'enable_reads_up_scaling': True,
        'enable_reads_down_scaling': True,
        'enable_writes_up_scaling': True,
        'enable_writes_down_scaling': True,
        'reads_lower_threshold': 30,
        'reads_upper_threshold': 90,
        'throttled_reads_upper_threshold': 0,
        'increase_reads_with': 50,
        'decrease_reads_with': 50,
        'increase_reads_unit': 'percent',
        'decrease_reads_unit': 'percent',
        'writes_lower_threshold': 30,
        'writes_upper_threshold': 90,
        'throttled_writes_upper_threshold': 0,
        'increase_writes_with': 50,
        'decrease_writes_with': 50,
        'increase_writes_unit': 'percent',
        'decrease_writes_unit': 'percent',
        'min_provisioned_reads': None,
        'max_provisioned_reads': None,
        'min_provisioned_writes': None,
        'max_provisioned_writes': None,
        'num_read_checks_before_scale_down': 1,
        'num_write_checks_before_scale_down': 1,
        'num_read_checks_reset_percent': 0,
        'num_write_checks_reset_percent': 0,
        'allow_scaling_down_reads_on_0_percent': False,
        'allow_scaling_down_writes_on_0_percent': False,
        'always_decrease_rw_together': False,
        'lookback_window_start': 15,
        'lookback_period': 5,
        'maintenance_windows': None,
        'sns_topic_arn': None,
        'sns_message_types': [],
        'increase_consumed_reads_unit': None,
        'increase_consumed_reads_with': None,
        'increase_consumed_reads_scale': None,
        'increase_consumed_writes_unit': None,
        'increase_consumed_writes_with': None,
        'increase_consumed_writes_scale': None,
        'increase_throttled_by_provisioned_reads_unit': None,
        'increase_throttled_by_provisioned_reads_scale': None,
        'increase_throttled_by_provisioned_writes_unit': None,
        'increase_throttled_by_provisioned_writes_scale': None,
        'increase_throttled_by_consumed_reads_unit': None,
        'increase_throttled_by_consumed_reads_scale': None,
        'increase_throttled_by_consumed_writes_unit': None,
        'increase_throttled_by_consumed_writes_scale': None,
        'decrease_consumed_reads_unit': None,
        'decrease_consumed_reads_with': None,
        'decrease_consumed_reads_scale': None,
        'decrease_consumed_writes_unit': None,
        'decrease_consumed_writes_with': None,
        'decrease_consumed_writes_scale': None,
        'circuit_breaker_url': None,
        'circuit_breaker_timeout': 10000.00
    },
    # Per-GSI scaling defaults; a subset of the 'table' options above.
    'gsi': {
        'reads-upper-alarm-threshold': 0,
        'reads-lower-alarm-threshold': 0,
        'writes-upper-alarm-threshold': 0,
        'writes-lower-alarm-threshold': 0,
        'enable_reads_autoscaling': True,
        'enable_writes_autoscaling': True,
        'enable_reads_up_scaling': True,
        'enable_reads_down_scaling': True,
        'enable_writes_up_scaling': True,
        'enable_writes_down_scaling': True,
        'reads_lower_threshold': 30,
        'reads_upper_threshold': 90,
        'throttled_reads_upper_threshold': 0,
        'increase_reads_with': 50,
        'decrease_reads_with': 50,
        'increase_reads_unit': 'percent',
        'decrease_reads_unit': 'percent',
        'writes_lower_threshold': 30,
        'writes_upper_threshold': 90,
        'throttled_writes_upper_threshold': 0,
        'increase_writes_with': 50,
        'decrease_writes_with': 50,
        'increase_writes_unit': 'percent',
        'decrease_writes_unit': 'percent',
        'min_provisioned_reads': None,
        'max_provisioned_reads': None,
        'min_provisioned_writes': None,
        'max_provisioned_writes': None,
        'num_read_checks_before_scale_down': 1,
        'num_write_checks_before_scale_down': 1,
        'num_read_checks_reset_percent': 0,
        'num_write_checks_reset_percent': 0,
        'allow_scaling_down_reads_on_0_percent': False,
        'allow_scaling_down_writes_on_0_percent': False,
        'always_decrease_rw_together': False,
        'lookback_window_start': 15,
        'lookback_period': 5,
        'maintenance_windows': None,
        'sns_topic_arn': None,
        'sns_message_types': [],
        'increase_consumed_reads_unit': None,
        'increase_consumed_reads_with': None,
        'increase_consumed_reads_scale': None,
        'increase_consumed_writes_unit': None,
        'increase_consumed_writes_with': None,
        'increase_consumed_writes_scale': None,
        'increase_throttled_by_provisioned_reads_unit': None,
        'increase_throttled_by_provisioned_reads_scale': None,
        'increase_throttled_by_provisioned_writes_unit': None,
        'increase_throttled_by_provisioned_writes_scale': None,
        'increase_throttled_by_consumed_reads_unit': None,
        'increase_throttled_by_consumed_reads_scale': None,
        'increase_throttled_by_consumed_writes_unit': None,
        'increase_throttled_by_consumed_writes_scale': None,
        'circuit_breaker_url': None,
        'circuit_breaker_timeout': 10000.00
    }
}
def get_configuration():
    """ Get the configuration from command line and config files """
    # Command line options always win over config file options.
    cmd_line_options = command_line_parser.parse()
    # If a configuration file is specified, read that as well.
    conf_file_options = None
    if 'config' in cmd_line_options:
        conf_file_options = config_file_parser.parse(
            cmd_line_options['config'])
    configuration = {
        'global': __get_global_options(cmd_line_options, conf_file_options),
        'logging': __get_logging_options(cmd_line_options, conf_file_options),
        'tables': ordereddict()
    }
    # A --table option on the command line means table options come
    # exclusively from the command line; otherwise from the config file.
    if 'table_name' in cmd_line_options:
        configuration['tables'] = __get_cmd_table_options(cmd_line_options)
    else:
        configuration['tables'] = __get_config_table_options(conf_file_options)
    # Enforce some basic sanity rules (these may sys.exit on error).
    __check_gsi_rules(configuration)
    __check_logging_rules(configuration)
    __check_table_rules(configuration)
    return configuration
def __get_cmd_table_options(cmd_line_options):
    """ Get all table options from the command line
    :type cmd_line_options: dict
    :param cmd_line_options: Dictionary with all command line options
    :returns: dict -- E.g. {'table_name': {}}
    """
    table_name = cmd_line_options['table_name']
    table_options = {}
    # Start from the defaults, then let the command line override.
    for option, default in DEFAULT_OPTIONS['table'].items():
        if option in cmd_line_options:
            table_options[option] = cmd_line_options[option]
        else:
            table_options[option] = default
    return {table_name: table_options}
def __get_config_table_options(conf_file_options):
    """ Get all table options from the config file

    Each table starts from DEFAULT_OPTIONS['table'] and is overridden by
    the config file; each GSI gets the same treatment from
    DEFAULT_OPTIONS['gsi'].

    :type conf_file_options: ordereddict
    :param conf_file_options: Dictionary with all config file options
    :returns: ordereddict -- E.g. {'table_name': {}}
    """
    options = ordereddict()
    if not conf_file_options:
        return options
    for table_name in conf_file_options['tables']:
        table_conf = conf_file_options['tables'][table_name]
        options[table_name] = {}
        # Regular table options
        for option in DEFAULT_OPTIONS['table'].keys():
            options[table_name][option] = DEFAULT_OPTIONS['table'][option]
            if option not in table_conf:
                continue
            if option == 'sns_message_types':
                # Stored in the config file as a comma separated string
                try:
                    raw_list = table_conf[option]
                    options[table_name][option] = \
                        [i.strip() for i in raw_list.split(',')]
                # Was a bare `except:`; narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed. On failure
                # the default value is kept.
                except Exception:
                    print(
                        'Error parsing the "sns-message-types" '
                        'option: {0}'.format(table_conf[option]))
            else:
                options[table_name][option] = table_conf[option]
        # GSI specific options
        if 'gsis' in table_conf:
            for gsi_name in table_conf['gsis']:
                gsi_conf = table_conf['gsis'][gsi_name]
                for option in DEFAULT_OPTIONS['gsi'].keys():
                    opt = DEFAULT_OPTIONS['gsi'][option]
                    if 'gsis' not in options[table_name]:
                        options[table_name]['gsis'] = {}
                    if gsi_name not in options[table_name]['gsis']:
                        options[table_name]['gsis'][gsi_name] = {}
                    if option not in gsi_conf:
                        options[table_name]['gsis'][gsi_name][option] = opt
                        continue
                    if option == 'sns_message_types':
                        try:
                            raw_list = gsi_conf[option]
                            opt = [i.strip() for i in raw_list.split(',')]
                        # Narrowed from a bare `except:` (see above).
                        except Exception:
                            print(
                                'Error parsing the "sns-message-types" '
                                'option: {0}'.format(gsi_conf[option]))
                    else:
                        opt = gsi_conf[option]
                    options[table_name]['gsis'][gsi_name][option] = opt
    return options
def __get_global_options(cmd_line_options, conf_file_options=None):
    """ Get all global options
    :type cmd_line_options: dict
    :param cmd_line_options: Dictionary with all command line options
    :type conf_file_options: dict
    :param conf_file_options: Dictionary with all config file options
    :returns: dict
    """
    merged = {}
    # Precedence: command line > config file > built-in default.
    for option, default in DEFAULT_OPTIONS['global'].items():
        value = default
        if conf_file_options and option in conf_file_options:
            value = conf_file_options[option]
        if cmd_line_options and option in cmd_line_options:
            value = cmd_line_options[option]
        merged[option] = value
    return merged
def __get_logging_options(cmd_line_options, conf_file_options=None):
    """ Get all logging options
    :type cmd_line_options: dict
    :param cmd_line_options: Dictionary with all command line options
    :type conf_file_options: dict
    :param conf_file_options: Dictionary with all config file options
    :returns: dict
    """
    merged = {}
    # Precedence: command line > config file > built-in default.
    for option, default in DEFAULT_OPTIONS['logging'].items():
        value = default
        if conf_file_options and option in conf_file_options:
            value = conf_file_options[option]
        if cmd_line_options and option in cmd_line_options:
            value = cmd_line_options[option]
        merged[option] = value
    return merged
def __check_gsi_rules(configuration):
    """ Do some basic checks on the configuration """
    valid_units = ['percent', 'units']
    # Mandatory unit options and the message printed if they are invalid.
    required_unit_checks = [
        ('increase_reads_unit',
         'increase-reads-unit must be set to either percent or units'),
        ('decrease_reads_unit',
         'decrease-reads-unit must be set to either percent or units'),
        ('increase_writes_unit',
         'increase-writes-unit must be set to either percent or units'),
        ('decrease_writes_unit',
         'decrease-writes-unit must be set to either percent or units')
    ]
    # Optional unit options may be unset/None; only validate when truthy.
    optional_unit_checks = [
        ('increase_consumed_reads_unit',
         'increase-consumed-reads-unit must be set to '
         'either percent or units, or left unset'),
        ('increase_consumed_writes_unit',
         'increase-consumed-writes-unit must be set to '
         'either percent or units, or left unset'),
        ('increase_throttled_by_consumed_reads_unit',
         'increase-throttled-by-consumed-reads-unit must be set to '
         'either percent or units, or left unset'),
        ('increase_throttled_by_consumed_writes_unit',
         'increase-throttled-by-consumed-writes-unit must be set to '
         'either percent or units, or left unset'),
        ('increase_throttled_by_provisioned_reads_unit',
         'increase-throttled-by-provisioned-reads-unit must be set '
         'to either percent or units, or left unset'),
        ('increase_throttled_by_provisioned_writes_unit',
         'increase-throttled-by-provisioned-writes-unit must be set '
         'to either percent or units, or left unset')
    ]
    valid_sns_message_types = [
        'scale-up',
        'scale-down',
        'high-throughput-alarm',
        'low-throughput-alarm']
    # Options that may not be configured below 1
    positive_options = [
        'reads_lower_threshold',
        'reads_upper_threshold',
        'increase_reads_with',
        'decrease_reads_with',
        'writes_lower_threshold',
        'writes_upper_threshold',
        'increase_writes_with',
        'decrease_writes_with',
        'min_provisioned_reads',
        'max_provisioned_reads',
        'min_provisioned_writes',
        'max_provisioned_writes',
        'increase_consumed_reads_with',
        'increase_consumed_writes_with',
        'decrease_consumed_reads_with',
        'decrease_consumed_writes_with'
    ]
    # Config options without a mandatory default
    # should be allowed a None value
    non_default = [
        'increase_consumed_reads_with',
        'increase_consumed_writes_with',
        'decrease_consumed_reads_with',
        'decrease_consumed_writes_with'
    ]
    for table_name in configuration['tables']:
        if 'gsis' not in configuration['tables'][table_name]:
            continue
        for gsi_name in configuration['tables'][table_name]['gsis']:
            gsi = configuration['tables'][table_name]['gsis'][gsi_name]
            for option, message in required_unit_checks:
                if gsi[option] not in valid_units:
                    print(message)
                    sys.exit(1)
            for option, message in optional_unit_checks:
                if option in gsi and gsi[option] \
                        and gsi[option] not in valid_units:
                    print(message)
                    sys.exit(1)
            # Check lookback-window start
            if gsi['lookback_window_start'] < 1:
                print(
                    'lookback-window-start must be a value higher than 1, '
                    'as DynamoDB sends CloudWatch data every minute')
                sys.exit(1)
            # Check sns-message-types; unknown types are dropped with a
            # warning rather than aborting
            if gsi['sns_message_types']:
                for sns_type in gsi['sns_message_types']:
                    if sns_type not in valid_sns_message_types:
                        print('Warning: Invalid sns-message-type: {0}'.format(
                            sns_type))
                        gsi['sns_message_types'].remove(sns_type)
            # NOTE(review): options with a None default (e.g.
            # min_provisioned_reads) hit `gsi[option] < 1` / int(None)
            # below when unset — verify expected behavior on Python 3.
            for option in positive_options:
                if (option in non_default
                        and option in gsi
                        and gsi[option]
                        and gsi[option] < 1):
                    print('{0} may not be lower than 1 for GSI {1}'.format(
                        option, gsi_name))
                    sys.exit(1)
                if (option in gsi
                        and option not in non_default
                        and gsi[option] < 1):
                    print('{0} may not be lower than 1 for GSI {1}'.format(
                        option, gsi_name))
                    sys.exit(1)
            if (int(gsi['min_provisioned_reads']) >
                    int(gsi['max_provisioned_reads'])):
                print(
                    'min-provisioned-reads ({0}) may not be higher than '
                    'max-provisioned-reads ({1}) for GSI {2}'.format(
                        gsi['min_provisioned_reads'],
                        gsi['max_provisioned_reads'],
                        gsi_name))
                sys.exit(1)
            elif (int(gsi['min_provisioned_writes']) >
                    int(gsi['max_provisioned_writes'])):
                print(
                    'min-provisioned-writes ({0}) may not be higher than '
                    'max-provisioned-writes ({1}) for GSI {2}'.format(
                        gsi['min_provisioned_writes'],
                        gsi['max_provisioned_writes'],
                        gsi_name))
                sys.exit(1)
def __check_logging_rules(configuration):
    """ Check that the logging values are proper """
    valid_log_levels = ['debug', 'info', 'warning', 'error']
    # Case-insensitive comparison against the allowed levels.
    level = configuration['logging']['log_level'].lower()
    if level not in valid_log_levels:
        print('Log level must be one of {0}'.format(
            ', '.join(valid_log_levels)))
        sys.exit(1)
def __check_table_rules(configuration):
    """ Do some basic checks on the configuration """
    valid_units = ['percent', 'units']
    # Mandatory unit options and the message printed if they are invalid.
    required_unit_checks = [
        ('increase_reads_unit',
         'increase-reads-unit must be set to either percent or units'),
        ('decrease_reads_unit',
         'decrease-reads-unit must be set to either percent or units'),
        ('increase_writes_unit',
         'increase-writes-unit must be set to either percent or units'),
        ('decrease_writes_unit',
         'decrease-writes-unit must be set to either percent or units')
    ]
    # Optional unit options may be unset/None; only validate when truthy.
    optional_unit_checks = [
        ('increase_consumed_reads_unit',
         'increase-consumed-reads-unit must be set to '
         'either percent or units, or left unset'),
        ('increase_consumed_writes_unit',
         'increase-consumed-writes-unit must be set to '
         'either percent or units, or left unset'),
        ('increase_throttled_by_consumed_reads_unit',
         'increase-throttled-by-consumed-reads-unit must be set to '
         'either percent or units, or left unset'),
        ('increase_throttled_by_consumed_writes_unit',
         'increase-throttled-by-consumed-writes-unit must be set to '
         'either percent or units, or left unset'),
        ('increase_throttled_by_provisioned_reads_unit',
         'increase-throttled-by-provisioned-reads-unit must be set to '
         'either percent or units, or left unset'),
        ('increase_throttled_by_provisioned_writes_unit',
         'increase-throttled-by-provisioned-writes-unit must be set to '
         'either percent or units, or left unset')
    ]
    valid_sns_message_types = [
        'scale-up',
        'scale-down',
        'high-throughput-alarm',
        'low-throughput-alarm']
    # Options that may not be configured below 1
    positive_options = [
        'reads_lower_threshold',
        'reads_upper_threshold',
        'increase_reads_with',
        'decrease_reads_with',
        'writes_lower_threshold',
        'writes_upper_threshold',
        'increase_writes_with',
        'decrease_writes_with',
        'min_provisioned_reads',
        'max_provisioned_reads',
        'min_provisioned_writes',
        'max_provisioned_writes',
        'num_read_checks_before_scale_down',
        'num_write_checks_before_scale_down',
        'increase_consumed_reads_with',
        'increase_consumed_writes_with'
    ]
    # Config options without a mandatory default
    # should be allowed a None value
    non_default = [
        'increase_consumed_reads_with',
        'increase_consumed_writes_with'
    ]
    for table_name in configuration['tables']:
        table = configuration['tables'][table_name]
        for option, message in required_unit_checks:
            if table[option] not in valid_units:
                print(message)
                sys.exit(1)
        for option, message in optional_unit_checks:
            if option in table and table[option] \
                    and table[option] not in valid_units:
                print(message)
                sys.exit(1)
        # Check lookback-window start
        if table['lookback_window_start'] < 1:
            print(
                'lookback-window-start must be a value higher than 1, '
                'as DynamoDB sends CloudWatch data every minute')
            sys.exit(1)
        # Check sns-message-types; unknown types are dropped with a
        # warning rather than aborting
        if table['sns_message_types']:
            for sns_type in table['sns_message_types']:
                if sns_type not in valid_sns_message_types:
                    print('Warning: Invalid sns-message-type: {0}'.format(
                        sns_type))
                    table['sns_message_types'].remove(sns_type)
        # NOTE(review): options with a None default (e.g.
        # min_provisioned_reads) hit `table[option] < 1` / int(None)
        # below when unset — verify expected behavior on Python 3.
        for option in positive_options:
            if (option in non_default
                    and option in table
                    and table[option] and table[option] < 1):
                print('{0} may not be lower than 1 for table {1}'.format(
                    option, table_name))
                sys.exit(1)
            if (option in table
                    and option not in non_default
                    and table[option] < 1):
                print('{0} may not be lower than 1 for table {1}'.format(
                    option, table_name))
                sys.exit(1)
        if (int(table['min_provisioned_reads']) >
                int(table['max_provisioned_reads'])):
            print(
                'min_provisioned_reads ({0}) may not be higher than '
                'max_provisioned_reads ({1}) for table {2}'.format(
                    table['min_provisioned_reads'],
                    table['max_provisioned_reads'],
                    table_name))
            sys.exit(1)
        elif (int(table['min_provisioned_writes']) >
                int(table['max_provisioned_writes'])):
            print(
                'min_provisioned_writes ({0}) may not be higher than '
                'max_provisioned_writes ({1}) for table {2}'.format(
                    table['min_provisioned_writes'],
                    table['max_provisioned_writes'],
                    table_name))
            sys.exit(1)
| |
"""
Created by Anne Pajon on 11 Apr 2013
Copyright (c) 2012 Cancer Research UK - Cambridge Institute.
This source file is licensed under The MIT License (MIT).
And many ideas have been taken from:
- https://github.com/chapmanb/bcbb/blob/master/galaxy/galaxy_fabfile.py
- https://github.com/chapmanb/cloudbiolinux
It is a fabric deployment file to set up the chipseq pipeline developed by
Thomas Carroll. Fabric (http://docs.fabfile.org) is used to manage the automation
of the installation.
Usage:
fab -f scripts/chipseq_installer.py local deploy > chipseq_installer.out
"""
import os
from contextlib import contextmanager
from fabric.api import *
from fabric.contrib.files import *
from fabric.operations import local as lrun
import yaml
# -- Common setup
# Fabric runs everything on the local machine.
env.hosts = ['localhost']
# NOTE(review): assumes fab is invoked from the project root (uses $PWD).
env.project_dir = os.getenv('PWD')
# Working/installation directories under the project root.
env.tmp_dir = os.path.join(env.project_dir, 'tmp')
env.bin_dir = os.path.join(env.project_dir, 'bin')
env.lib_dir = os.path.join(env.project_dir, 'lib')
# Genome annotation locations.
env.annotation_dir = os.path.join(env.project_dir, 'annotation')
env.grch37_dir = os.path.join(env.annotation_dir, "grch37_ensembl")
env.mm9_dir = os.path.join(env.annotation_dir, "mm9_Ensembl")
# Test data locations.
env.test_dir = os.path.join(env.project_dir, "chipseq-test")
env.testfq_dir = os.path.join(env.test_dir, "fqdirectory")
# Tool-specific installation directories.
env.r_lib_dir = os.path.join(env.project_dir, 'lib/R/library')
env.perl_dir = os.path.join(env.bin_dir, 'perl')
env.meme_dir = os.path.join(env.bin_dir, 'meme')
env.sicer_dir = os.path.join(env.bin_dir, 'sicer')
env.java_dir = os.path.join(env.lib_dir, 'jdk1.7.0_51')
# Unpacked installer and pipeline checkouts.
env.chipseq_installer = os.path.join(env.project_dir, 'chipseq-installer-master')
env.chipseq_pipeline = os.path.join(env.project_dir, 'chipseq-pipeline-master')
env.chipseq_path = os.path.join(env.chipseq_pipeline, 'Process10')
env.chipseq_config_path = os.path.join(env.chipseq_path, 'Config')
env.use_sudo = False
# ================================================================================
# == Host specific setup
def local():
    """Setup environment for local installation in bash shell for running chipseq jobs on the cluster.
    """
    # R is installed under the project directory itself.
    env.r_dir = env.project_dir
    env.shell = "/bin/bash"
    # bash flavours of the env file and virtualenv activate script.
    env.env_setup = ('env.sh')
    env.activate = 'activate'
def local_csh():
    """Setup environment for local installation in csh shell for running chipseq jobs on the cluster.
    """
    # Same layout as local(), but csh flavours of the scripts.
    env.r_dir = env.project_dir
    env.shell = "/bin/csh"
    env.env_setup = ('env_csh.sh')
    env.activate = 'activate.csh'
# ================================================================================
# == Fabric instructions
def deploy():
    """Setup environment, install dependencies and tools
    and deploy chipseq pipeline
    """
    # Order matters: each step relies on artefacts of the previous one.
    setup_environment()
    install_dependencies()
    install_tools()
    install_data()
    install_chipseq()
    install_test()
def deploy_withextras():
    """Setup environment, install dependencies and tools
    and deploy chipseq pipeline with extras such as atlas and openssl
    """
    setup_environment()
    # The extras must be in place before the dependent installs below.
    install_atlas()  # needed for installing SciPy library
    install_openssl()  # needed for ucsc tools and perl
    install_dependencies()
    install_tools()
    install_data()
    install_chipseq()
    install_test()
# ================================================================================
# == Decorators and build utilities
def setup_environment():
    """Copy adhoc environment variables, set CHIPSEQ_ROOT path and create tmp directory
    """
    # Escape '/' so the project path is usable inside a sed s/// pattern.
    sed_chipseq_root = env.project_dir.replace('/', '\/')
    setup_ori = os.path.join(env.chipseq_installer, env.env_setup)
    setup_dest = os.path.join(env.project_dir, env.env_setup)
    # Substitute the '/Path/To/Edit/' placeholder in the template env file
    # with the actual project directory, writing the result to the project.
    lrun("sed 's/\/Path\/To\/Edit\//%s/' %s > %s" % (sed_chipseq_root, setup_ori, setup_dest))
    _make_dir(env.tmp_dir)
def lexists(path):
    # Local-filesystem existence check (fabric's `exists` probes remotely).
    return os.path.exists(path)
def vlrun(command):
    """Run a command in a virtual environment. This prefixes the run command with the source command.
    Usage:
        vlrun('pip install tables')
    """
    # Activate the project virtualenv and the generated env file first so
    # the command sees the project's PATH/LD_LIBRARY_PATH etc.
    source = 'source %(project_dir)s/bin/%(activate)s && source %(project_dir)s/%(env_setup)s && ' % env
    return lrun(source + command, shell='%s' % env.shell)
def _if_not_python_lib(library):
    """Decorator that runs the wrapped installer only when *library* cannot
    be imported in the virtual environment.

    :param library: Python module name, probed with
        ``python -c 'import <library>'`` inside the virtualenv
    :returns: decorator; the wrapped function is skipped (returns None)
        when the import already succeeds
    """
    from functools import wraps

    def argcatcher(func):
        # functools.wraps preserves the installer's name/docstring so
        # fabric task listings and logs stay meaningful.
        @wraps(func)
        def decorator(*args, **kwargs):
            with settings(warn_only=True):
                result = vlrun("python -c 'import %s'" % library)
                if result.failed:
                    return func(*args, **kwargs)
        return decorator
    return argcatcher
def _make_dir(path):
    # Create *path* (with parents) only when missing; warn_only stops the
    # failing `test -d` probe from aborting the fabric run.
    with settings(warn_only=True):
        if lrun("test -d %s" % path).failed:
            lrun("mkdir -p %s" % path)
def _get_expected_file(path, url):
    """Derive the archive name, unpacked directory name and extraction
    command for a download URL.

    :param path: unused; kept for interface compatibility with callers
    :param url: download URL ending in a known archive extension
    :returns: (archive_filename, directory_name, extract_command) tuple
    :raises ValueError: when the URL has no recognised archive extension
    """
    tar_file = os.path.split(url)[-1]
    # Drop SCHILY/LIBARCHIVE pax headers that some tarballs carry.
    safe_tar = "--pax-option='delete=SCHILY.*,delete=LIBARCHIVE.*'"
    exts = {(".tar.gz", ".tgz") : "tar %s -xzpf" % safe_tar,
            (".tar.xz",) : "tar %s -xJpf" % safe_tar,
            (".tar.bz2",): "tar %s -xjpf" % safe_tar,
            (".zip",) : "unzip -o"}
    # .items() instead of the Python-2-only .iteritems(): identical
    # behavior on Python 2, and no AttributeError on Python 3.
    for ext_choices, tar_cmd in exts.items():
        for ext in ext_choices:
            if tar_file.endswith(ext):
                return tar_file, tar_file[:-len(ext)], tar_cmd
    raise ValueError("Did not find extract command for %s" % url)
def _safe_dir_name(path, dir_name, need_dir=True):
    """Locate the directory an archive actually unpacked to under *path*.

    Tarball top-level directories often differ slightly from the archive
    name; try common suffix variations first, then fuzzy-glob matches.
    Returns the directory name, or None when need_dir is False and
    nothing matched.
    """
    replace_try = ["", "-src", "_core"]
    for replace in replace_try:
        check = dir_name.replace(replace, "")
        if lexists(os.path.join(path, check)):
            return check
    # still couldn't find it, it's a nasty one
    for check_part in (dir_name.split("-")[0].split("_")[0],
                       dir_name.split("-")[-1].split("_")[-1],
                       dir_name.split(".")[0]):
        with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                      warn_only=True):
            # NOTE(review): relies on lrun() returning string-like output
            # that supports .split — confirm with fabric's local() result.
            dirs = lrun("ls -d1 %s/*%s*/" % (path, check_part)).split("\n")
        dirs = [x for x in dirs if "cannot access" not in x and "No such" not in x]
        if len(dirs) == 1:
            return dirs[0]
    if need_dir:
        raise ValueError("Could not find directory %s" % dir_name)
def _fetch_and_unpack(path, url, need_dir=True, wget_options=''):
    """Download *url*, extract it, and return the unpacked directory name.

    Any previously downloaded copy of the archive under *path* is removed
    first so the fetch is always fresh.
    """
    tar_file, dir_name, tar_cmd = _get_expected_file(path, url)
    if lexists(os.path.join(path, tar_file)):
        lrun("rm -rf %s" % os.path.join(path, tar_file))
    # NOTE(review): wget writes into the current working directory, not
    # *path* — callers appear to wrap this in lcd(path); confirm.
    lrun("wget --no-check-certificate %s %s" % (wget_options, url))
    vlrun("%s %s" % (tar_cmd, tar_file))
    return _safe_dir_name(path, dir_name, need_dir)
def _fetch(path, url):
    """Download *url* into *path* unless the file is already present."""
    tar_file = os.path.split(url)[-1]
    if not lexists(os.path.join(path, tar_file)):
        lrun("wget -r %s -O %s" % (url, os.path.join(path, tar_file)))
def _fetch_and_unpack_genome(path, url):
    """Download a gzipped genome file and decompress it in place."""
    tar_file = os.path.split(url)[-1]
    if not lexists(os.path.join(path, tar_file)):
        # NOTE(review): unlike _fetch, this writes to the current working
        # directory (bare tar_file), not os.path.join(path, tar_file) —
        # confirm callers lcd() into *path* first.
        lrun("wget -r %s -O %s" % (url, tar_file))
    lrun("gzip -d -r %s" % tar_file)
def _configure_make(env, options=''):
    """Standard autotools build installed under the project prefix.

    Note: the *env* parameter shadows the module-level fabric env on
    purpose — callers pass the same object in.
    """
    vlrun("./configure --disable-error --prefix=%s %s" % (env.project_dir, options))
    vlrun("make")
    vlrun("make install")
def _get_install(url, env, make_command, make_options=''):
    """Retrieve source from a URL and install in our system directory.
    """
    # Download/extract in tmp, then run the supplied build routine
    # (e.g. _configure_make) from inside the unpacked directory.
    with lcd(env.tmp_dir):
        dir_name = _fetch_and_unpack(env.tmp_dir, url)
        with lcd(dir_name):
            make_command(env, make_options)
# ================================================================================
# == Required dependencies to install chipseq pipeline
def install_dependencies():
    """Install chipseq dependencies:
    - tar
    - Perl & libraries
    - Cairo
    - R & libraries
    - Python libraries
    - rsync
    - git
    - Java
    - Richard Bowers' workflow
    """
    # Installed in dependency order (e.g. tar before anything unpacked
    # with it, Cairo before R).
    install_tar()
    install_perl()
    install_perl_libraries()
    install_cairo()
    install_r()
    install_r_libraries()
    install_python_libraries()
    install_rsync()
    install_git()
    install_java()
    install_workflow()
def install_tar():
    """Install tar 1.27 with xz 5.0.5
    to uncompress xz archive
    """
    xz_url = "http://tukaani.org/xz/xz-5.0.5.tar.gz"
    url = "http://ftp.gnu.org/gnu/tar/tar-1.27.tar.gz"
    # xz first so the new tar can be built with xz support available.
    _get_install(xz_url, env, _configure_make)
    _get_install(url, env, _configure_make)
def install_atlas():
    """Install atlas 3.10.1
    Atlas may need to be installed to have numpy anc scipy installed
    """
    lapack_url = "http://www.netlib.org/lapack/lapack-3.4.1.tgz"
    lapack_tar = os.path.join(env.tmp_dir, 'lapack-3.4.1.tgz')
    atlas_url = "http://sourceforge.net/projects/math-atlas/files/Stable/3.10.1/atlas3.10.1.tar.bz2"
    atlas_dir = "ATLAS3.10.1"
    atlas_lib = os.path.join(env.lib_dir, 'atlas')
    _make_dir(atlas_lib)
    with lcd(env.tmp_dir):
        # ATLAS builds LAPACK from the (unextracted) tarball, so only
        # download it; the archive itself unpacks to 'ATLAS'.
        lrun("wget %s" % lapack_url)
        dir_name = _fetch_and_unpack(env.tmp_dir, atlas_url)
        lrun("mv ATLAS %s" % atlas_dir)
        with lcd(atlas_dir):
            # ATLAS requires an out-of-tree build directory.
            _make_dir("linux_install")
            with lcd("linux_install"):
                lrun("../configure -b 64 -D c -DPentiumCPS=2400 --shared --prefix=%s --with-netlib-lapack-tarfile=%s" % (atlas_lib, lapack_tar))
                lrun("make build")
                lrun("make check")
                lrun("make ptcheck")
                lrun("make install")
    with lcd(env.lib_dir):
        # all shared lib needs to be moved from lib/atlas/lib to lib/atlas to be picked up by scipy installer
        lrun("mv atlas/lib/* atlas/.")
def install_cairo():
    """Install cairo 1.12.16
    Needed when no X11 support available
    """
    pixman_url = "http://www.cairographics.org/releases/pixman-0.30.2.tar.gz"
    cairo_url = "http://www.cairographics.org/releases/cairo-1.12.16.tar.xz"
    # pixman is a build dependency of cairo, so install it first.
    _get_install(pixman_url, env, _configure_make)
    _get_install(cairo_url, env, _configure_make, "--disable-static --disable-gobject")
def install_python_libraries():
    """Install Python libraries
    """
    # Pinned versions, installed into the project virtualenv; order
    # matters (numpy/cython before the packages that build against them).
    vlrun("pip install fluent-logger==0.3.3")
    vlrun("pip install nose==1.3.0")
    vlrun("pip install numpy==1.7.1")
    vlrun("pip install cython==0.19.2")
    vlrun("pip install numexpr==2.2.2")
    vlrun("pip install pyyaml==3.10")
    vlrun("pip install rpy2==2.3.8")
    vlrun("pip install pysam==0.7.4")
    vlrun("pip install scipy==0.12.1")
    vlrun("pip install bx-python==0.7.1")
    vlrun("pip install configparser")
    vlrun("pip install biopython==1.62")
    # RPy needs source patches, handled separately below.
    _install_rpy_lib()
@_if_not_python_lib("rpy")
def _install_rpy_lib():
    """Install RPy 1.0.3
    """
    url = "http://sourceforge.net/projects/rpy/files/rpy/1.0.3/rpy-1.0.3.tar.gz"
    with lcd(env.tmp_dir):
        dir_name = _fetch_and_unpack(env.tmp_dir, url)
        with lcd(dir_name):
            # Patch the sources for newer R releases before building:
            # widen the version regex and swap the removed Rdevices.h
            # header for Rembedded.h.
            lrun("sed -i 's/\[0\-9\]/\[0\-9\]\+/g' rpy_tools.py")
            lrun("sed -i 's/Rdevices.h/Rembedded.h/g' src/RPy.h")
            vlrun("python setup.py install")
def install_r():
    """Install R 2.15.0
    """
    _make_dir(env.r_lib_dir)
    url = "http://cran.r-project.org/src/base/R-2/R-2.15.0.tar.gz"
    # Shared-library build so rpy/rpy2 can link against R.
    option = "--enable-R-shlib"
    # Skip the build when an R binary is already present.
    if not lexists(os.path.join(env.r_dir, "bin/R")):
        _get_install(url, env, _configure_make, option)
        # create symlinks in bin for installation on mac only
        if not lexists(os.path.join(env.bin_dir, "R")):
            lrun('ln -fs %(r_dir)s/bin/R %(bin_dir)s/R' % env)
            lrun('ln -fs %(r_dir)s/bin/Rscript %(bin_dir)s/Rscript' % env)
def install_r_libraries():
    """Install R libraries listed in r-libraries.yaml needed to run chipseq pipeline

    Generates a temporary ``install_packages.R`` script containing the
    repository setup, the Bioconductor and CRAN package installs and a set
    of archived tarball installs, runs it through Rscript and removes it.
    """
    # Load the list of R libraries to install.  The original version leaked
    # the file handle; the context manager closes it deterministically.
    # NOTE(review): yaml.load without an explicit Loader executes arbitrary
    # tags; fine for this trusted local config, but consider yaml.safe_load.
    with open(os.path.join(env.chipseq_installer, "scripts/r-libraries.yaml"), 'r') as config_file:
        config = yaml.load(config_file)
    # Create an Rscript file with install details.
    out_file = "install_packages.R"
    if lexists(out_file):
        lrun("rm -f %s" % out_file)
    lrun("touch %s" % out_file)

    def append(snippet):
        # Append one R snippet to the generated install script.
        lrun("echo '%s' >> %s" % (snippet, out_file))

    # Point at the configured CRAN mirror and source the Bioconductor
    # bootstrap script.
    append("""
cran.repos <- getOption(\"repos\")
cran.repos[\"CRAN\" ] <- \"%s\"
options(repos=cran.repos)
source(\"%s\")
""" % (config["cranrepo"], config["biocrepo"]))
    # Bioconductor packages.
    append("""
bioc.pkgs <- c(%s)
""" % (", ".join('"%s"' % p for p in config['bioc'])))
    append("""
biocLite(lib=\"%(r_lib_dir)s\",lib.loc=\"%(r_lib_dir)s\",ask=F)
biocLite(bioc.pkgs,lib=\"%(r_lib_dir)s\",lib.loc=\"%(r_lib_dir)s\",ask=F)
""" % env)
    # CRAN packages.
    append("""
std.pkgs <- c(%s)
""" % (", ".join('"%s"' % p for p in config['cran'])))
    append("""
install.packages(std.pkgs,lib=\"%(r_lib_dir)s\")
""" % env)
    # Archived/contributed packages that are no longer installable from a
    # repository: download the tarball to tmp, then install from file.
    # (Previously six hand-copied download/install pairs.)
    tarball_urls = [
        "http://cran.r-project.org/src/contrib/Archive/gplots/gplots_2.10.1.tar.gz",
        "http://compbio.med.harvard.edu/Supplements/ChIP-seq/spp_1.11.tar.gz",
        "http://genometricorr.sourceforge.net/R/src/contrib/GenometriCorr_1.1.9.tar.gz",
        "http://cran.r-project.org/src/contrib/Archive/Hmisc/Hmisc_3.10-1.1.tar.gz",
        "http://www.rforge.net/src/contrib/GDD_0.1-13.tar.gz",
        "http://cran.r-project.org/src/contrib/Archive/gridSVG/gridSVG_0.9-1.tar.gz",
    ]
    for url in tarball_urls:
        tarball = os.path.join(env.tmp_dir, os.path.basename(url))
        append("""
download.file(\"%s\",\"%s\")
""" % (url, tarball))
        append("""
install.packages(\"%s\",lib=\"%s\")
""" % (tarball, env.r_lib_dir))
    # Run the script and then get rid of it
    vlrun("%s %s" % (os.path.join(env.bin_dir, "Rscript"), out_file))
    lrun("rm -f %s" % out_file)
def install_perl():
    """Install perl 5.18.0
    """
    source_url = "http://www.cpan.org/src/5.0/perl-5.18.0.tar.gz"
    with lcd(env.tmp_dir):
        src_dir = _fetch_and_unpack(env.tmp_dir, source_url)
        if not lexists(env.perl_dir):
            _make_dir(env.perl_dir)
        with lcd(src_dir):
            # Non-interactive Configure (-de) with a private prefix.
            lrun("sh Configure -de -Dprefix='%s'" % (env.perl_dir))
            lrun("make")
            lrun("make install")
def install_perl_libraries():
    """Install perl libraries

    Bootstraps cpanminus, then installs the CPAN modules the pipeline needs.
    stdin is redirected from /dev/null to keep cpan non-interactive.
    """
    lrun("%s/bin/cpan App::cpanminus < /dev/null" % (env.perl_dir))
    for module in ("HTML::PullParser", "HTML::Template", "LWP",
                   "SOAP::Lite", "XML::Simple"):
        lrun("%s/bin/cpanm --skip-installed --notest %s < /dev/null"
             % (env.perl_dir, module))
def install_rsync():
    """Install rsync 3.1.0
    """
    source_url = "http://rsync.samba.org/ftp/rsync/src/rsync-3.1.0.tar.gz"
    _get_install(source_url, env, _configure_make)
def install_git():
    """Install git 1.8.4.2
    """
    source_url = "http://git-core.googlecode.com/files/git-1.8.4.2.tar.gz"
    with lcd(env.tmp_dir):
        src_dir = _fetch_and_unpack(env.tmp_dir, source_url)
        with lcd(src_dir):
            for make_target in ("all", "install"):
                lrun("make prefix=%s %s" % (env.project_dir, make_target))
def install_java():
    """Install Java 7
    http://download.oracle.com/otn-pub/java/jdk/7u51-b13/jdk-7u51-linux-x64.tar.gz
    """
    tar_file = "jdk-7u51-linux-x64.tar.gz"
    with lcd(env.tmp_dir):
        # Oracle only serves the JDK when the license-acceptance cookie is sent.
        lrun('wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/7u51-b13/%s -O %s' % (tar_file, tar_file))
        lrun("tar zxvf %s -C %s" % (tar_file, env.lib_dir))
def install_workflow():
    """Install Richard Bower CRUK-CI workflow manager
    Checkout the workflow manager from repository.
    """
    bundled_copy = os.path.join(env.chipseq_installer, "workflow-manager")
    with lcd(env.lib_dir):
        lrun('cp -r %s .' % bundled_copy)
# ================================================================================
# == Required specific tools to install chipseq pipeline
def install_tools():
    """Install chipseq specific tools:
    - UCSC tools: liftOver, TwoBitToFa, FaToTwoBit, BedToBigBed, WigToBigWig, BedGraphToBigWig
    - samtools
    - BEDTools
    - picard
    - bwa
    - macs
    - meme
    - sicer
    """
    # Run each tool installer in order; each is idempotent on its own.
    installers = (
        install_ucsc_tools,
        install_samtools,
        install_bedtools,
        install_picard,
        install_bwa,
        install_macs,
        install_meme,
        install_sicer,
        install_gtf2bed,
    )
    for installer in installers:
        installer()
def install_gtf2bed():
    """Install gtf2bed from trunk
    """
    script_url = "https://ea-utils.googlecode.com/svn/trunk/clipper/gtf2bed"
    with lcd(env.bin_dir):
        # The script ships without an extension; save it as gtf2bed.pl.
        lrun("wget %s -O gtf2bed.pl" % (script_url))
def install_openssl():
    """Install openssl 1.0.1e
    For UCSC tools that gives libssl.so.10 error while loading shared libraries
    """
    source_url = "http://www.openssl.org/source/openssl-1.0.1e.tar.gz"
    with lcd(env.tmp_dir):
        src_dir = _fetch_and_unpack(env.tmp_dir, source_url)
        with lcd(src_dir):
            lrun("./config --prefix=%s --shared" % env.project_dir)
            lrun("make")
            lrun("make install")
    with lcd(env.lib_dir):
        # Provide the .so.10 names the prebuilt UCSC binaries link against.
        for libname in ("libssl", "libcrypto"):
            lrun("ln -s ../lib64/%s.so.1.0.0 %s.so.10" % (libname, libname))
def install_ucsc_tools():
    """Install useful executables from UCSC.
    see https://github.com/chapmanb/cloudbiolinux/blob/master/cloudbio/custom/bio_nextgen.py
    for an up-to-date version
    """
    base_url = "http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/"
    for tool in ("liftOver", "faToTwoBit", "twoBitToFa", "bedToBigBed",
                 "wigToBigWig", "bedGraphToBigWig"):
        with lcd(env.bin_dir):
            # Only fetch tools that are not already present.
            if not lexists(os.path.join(env.bin_dir, tool)):
                lrun("wget %s%s" % (base_url, tool))
                lrun("chmod a+rwx %s" % tool)
def install_samtools():
    """Install samtools 0.1.18
    """
    source_url = "http://sourceforge.net/projects/samtools/files/samtools/0.1.18/samtools-0.1.18.tar.bz2"
    with lcd(env.tmp_dir):
        src_dir = _fetch_and_unpack(env.tmp_dir, source_url)
        with lcd(src_dir):
            lrun("make")
            # copy executables to bin
            lrun("find . -perm /u=x -type f -exec cp {} %(bin_dir)s \;" % env)
def install_bedtools():
    """Install BEDTools 2.17.0
    """
    source_url = "http://bedtools.googlecode.com/files/BEDTools.v2.17.0.tar.gz"
    with lcd(env.tmp_dir):
        # Cannot use the _fetch_and_unpack return value: the archive name
        # does not match the directory it unpacks into.
        _fetch_and_unpack(env.tmp_dir, source_url, False)
        with lcd("bedtools-2.17.0"):
            lrun("make clean")
            lrun("make all")
            lrun("find bin/. -perm /u=x -type f -exec cp {} %(bin_dir)s \;" % env)
def install_picard():
    """Install Picard 1.96
    """
    version = "1.96"
    source_url = 'http://downloads.sourceforge.net/project/picard/picard-tools/%s/picard-tools-%s.zip' % (version, version)
    install_dir = os.path.join(env.bin_dir, "picard")
    _make_dir(install_dir)
    with lcd(env.tmp_dir):
        src_dir = _fetch_and_unpack(env.tmp_dir, source_url)
        with lcd(src_dir):
            # Picard is distributed as prebuilt jars; just move them in place.
            lrun("mv *.jar %s" % install_dir)
def install_bwa():
    """Install BWA 0.5.9
    Aligns short nucleotide sequences against a long reference sequence.
    http://bio-bwa.sourceforge.net/
    """
    version = "0.5.9"
    source_url = "http://downloads.sourceforge.net/project/bio-bwa/bwa-%s.tar.bz2" % (version)
    with lcd(env.tmp_dir):
        src_dir = _fetch_and_unpack(env.tmp_dir, source_url)
        with lcd(src_dir):
            machine = lrun("uname -m")
            # Drop the 64-bit compiler flag when not building on x86_64.
            if machine.find("x86_64") == -1:
                lrun("sed -i.bak -r -e 's/-O2 -m64/-O2/g' Makefile")
            lrun("make")
            # copy executables to bin
            lrun("find . -perm /u=x -type f -exec cp {} %(bin_dir)s \;" % env)
def install_macs():
    """Install MACS 1.4.2
    Model-based Analysis for ChIP-Seq.
    http://liulab.dfci.harvard.edu/MACS/
    """
    version = "1.4.2"
    source_url = "https://github.com/downloads/taoliu/MACS/MACS-%s.tar.gz" % version
    with lcd(env.tmp_dir):
        src_dir = _fetch_and_unpack(env.tmp_dir, source_url)
        with lcd(src_dir):
            # Install the python package into the virtualenv, then expose the
            # command-line entry points in bin.
            vlrun("python setup.py install")
            lrun("chmod a+rwx bin/*")
            lrun("find bin/. -perm /u=x -type f -exec cp {} %(bin_dir)s \;" % env)
def install_meme():
    """Install meme 4.9.1
    """
    source_url = "http://ebi.edu.au/ftp/software/MEME/4.9.1/meme_4.9.1.tar.gz"
    with lcd(env.tmp_dir):
        src_dir = _fetch_and_unpack(env.tmp_dir, source_url)
        with lcd(src_dir):
            # Point MEME at the locally-installed perl/python interpreters.
            lrun("./configure --prefix=%(meme_dir)s --with-url='http://meme.nbcr.net/meme' --with-perl=%(bin_dir)s/perl/bin/perl --with-python=%(bin_dir)s/python2.7" % env)
            lrun("make")
            lrun("make install")
def install_sicer():
    """Install SICER 1.1
    """
    source_url = "http://home.gwu.edu/~wpeng/SICER_V1.1.tgz"
    with lcd(env.tmp_dir):
        src_dir = _fetch_and_unpack(env.tmp_dir, source_url)
        with lcd(src_dir):
            # SICER ships as plain scripts; relocate the directory wholesale.
            lrun("mv SICER %(sicer_dir)s" % env)
# ================================================================================
# == Install chipseq pipeline and update config file
def install_chipseq():
    """Fetch the chipseq pipeline code and refresh its configuration file."""
    install_chipseq_pipeline()
    update_config()
def install_chipseq_pipeline():
    """Get the latest chipseq code from github.
    """
    with lcd(env.project_dir):
        lrun("wget --no-check-certificate -r https://github.com/crukci-bioinformatics/chipseq-pipeline/archive/master.zip -O master-pipeline.zip")
        lrun("unzip master-pipeline.zip")
        with lcd(env.chipseq_path):
            # Rebuild the Kick.r entry script as ChipSeq.r: prepend a shebang
            # and the local R library path, dropping the original first two lines.
            lrun("( ( echo '#!/usr/bin/env Rscript' ; echo 'RLIBSVar = \"%s\"' ; sed '1,2d' RScripts/Kick.r ) > RScripts/ChipSeq.r )" % env.r_lib_dir)
            lrun("chmod a+x RScripts/ChipSeq.r")
def update_config():
    """Write the locations of installed tools, libraries and reference data
    into the chipseq pipeline's ``config.ini``.

    Reads the existing file first (when present) so unrelated settings
    survive, then rewrites it in full.
    """
    import ConfigParser  # Python 2 module name (configparser in Py3)
    config = ConfigParser.SafeConfigParser()
    config_file = os.path.join(env.chipseq_config_path, "config.ini")
    if os.path.exists(config_file):
        config.read(config_file)
    config.set("Executables", "meme", os.path.join(env.bin_dir, "meme/bin/meme-chip"))
    config.set("Executables", "python", os.path.join(env.bin_dir, "python"))
    config.set("Executables", "perl", os.path.join(env.bin_dir, "perl/bin/perl"))
    config.set("Executables", "bwa", os.path.join(env.bin_dir, "bwa"))
    config.set("Executables", "samtools", os.path.join(env.bin_dir, "samtools"))
    config.set("Executables", "picard", os.path.join(env.bin_dir, "picard"))
    config.set("Executables", "rsync", os.path.join(env.bin_dir, "rsync"))
    config.set("Executables", "bedtools", env.bin_dir)
    config.set("Executables", "java", os.path.join(env.lib_dir, "%s/bin/java" % env.java_dir))
    config.set("Executables", "rexec", os.path.join(env.bin_dir, "Rscript"))
    config.set("Executables", "bigwig", os.path.join(env.bin_dir, "bedGraphToBigWig"))
    config.set("Executables", "gtftobed", os.path.join(env.bin_dir, "gtf2bed.pl"))
    config.set("Executables", "macs", os.path.join(env.bin_dir, "macs14"))
    config.set("Executables", "ame", os.path.join(env.bin_dir, "ame"))
    config.set("Executables", "sicer", os.path.join(env.bin_dir, "sicer"))
    config.set("Executables", "tpics", os.path.join(env.chipseq_pipeline, "CRI_TPICS/tpic.r"))
    config.set("Executables", "tpicszeta", os.path.join(env.chipseq_pipeline, "CRI_TPICS/zeta.pl"))
    config.set("Executables", "tpicscreatecoverage", os.path.join(env.chipseq_pipeline, "CRI_TPICS/create_coverate.pl"))
    config.set("Workflow", "executable", os.path.join(env.lib_dir, "workflow-manager/workflow-all-1.4-SNAPSHOT.jar"))
    config.set("Workflow", "taskdirectories", os.path.join(env.chipseq_path, "src/main/tasks"))
    config.set("Workflow", "summaryfile", os.path.join(env.test_dir, "tmp"))
    config.set("Workflow", "lsfoutputdirectory", os.path.join(env.test_dir, "tmp/joboutputs"))
    config.set("Libraries", "rlibs", env.r_lib_dir)
    config.set("Libraries", "pythonlibs", os.path.join(env.lib_dir, "python2.7/site-packages/"))
    config.set("Libraries", "perllibs", os.path.join(env.bin_dir, "perl/lib/site_perl/5.18.0/"))
    config.set("Libraries", "javalibs", "")
    config.set("meme parameters", "tfdb", os.path.join(env.annotation_dir, "jaspar_CORE/Jaspar_NonRedunadant.meme"))
    config.set("Genomes", "grch37", os.path.join(env.grch37_dir, "Homo_sapiens.GRCh37.67.dna.toplevel.fa"))
    config.set("Genomes", "hg18", "")
    config.set("Genomes", "mm9", os.path.join(env.mm9_dir, "Mus_musculus.NCBIM37.67.dna.toplevel.fa"))
    # BUG FIX: the GRCh37 annotation files are downloaded into env.grch37_dir
    # by install_genomes(); this entry previously pointed at env.mm9_dir.
    config.set("Gene Positions", "grch37", ":".join([os.path.join(env.grch37_dir, "Homo_sapiens.GRCh37.67.gtf"), os.path.join(env.grch37_dir, "hsapiens_gene_ensembl__transcript__main.txt")]))
    config.set("Gene Positions", "hg18", "")
    config.set("Gene Positions", "mm9", ":".join([os.path.join(env.mm9_dir, "Mus_musculus.NCBIM37.67.gtf"), os.path.join(env.mm9_dir, "mmusculus_gene_ensembl__transcript__main.txt")]))
    config.set("GeneSets", "mm9", "")
    # NOTE(review): both "Excluded Regions" and "ExcludedRegions" are kept;
    # it is unclear which spelling the pipeline reads -- confirm and drop one.
    config.set("Excluded Regions", "grch37", "No_Excluded")
    config.set("Excluded Regions", "hg18", "No_Excluded")
    config.set("Excluded Regions", "mm9", "No_Excluded")
    config.set("ExcludedRegions", "grch37", "No_Excluded")
    config.set("ExcludedRegions", "hg18", "No_Excluded")
    config.set("ExcludedRegions", "mm9", "No_Excluded")
    config.set("Chromosome Lengths", "grch37", "")
    config.set("Chromosome Lengths", "hg18", "")
    config.set("Chromosome Lengths", "mm9", "")
    config.set("Sequence Dictionary", "grch37", "")
    config.set("Sequence Dictionary", "hg18", "")
    config.set("Sequence Dictionary", "mm9", "")
    # Open for writing only once every value is set, so an error above cannot
    # leave a truncated file; the context manager also closes the handle
    # (the original leaked it).
    with open(config_file, 'w') as inifile:
        config.write(inifile)
# ================================================================================
# == Install hg19 and mm9 genomes
def install_data():
    """Download the reference genomes and build the MEME motif database."""
    install_genomes()
    configure_meme()
def install_genomes():
    """Fetch GRCh37 (human) and NCBIM37/mm9 (mouse) sequence and annotation
    files from Ensembl release 67 into their genome directories.
    """
    downloads = [
        (env.grch37_dir,
         ["ftp://ftp.ensembl.org/pub/release-67/fasta/homo_sapiens/dna/Homo_sapiens.GRCh37.67.dna.toplevel.fa.gz",
          "ftp://ftp.ensembl.org/pub/release-67/gtf/homo_sapiens/Homo_sapiens.GRCh37.67.gtf.gz",
          "ftp://ftp.ensembl.org/pub/release-67/mysql/ensembl_mart_67/hsapiens_gene_ensembl__exon_transcript__dm.txt.gz",
          "ftp://ftp.ensembl.org/pub/release-67/mysql/ensembl_mart_67/hsapiens_gene_ensembl__transcript__main.txt.gz"]),
        (env.mm9_dir,
         ["ftp://ftp.ensembl.org/pub/release-67/fasta/mus_musculus/dna/Mus_musculus.NCBIM37.67.dna.toplevel.fa.gz",
          "ftp://ftp.ensembl.org/pub/release-67/gtf/mus_musculus/Mus_musculus.NCBIM37.67.gtf.gz",
          "ftp://ftp.ensembl.org/pub/release-67/mysql/ensembl_mart_67/mmusculus_gene_ensembl__exon_transcript__dm.txt.gz",
          "ftp://ftp.ensembl.org/pub/release-67/mysql/ensembl_mart_67/mmusculus_gene_ensembl__transcript__main.txt.gz"]),
    ]
    for genome_dir, urls in downloads:
        _make_dir(genome_dir)
        with lcd(genome_dir):
            for url in urls:
                _fetch_and_unpack_genome(genome_dir, url)
def configure_meme():
    """Fetch the JASPAR 2010 non-redundant core matrices and convert them
    into a single MEME-format motif database for meme-chip.
    """
    with lcd(env.annotation_dir):
        jaspar_url = "http://jaspar.genereg.net/html/DOWNLOAD/ARCHIVE/JASPAR2010/JASPAR_CORE/non_redundant/all_species/FlatFileDir/"
        lrun('wget -r -nH --cut-dirs=2 --no-parent --reject=\"index.html*\" %s ' % (jaspar_url))
        flat_file_dir = os.path.join(env.annotation_dir, "ARCHIVE/JASPAR2010/JASPAR_CORE/non_redundant/all_species/FlatFileDir/")
        # NOTE: "NonRedunadant" typo is deliberate -- update_config() refers
        # to the same misspelled filename.
        meme_db = os.path.join(env.annotation_dir, "ARCHIVE/JASPAR2010/JASPAR_CORE/Jaspar_NonRedunadant.meme")
        converter = os.path.join(env.bin_dir, "meme/bin/jaspar2meme -pfm")
        lrun("%s %s > %s" % (converter, flat_file_dir, meme_db))
# ================================================================================
# == Install Ikaros ChIP test data
def install_test():
    """Move the bundled Ikaros ChIP test data set into the project directory."""
    test_data = os.path.join(env.chipseq_installer, 'chipseq-test')
    with lcd(env.project_dir):
        lrun('mv %s .' % test_data)
def fetch_testdata():
    """Download the SRR619469-SRR619474 FASTQ files used by the test run
    from the EBI short read archive.
    """
    _make_dir(env.testfq_dir)
    accessions = ("SRR619469", "SRR619470", "SRR619471",
                  "SRR619472", "SRR619473", "SRR619474")
    # NOTE(review): this uses fabric's remote ``cd`` while the rest of the
    # file uses the local ``lcd`` -- confirm this is intentional.
    with cd(env.testfq_dir):
        for accession in accessions:
            fq_url = ("ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR619/%s/%s.fastq.gz"
                      % (accession, accession))
            _fetch(env.testfq_dir, fq_url)
| |
# -*- coding: utf-8 -*-
'''
Create ssh executor system
'''
# Import python libs
from __future__ import absolute_import, print_function
import copy
import getpass
import json
import logging
import multiprocessing
import subprocess
import hashlib
import tarfile
import os
import re
import sys
import time
import yaml
import uuid
import tempfile
import binascii
import sys
# Import salt libs
import salt.client.ssh.shell
import salt.client.ssh.wrapper
import salt.config
import salt.exceptions
import salt.defaults.exitcodes
import salt.log
import salt.loader
import salt.minion
import salt.roster
import salt.serializers.yaml
import salt.state
import salt.utils
import salt.utils.args
import salt.utils.event
import salt.utils.atomicfile
import salt.utils.thin
import salt.utils.url
import salt.utils.verify
import salt.utils.network
from salt.utils import is_windows
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
# The directory where salt thin is deployed
DEFAULT_THIN_DIR = '/tmp/.%%USER%%_%%FQDNUUID%%_salt'
# RSTR is just a delimiter to distinguish the beginning of salt STDOUT
# and STDERR. There is no special meaning. Messages prior to RSTR in
# stderr and stdout are either from SSH or from the shim.
#
# RSTR on both stdout and stderr:
#   no errors in SHIM - output after RSTR is from salt
# No RSTR in stderr, RSTR in stdout:
#   no errors in SSH_SH_SHIM, but SHIM commands for salt master are after
#   RSTR in stdout
# No RSTR in stderr, no RSTR in stdout:
#   Failure in SHIM
# RSTR in stderr, No RSTR in stdout:
#   Undefined behavior
RSTR = '_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878'
# The regex to find RSTR in output - Must be on an output line by itself
# NOTE - must use non-grouping match groups or output splitting will fail.
RSTR_RE = r'(?:^|\r?\n)' + RSTR + '(?:\r?\n|$)'
# METHODOLOGY:
#
#   1) Make the _thinnest_ /bin/sh shim (SSH_SH_SHIM) to find the python
#      interpreter and get it invoked
#   2) Once a qualified python is found start it with the SSH_PY_SHIM
#   3) The shim is converted to a single semicolon separated line, so
#      some constructs are needed to keep it clean.
# NOTE:
#   * SSH_SH_SHIM is generic and can be used to load+exec *any* python
#     script on the target.
#   * SSH_PY_SHIM is in a separate file rather than stuffed in a string
#     in salt/client/ssh/__init__.py - this makes testing *easy* because
#     it can be invoked directly.
#   * SSH_PY_SHIM is base64 encoded and formatted into the SSH_SH_SHIM
#     string.  This makes the python script "armored" so that it can
#     all be passed in the SSH command and will not need special quoting
#     (which likely would be impossibe to do anyway)
#   * The formatted SSH_SH_SHIM with the SSH_PY_SHIM payload is a bit
#     big (~7.5k).  If this proves problematic for an SSH command we
#     might try simply invoking "/bin/sh -s" and passing the formatted
#     SSH_SH_SHIM on SSH stdin.
# NOTE: there are two passes of formatting:
# 1) Substitute in static values
#   - EX_THIN_PYTHON_INVALID - exit code if a suitable python is not found
# 2) Substitute in instance-specific commands
#   - DEBUG - enable shim debugging (any non-zero string enables)
#   - SUDO - load python and execute as root (any non-zero string enables)
#   - SSH_PY_CODE - base64-encoded python code to execute
#   - SSH_PY_ARGS - arguments to pass to python code
# This shim generically loads python code . . . and *no* more.
# - Uses /bin/sh for maximum compatibility - then jumps to
#   python for ultra-maximum compatibility.
#
# 1. Identify a suitable python
# 2. Jump to python
#
# Single-brace {EX_THIN_PYTHON_INVALID} is filled by the .format() call
# below; double-braced {{...}} placeholders survive it for the second,
# per-instance formatting pass described above.
SSH_SH_SHIM = r'''/bin/sh << 'EOF'
set -e
set -u
DEBUG="{{DEBUG}}"
if [ -n "$DEBUG" ]
then set -x
fi
SUDO=""
if [ -n "{{SUDO}}" ]
then SUDO="sudo "
fi
EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
PYTHON_CMDS="python27 python2.7 python26 python2.6 python2 python"
for py_cmd in $PYTHON_CMDS
do if "$py_cmd" -c "import sys; sys.exit(not (sys.hexversion >= 0x02060000 and sys.version_info[0] == {{HOST_PY_MAJOR}}));" >/dev/null 2>&1
then py_cmd_path=`"$py_cmd" -c 'from __future__ import print_function; import sys; print(sys.executable);'`
exec $SUDO "$py_cmd_path" -c 'import base64; exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
exit 0
else continue
fi
done
echo "ERROR: Unable to locate appropriate python command" >&2
exit $EX_PYTHON_INVALID
EOF'''.format(
    EX_THIN_PYTHON_INVALID=salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,
)
# SSH_PY_SHIM is only defined on non-Windows platforms; it holds the source
# of the python-side shim shipped next to this module.
if not is_windows():
    shim_file = os.path.join(os.path.dirname(__file__), 'ssh_py_shim.py')
    if not os.path.exists(shim_file):
        # On esky builds we only have the .pyc file
        shim_file += "c"
    with salt.utils.fopen(shim_file) as ssh_py_shim:
        SSH_PY_SHIM = ssh_py_shim.read()
# Module-level logger
log = logging.getLogger(__name__)
class SSH(object):
    '''
    Create an SSH execution system
    '''
    def __init__(self, opts):
        # Only fire master events when the master event socket exists and
        # ZMQ could be imported; otherwise run silently.
        pull_sock = os.path.join(opts['sock_dir'], 'master_event_pull.ipc')
        if os.path.isfile(pull_sock) and HAS_ZMQ:
            self.event = salt.utils.event.get_event(
                'master',
                opts['sock_dir'],
                opts['transport'],
                opts=opts,
                listen=False)
        else:
            self.event = None
        self.opts = opts
        # salt-ssh is a wrapper around the system ssh binary; refuse to start
        # without it.
        if not salt.utils.which('ssh'):
            raise salt.exceptions.SaltSystemExit('No ssh binary found in path -- ssh must be installed for salt-ssh to run. Exiting.')
        self.opts['_ssh_version'] = ssh_version()
        self.tgt_type = self.opts['selected_target_option'] \
            if self.opts['selected_target_option'] else 'glob'
        self.roster = salt.roster.Roster(opts, opts.get('roster', 'flat'))
        self.targets = self.roster.targets(
            self.opts['tgt'],
            self.tgt_type)
        # If we're in a wfunc, we need to get the ssh key location from the
        # top level opts, stored in __master_opts__
        if '__master_opts__' in self.opts:
            priv = self.opts['__master_opts__'].get(
                'ssh_priv',
                os.path.join(
                    self.opts['__master_opts__']['pki_dir'],
                    'ssh',
                    'salt-ssh.rsa'
                )
            )
        else:
            priv = self.opts.get(
                'ssh_priv',
                os.path.join(
                    self.opts['pki_dir'],
                    'ssh',
                    'salt-ssh.rsa'
                )
            )
        # Generate the salt-ssh keypair on first use (unless the user opted
        # into agent forwarding instead of a key file).
        if priv != 'agent-forwarding':
            if not os.path.isfile(priv):
                try:
                    salt.client.ssh.shell.gen_key(priv)
                except OSError:
                    raise salt.exceptions.SaltClientError('salt-ssh could not be run because it could not generate keys.\n\nYou can probably resolve this by executing this script with increased permissions via sudo or by running as root.\nYou could also use the \'-c\' option to supply a configuration directory that you have permissions to read and write to.')
        # Per-target connection defaults; individual roster entries override
        # these in handle_ssh().
        self.defaults = {
            'user': self.opts.get(
                'ssh_user',
                salt.config.DEFAULT_MASTER_OPTS['ssh_user']
            ),
            'port': self.opts.get(
                'ssh_port',
                salt.config.DEFAULT_MASTER_OPTS['ssh_port']
            ),
            'passwd': self.opts.get(
                'ssh_passwd',
                salt.config.DEFAULT_MASTER_OPTS['ssh_passwd']
            ),
            'priv': priv,
            'timeout': self.opts.get(
                'ssh_timeout',
                salt.config.DEFAULT_MASTER_OPTS['ssh_timeout']
            ) + self.opts.get(
                'timeout',
                salt.config.DEFAULT_MASTER_OPTS['timeout']
            ),
            'sudo': self.opts.get(
                'ssh_sudo',
                salt.config.DEFAULT_MASTER_OPTS['ssh_sudo']
            ),
            'identities_only': self.opts.get(
                'ssh_identities_only',
                salt.config.DEFAULT_MASTER_OPTS['ssh_identities_only']
            ),
        }
        if self.opts.get('rand_thin_dir'):
            # Use a random remote thin dir and wipe it after the run.
            self.defaults['thin_dir'] = os.path.join(
                '/tmp',
                '.{0}'.format(uuid.uuid4().hex[:6]))
            self.opts['wipe_ssh'] = 'True'
        self.serial = salt.payload.Serial(opts)
        self.returners = salt.loader.returners(self.opts, {})
        self.fsclient = salt.fileclient.FSClient(self.opts)
        self.thin = salt.utils.thin.gen_thin(self.opts['cachedir'])
        self.mods = mod_data(self.fsclient)

    def get_pubkey(self):
        '''
        Return the key string for the SSH public key
        '''
        priv = self.opts.get(
            'ssh_priv',
            os.path.join(
                self.opts['pki_dir'],
                'ssh',
                'salt-ssh.rsa'
            )
        )
        pub = '{0}.pub'.format(priv)
        with salt.utils.fopen(pub, 'r') as fp_:
            return '{0} rsa root@master'.format(fp_.read().split()[1])

    def key_deploy(self, host, ret):
        '''
        Deploy the SSH key if the minions don't auth
        '''
        if not isinstance(ret[host], dict) or self.opts.get('ssh_key_deploy'):
            target = self.targets[host]
            if 'passwd' in target or self.opts['ssh_passwd']:
                self._key_deploy_run(host, target, False)
            return ret
        if ret[host].get('stderr', '').count('Permission denied'):
            target = self.targets[host]
            # permission denied, attempt to auto deploy ssh key
            print(('Permission denied for host {0}, do you want to deploy '
                   'the salt-ssh key? (password required):').format(host))
            deploy = input('[Y/n] ')
            if deploy.startswith(('n', 'N')):
                return ret
            target['passwd'] = getpass.getpass(
                'Password for {0}@{1}: '.format(target['user'], host)
            )
            return self._key_deploy_run(host, target, True)
        return ret

    def _key_deploy_run(self, host, target, re_run=True):
        '''
        The ssh-copy-id routine
        '''
        argv = [
            'ssh.set_auth_key',
            target.get('user', 'root'),
            self.get_pubkey(),
        ]
        single = Single(
            self.opts,
            argv,
            host,
            mods=self.mods,
            fsclient=self.fsclient,
            thin=self.thin,
            **target)
        if salt.utils.which('ssh-copy-id'):
            # we have ssh-copy-id, use it!
            stdout, stderr, retcode = single.shell.copy_id()
        else:
            stdout, stderr, retcode = single.run()
        if re_run:
            # Re-run the original command now that the key is deployed; the
            # password is dropped so the key is actually exercised.
            target.pop('passwd')
            single = Single(
                self.opts,
                self.opts['argv'],
                host,
                mods=self.mods,
                fsclient=self.fsclient,
                thin=self.thin,
                **target)
            stdout, stderr, retcode = single.cmd_block()
            try:
                data = salt.utils.find_json(stdout)
                return {host: data.get('local', data)}
            except Exception:
                if stderr:
                    return {host: stderr}
                return {host: 'Bad Return'}
        if salt.defaults.exitcodes.EX_OK != retcode:
            return {host: stderr}
        return {host: stdout}

    def handle_routine(self, que, opts, host, target, mine=False):
        '''
        Run the routine in a "Thread", put a dict on the queue
        '''
        opts = copy.deepcopy(opts)
        single = Single(
            opts,
            opts['argv'],
            host,
            mods=self.mods,
            fsclient=self.fsclient,
            thin=self.thin,
            mine=mine,
            **target)
        ret = {'id': single.id}
        stdout, stderr, retcode = single.run()
        # This job is done, yield
        try:
            data = salt.utils.find_json(stdout)
            if len(data) < 2 and 'local' in data:
                ret['ret'] = data['local']
            else:
                # Could not recognize the payload shape; return raw streams.
                ret['ret'] = {
                    'stdout': stdout,
                    'stderr': stderr,
                    'retcode': retcode,
                }
        except Exception:
            ret['ret'] = {
                'stdout': stdout,
                'stderr': stderr,
                'retcode': retcode,
            }
        que.put(ret)

    def handle_ssh(self, mine=False):
        '''
        Spin up the needed threads or processes and execute the subsequent
        routines
        '''
        que = multiprocessing.Queue()
        running = {}
        target_iter = self.targets.__iter__()
        returned = set()
        rets = set()
        init = False
        if not self.targets:
            raise salt.exceptions.SaltClientError('No matching targets found in roster.')
        while True:
            # Launch new workers until the process cap is hit or the target
            # iterator is exhausted (init flips to True).
            if len(running) < self.opts.get('ssh_max_procs', 25) and not init:
                try:
                    host = next(target_iter)
                except StopIteration:
                    init = True
                    continue
                for default in self.defaults:
                    if default not in self.targets[host]:
                        self.targets[host][default] = self.defaults[default]
                args = (
                    que,
                    self.opts,
                    host,
                    self.targets[host],
                    mine,
                )
                routine = multiprocessing.Process(
                    target=self.handle_routine,
                    args=args)
                routine.start()
                running[host] = {'thread': routine}
                continue
            # Drain one result from the queue, if any.
            ret = {}
            try:
                ret = que.get(False)
                if 'id' in ret:
                    returned.add(ret['id'])
                    yield {ret['id']: ret['ret']}
            except Exception:
                pass
            for host in running:
                if not running[host]['thread'].is_alive():
                    if host not in returned:
                        # Try to get any returns that came through since we
                        # last checked
                        try:
                            while True:
                                ret = que.get(False)
                                if 'id' in ret:
                                    returned.add(ret['id'])
                                    yield {ret['id']: ret['ret']}
                        except Exception:
                            pass
                        if host not in returned:
                            # Worker died without putting anything on the
                            # queue; synthesize an error return for it.
                            error = ('Target \'{0}\' did not return any data, '
                                     'probably due to an error.').format(host)
                            ret = {'id': host,
                                   'ret': error}
                            log.error(error)
                            yield {ret['id']: ret['ret']}
                    running[host]['thread'].join()
                    rets.add(host)
            for host in rets:
                if host in running:
                    running.pop(host)
            if len(rets) >= len(self.targets):
                break
            # Sleep when limit or all threads started
            if len(running) >= self.opts.get('ssh_max_procs', 25) or len(self.targets) >= len(running):
                time.sleep(0.1)

    def run_iter(self, mine=False):
        '''
        Execute and yield returns as they come in, do not print to the display

        mine
            The Single objects will use mine_functions defined in the roster,
            pillar, or master config (they will be checked in that order) and
            will modify the argv with the arguments from mine_functions
        '''
        fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
        jid = self.returners[fstr]()
        # Save the invocation information
        argv = self.opts['argv']
        if self.opts.get('raw_shell', False):
            fun = 'ssh._raw'
            args = argv
        else:
            fun = argv[0] if argv else ''
            args = argv[1:]
        job_load = {
            'jid': jid,
            'tgt_type': self.tgt_type,
            'tgt': self.opts['tgt'],
            'user': self.opts['user'],
            'fun': fun,
            'arg': args,
        }
        # save load to the master job cache
        self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load)
        for ret in self.handle_ssh(mine=mine):
            host = next(six.iterkeys(ret))
            self.cache_job(jid, host, ret[host], fun)
            if self.event:
                self.event.fire_event(
                    ret,
                    salt.utils.event.tagify(
                        [jid, 'ret', host],
                        'job'))
            yield ret

    def cache_job(self, jid, id_, ret, fun):
        '''
        Cache the job information
        '''
        self.returners['{0}.returner'.format(self.opts['master_job_cache'])]({'jid': jid,
                                                                              'id': id_,
                                                                              'return': ret,
                                                                              'fun': fun})

    def run(self):
        '''
        Execute the overall routine, print results via outputters
        '''
        fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
        jid = self.returners[fstr]()
        # Save the invocation information
        argv = self.opts['argv']
        if self.opts.get('raw_shell', False):
            fun = 'ssh._raw'
            args = argv
        else:
            fun = argv[0] if argv else ''
            args = argv[1:]
        job_load = {
            'jid': jid,
            'tgt_type': self.tgt_type,
            'tgt': self.opts['tgt'],
            'user': self.opts['user'],
            'fun': fun,
            'arg': args,
        }
        # save load to the master job cache
        try:
            if self.opts['master_job_cache'] == 'local_cache':
                self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load, minions=self.targets.keys())
            else:
                self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load)
        except Exception as exc:
            log.error('Could not save load with returner {0}: {1}'.format(self.opts['master_job_cache'], exc))
        if self.opts.get('verbose'):
            msg = 'Executing job with jid {0}'.format(jid)
            print(msg)
            print('-' * len(msg) + '\n')
            print('')
        sret = {}
        outputter = self.opts.get('output', 'nested')
        final_exit = 0
        for ret in self.handle_ssh():
            host = next(six.iterkeys(ret))
            if isinstance(ret[host], dict):
                host_ret = ret[host].get('retcode', 0)
                if host_ret != 0:
                    final_exit = 1
            else:
                # Error on host
                final_exit = 1
            self.cache_job(jid, host, ret[host], fun)
            ret = self.key_deploy(host, ret)
            if isinstance(ret[host], dict) and ret[host].get('stderr', '').startswith('ssh:'):
                ret[host] = ret[host]['stderr']
            if not isinstance(ret[host], dict):
                p_data = {host: ret[host]}
            elif 'return' not in ret[host]:
                p_data = ret
            else:
                outputter = ret[host].get('out', self.opts.get('output', 'nested'))
                p_data = {host: ret[host].get('return', {})}
            if self.opts.get('static'):
                # Accumulate and display everything at the end.
                sret.update(p_data)
            else:
                salt.output.display_output(
                    p_data,
                    outputter,
                    self.opts)
            if self.event:
                self.event.fire_event(
                    ret,
                    salt.utils.event.tagify(
                        [jid, 'ret', host],
                        'job'))
        if self.opts.get('static'):
            salt.output.display_output(
                sret,
                outputter,
                self.opts)
        if final_exit:
            sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
class Single(object):
    '''
    Hold onto a single ssh execution
    '''
    # 1. Get command ready
    # 2. Check if target has salt
    # 3. deploy salt-thin
    # 4. execute requested command via salt-thin
    def __init__(
            self,
            opts,
            argv,
            id_,
            host,
            user=None,
            port=None,
            passwd=None,
            priv=None,
            timeout=30,
            sudo=False,
            tty=False,
            mods=None,
            fsclient=None,
            thin=None,
            mine=False,
            minion_opts=None,
            identities_only=False,
            **kwargs):
        # Get mine setting and mine_functions if defined in kwargs (from roster)
        self.mine = mine
        self.mine_functions = kwargs.get('mine_functions')
        self.cmd_umask = kwargs.get('cmd_umask', None)
        self.opts = opts
        self.tty = tty
        # self.wipe is a *string* ('True'/'False') because it is substituted
        # verbatim into the Python shim source in _cmd_str().
        # NOTE(review): a truthy roster 'wipe' yields the string 'False' here,
        # which looks inverted -- confirm intent against the roster docs.
        if kwargs.get('wipe'):
            self.wipe = 'False'
        else:
            self.wipe = 'True' if self.opts.get('wipe_ssh') else 'False'
        if kwargs.get('thin_dir'):
            self.thin_dir = kwargs['thin_dir']
        else:
            # Build a per-user thin dir, suffixed with a short stable UUID
            # derived from this master's FQDN so concurrent masters don't
            # stomp on each other's thin deployments.
            if user:
                thin_dir = DEFAULT_THIN_DIR.replace('%%USER%%', user)
            else:
                thin_dir = DEFAULT_THIN_DIR.replace('%%USER%%', 'root')
            self.thin_dir = thin_dir.replace(
                '%%FQDNUUID%%',
                uuid.uuid3(uuid.NAMESPACE_DNS,
                           salt.utils.network.get_fqhostname()).hex[:6]
            )
        self.opts['thin_dir'] = self.thin_dir
        self.fsclient = fsclient
        self.context = {'master_opts': self.opts,
                        'fileclient': self.fsclient}
        # Normalize argv to a list so __arg_comps() can split fun/args.
        if isinstance(argv, six.string_types):
            self.argv = [argv]
        else:
            self.argv = argv
        self.fun, self.args, self.kwargs = self.__arg_comps()
        self.id = id_
        self.mods = mods if isinstance(mods, dict) else {}
        args = {'host': host,
                'user': user,
                'port': port,
                'passwd': passwd,
                'priv': priv,
                'timeout': timeout,
                'sudo': sudo,
                'tty': tty,
                'mods': self.mods,
                'identities_only': identities_only}
        # Layer minion opts: master config defaults, then per-call overrides,
        # then the values salt-ssh requires on the target.
        self.minion_opts = opts.get('ssh_minion_opts', {})
        if minion_opts is not None:
            self.minion_opts.update(minion_opts)
        self.minion_opts.update({
            'root_dir': os.path.join(self.thin_dir, 'running_data'),
            'id': self.id,
            'sock_dir': '/',
            'log_file': 'salt-call.log'
        })
        self.minion_config = salt.serializers.yaml.serialize(self.minion_opts)
        # self.target carries the roster kwargs plus the ssh connection args.
        self.target = kwargs
        self.target.update(args)
        self.serial = salt.payload.Serial(opts)
        self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
        self.shell = salt.client.ssh.shell.Shell(opts, **args)
        self.thin = thin if thin else salt.utils.thin.thin_path(opts['cachedir'])
    def __arg_comps(self):
        '''
        Return the function name and the arg list
        '''
        fun = self.argv[0] if self.argv else ''
        parsed = salt.utils.args.parse_input(self.argv[1:], condition=False)
        args = parsed[0]
        kws = parsed[1]
        return fun, args, kws
    def _escape_arg(self, arg):
        '''
        Properly escape argument to protect special characters from shell
        interpretation. This avoids having to do tricky argument quoting.
        Effectively just escape all characters in the argument that are not
        alphanumeric!
        '''
        # Backslash-escape every non-word character (regex \W).
        return ''.join(['\\' + char if re.match(r'\W', char) else char for char in arg])
    def deploy(self):
        '''
        Deploy salt-thin
        '''
        self.shell.send(
            self.thin,
            os.path.join(self.thin_dir, 'salt-thin.tgz'),
        )
        self.deploy_ext()
        return True
    def deploy_ext(self):
        '''
        Deploy the ext_mods tarball
        '''
        if self.mods.get('file'):
            self.shell.send(
                self.mods['file'],
                os.path.join(self.thin_dir, 'salt-ext_mods.tgz'),
            )
        return True
    def run(self, deploy_attempted=False):
        '''
        Execute the routine, the routine can be either:
        1. Execute a raw shell command
        2. Execute a wrapper func
        3. Execute a remote Salt command
        If a (re)deploy is needed, then retry the operation after a deploy
        attempt
        Returns tuple of (stdout, stderr, retcode)
        '''
        stdout = stderr = retcode = None
        if self.opts.get('raw_shell', False):
            cmd_str = ' '.join([self._escape_arg(arg) for arg in self.argv])
            stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
        elif self.fun in self.wfuncs or self.mine:
            # Wrapper funcs run on the master; they return no stderr.
            stdout, retcode = self.run_wfunc()
        else:
            stdout, stderr, retcode = self.cmd_block()
        return stdout, stderr, retcode
    def run_wfunc(self):
        '''
        Execute a wrapper function
        Returns tuple of (json_data, '')
        '''
        # Ensure that opts/grains are up to date
        # Execute routine
        # data_cache is hard-coded False, so the refresh branch below always
        # runs and the on-disk datap cache is effectively disabled.
        data_cache = False
        data = None
        cdir = os.path.join(self.opts['cachedir'], 'minions', self.id)
        if not os.path.isdir(cdir):
            os.makedirs(cdir)
        datap = os.path.join(cdir, 'ssh_data.p')
        refresh = False
        if not os.path.isfile(datap):
            refresh = True
        else:
            # Age of the cache file in minutes.
            passed_time = (time.time() - os.stat(datap).st_mtime) / 60
            if passed_time > self.opts.get('cache_life', 60):
                refresh = True
        if self.opts.get('refresh_cache'):
            refresh = True
        conf_grains = {}
        # Save conf file grains before they get clobbered
        if 'ssh_grains' in self.opts:
            conf_grains = self.opts['ssh_grains']
        if not data_cache:
            refresh = True
        if refresh:
            # Make the datap
            # TODO: Auto expire the datap
            pre_wrapper = salt.client.ssh.wrapper.FunctionWrapper(
                self.opts,
                self.id,
                fsclient=self.fsclient,
                minion_opts=self.minion_opts,
                **self.target)
            # Pull opts/grains from the target via the wrapper.
            opts_pkg = pre_wrapper['test.opts_pkg']()  # pylint: disable=E1102
            opts_pkg['file_roots'] = self.opts['file_roots']
            opts_pkg['pillar_roots'] = self.opts['pillar_roots']
            opts_pkg['ext_pillar'] = self.opts['ext_pillar']
            opts_pkg['extension_modules'] = self.opts['extension_modules']
            opts_pkg['_ssh_version'] = self.opts['_ssh_version']
            opts_pkg['__master_opts__'] = self.context['master_opts']
            if '_caller_cachedir' in self.opts:
                opts_pkg['_caller_cachedir'] = self.opts['_caller_cachedir']
            else:
                opts_pkg['_caller_cachedir'] = self.opts['cachedir']
            # Use the ID defined in the roster file
            opts_pkg['id'] = self.id
            retcode = 0
            if '_error' in opts_pkg:
                # Refresh failed
                ret = json.dumps({'local': opts_pkg})
                retcode = opts_pkg['retcode']
                return ret, retcode
            pillar = salt.pillar.Pillar(
                opts_pkg,
                opts_pkg['grains'],
                opts_pkg['id'],
                opts_pkg.get('environment', 'base')
            )
            pillar_dirs = {}
            pillar_data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
            # TODO: cache minion opts in datap in master.py
            data = {'opts': opts_pkg,
                    'grains': opts_pkg['grains'],
                    'pillar': pillar_data}
            if data_cache:
                with salt.utils.fopen(datap, 'w+b') as fp_:
                    fp_.write(
                        self.serial.dumps(data)
                    )
        if not data and data_cache:
            with salt.utils.fopen(datap, 'rb') as fp_:
                data = self.serial.load(fp_)
        opts = data.get('opts', {})
        opts['grains'] = data.get('grains')
        # Restore master grains
        for grain in conf_grains:
            opts['grains'][grain] = conf_grains[grain]
        # Enable roster grains support
        if 'grains' in self.target:
            for grain in self.target['grains']:
                opts['grains'][grain] = self.target['grains'][grain]
        opts['pillar'] = data.get('pillar')
        wrapper = salt.client.ssh.wrapper.FunctionWrapper(
            opts,
            self.id,
            fsclient=self.fsclient,
            minion_opts=self.minion_opts,
            **self.target)
        self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
        wrapper.wfuncs = self.wfuncs
        # We're running in the mine, need to fetch the arguments from the
        # roster, pillar, master config (in that order)
        if self.mine:
            mine_args = None
            if self.mine_functions and self.fun in self.mine_functions:
                mine_args = self.mine_functions[self.fun]
            elif opts['pillar'] and self.fun in opts['pillar'].get('mine_functions', {}):
                mine_args = opts['pillar']['mine_functions'][self.fun]
            elif self.fun in self.context['master_opts'].get('mine_functions', {}):
                mine_args = self.context['master_opts']['mine_functions'][self.fun]
            # If we found mine_args, replace our command's args
            if isinstance(mine_args, dict):
                self.args = []
                self.kwargs = mine_args
            elif isinstance(mine_args, list):
                self.args = mine_args
                self.kwargs = {}
        try:
            if self.mine:
                result = wrapper[self.fun](*self.args, **self.kwargs)
            else:
                result = self.wfuncs[self.fun](*self.args, **self.kwargs)
        except TypeError as exc:
            result = 'TypeError encountered executing {0}: {1}'.format(self.fun, exc)
            retcode = 1
        except Exception as exc:
            result = 'An Exception occurred while executing {0}: {1}'.format(self.fun, exc)
            retcode = 1
        # Mimic the json data-structure that "salt-call --local" will
        # emit (as seen in ssh_py_shim.py)
        if isinstance(result, dict) and 'local' in result:
            ret = json.dumps({'local': result['local']})
        else:
            ret = json.dumps({'local': {'return': result}})
        return ret, retcode
    def _cmd_str(self):
        '''
        Prepare the command string
        '''
        sudo = 'sudo' if self.target['sudo'] else ''
        if '_caller_cachedir' in self.opts:
            cachedir = self.opts['_caller_cachedir']
        else:
            cachedir = self.opts['cachedir']
        thin_sum = salt.utils.thin.thin_sum(cachedir, 'sha1')
        debug = ''
        if not self.opts.get('log_level'):
            self.opts['log_level'] = 'info'
        if salt.log.LOG_LEVELS['debug'] >= salt.log.LOG_LEVELS[self.opts.get('log_level', 'info')]:
            debug = '1'
        # This Python fragment is spliced into SSH_PY_SHIM in place of the
        # '#%%OPTS' marker; the OPTIONS lines are at column 0 on purpose.
        arg_str = '''
OPTIONS = OBJ()
OPTIONS.config = \
"""
{0}
"""
OPTIONS.delimiter = '{1}'
OPTIONS.saltdir = '{2}'
OPTIONS.checksum = '{3}'
OPTIONS.hashfunc = '{4}'
OPTIONS.version = '{5}'
OPTIONS.ext_mods = '{6}'
OPTIONS.wipe = {7}
OPTIONS.tty = {8}
OPTIONS.cmd_umask = {9}
ARGS = {10}\n'''.format(self.minion_config,
                        RSTR,
                        self.thin_dir,
                        thin_sum,
                        'sha1',
                        salt.version.__version__,
                        self.mods.get('version', ''),
                        self.wipe,
                        self.tty,
                        self.cmd_umask,
                        self.argv)
        py_code = SSH_PY_SHIM.replace('#%%OPTS', arg_str)
        # NOTE(review): str.encode('base64') is a Python-2-only codec; on
        # Python 3 this raises LookupError -- confirm this module is Py2-only.
        py_code_enc = py_code.encode('base64')
        cmd = SSH_SH_SHIM.format(
            DEBUG=debug,
            SUDO=sudo,
            SSH_PY_CODE=py_code_enc,
            HOST_PY_MAJOR=sys.version_info[0],
        )
        return cmd
    def shim_cmd(self, cmd_str):
        '''
        Run a shim command.
        If tty is enabled, we must scp the shim to the target system and
        execute it there
        '''
        if not self.tty:
            return self.shell.exec_cmd(cmd_str)
        # Write the shim to a file
        shim_dir = os.path.join(self.opts['cachedir'], 'ssh_shim')
        if not os.path.exists(shim_dir):
            os.makedirs(shim_dir)
        with tempfile.NamedTemporaryFile(mode='w',
                                         prefix='shim_',
                                         dir=shim_dir,
                                         delete=False) as shim_tmp_file:
            shim_tmp_file.write(cmd_str)
        # Copy shim to target system, under $HOME/.<randomized name>
        target_shim_file = '.{0}'.format(binascii.hexlify(os.urandom(6)))
        self.shell.send(shim_tmp_file.name, target_shim_file)
        # Remove our shim file
        try:
            os.remove(shim_tmp_file.name)
        except IOError:
            pass
        # Execute shim
        ret = self.shell.exec_cmd('/bin/sh \'$HOME/{0}\''.format(target_shim_file))
        # Remove shim from target system
        self.shell.exec_cmd('rm \'$HOME/{0}\''.format(target_shim_file))
        return ret
    def cmd_block(self, is_retry=False):
        '''
        Prepare the pre-check command to send to the subsystem
        1. execute SHIM + command
        2. check if SHIM returns a master request or if it completed
        3. handle any master request
        4. re-execute SHIM + command
        5. split SHIM results from command results
        6. return command results
        '''
        self.argv = _convert_args(self.argv)
        log.debug('Performing shimmed, blocking command as follows:\n{0}'.format(' '.join(self.argv)))
        cmd_str = self._cmd_str()
        stdout, stderr, retcode = self.shim_cmd(cmd_str)
        log.trace('STDOUT {1}\n{0}'.format(stdout, self.target['host']))
        log.trace('STDERR {1}\n{0}'.format(stderr, self.target['host']))
        log.debug('RETCODE {1}: {0}'.format(retcode, self.target['host']))
        error = self.categorize_shim_errors(stdout, stderr, retcode)
        if error:
            if error == 'Undefined SHIM state':
                # Unknown state: redeploy thin and retry once.
                self.deploy()
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    # If RSTR is not seen in both stdout and stderr then there
                    # was a thin deployment problem.
                    return 'ERROR: Failure deploying thin, undefined state: {0}'.format(stdout), stderr, retcode
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
            else:
                return 'ERROR: {0}'.format(error), stderr, retcode
        # FIXME: this discards output from ssh_shim if the shim succeeds. It should
        # always save the shim output regardless of shim success or failure.
        if re.search(RSTR_RE, stdout):
            stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
        else:
            # This is actually an error state prior to the shim but let it fall through
            pass
        if re.search(RSTR_RE, stderr):
            # Found RSTR in stderr which means SHIM completed and only
            # and remaining output is only from salt.
            stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        else:
            # RSTR was found in stdout but not stderr - which means there
            # is a SHIM command for the master.
            shim_command = re.split(r'\r?\n', stdout, 1)[0].strip()
            log.debug('SHIM retcode({0}) and command: {1}'.format(retcode, shim_command))
            if 'deploy' == shim_command and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY:
                # Target asked for salt-thin; deploy and re-run.
                self.deploy()
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    if not self.tty:
                        # If RSTR is not seen in both stdout and stderr then there
                        # was a thin deployment problem.
                        return 'ERROR: Failure deploying thin: {0}\n{1}'.format(stdout, stderr), stderr, retcode
                    elif not re.search(RSTR_RE, stdout):
                        # If RSTR is not seen in stdout with tty, then there
                        # was a thin deployment problem.
                        return 'ERROR: Failure deploying thin: {0}\n{1}'.format(stdout, stderr), stderr, retcode
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                if self.tty:
                    # With a tty, stderr is merged into stdout by ssh.
                    stderr = ''
                else:
                    stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
            elif 'ext_mods' == shim_command:
                # Target asked for the ext_mods tarball; deploy and re-run.
                self.deploy_ext()
                stdout, stderr, retcode = self.shim_cmd(cmd_str)
                if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                    # If RSTR is not seen in both stdout and stderr then there
                    # was a thin deployment problem.
                    return 'ERROR: Failure deploying ext_mods: {0}'.format(stdout), stderr, retcode
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        return stdout, stderr, retcode
    def categorize_shim_errors(self, stdout, stderr, retcode):
        '''
        Map shim output to a human-readable error message.
        Returns None when the shim ran cleanly (or the error is handled
        elsewhere), otherwise a descriptive error string.
        '''
        if re.search(RSTR_RE, stdout) and stdout != RSTR+'\n':
            # RSTR was found in stdout which means that the shim
            # functioned without *errors* . . . but there may be shim
            # commands, unless the only thing we found is RSTR
            return None
        if re.search(RSTR_RE, stderr):
            # Undefined state
            return 'Undefined SHIM state'
        if stderr.startswith('Permission denied'):
            # SHIM was not even reached
            return None
        perm_error_fmt = 'Permissions problem, target user may need '\
                         'to be root or use sudo:\n {0}'
        # Each entry is (retcodes, stderr regex, message); a match on either
        # the retcode or the regex selects the message.
        errors = [
            (
                (),
                'sudo: no tty present and no askpass program specified',
                'sudo expected a password, NOPASSWD required'
            ),
            (
                (salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,),
                'Python interpreter is too old',
                'salt requires python 2.6 or newer on target hosts, must have same major version as origin host'
            ),
            (
                (salt.defaults.exitcodes.EX_THIN_CHECKSUM,),
                'checksum mismatched',
                'The salt thin transfer was corrupted'
            ),
            (
                (salt.defaults.exitcodes.EX_SCP_NOT_FOUND,),
                'scp not found',
                'No scp binary. openssh-clients package required'
            ),
            (
                (salt.defaults.exitcodes.EX_CANTCREAT,),
                'salt path .* exists but is not a directory',
                'A necessary path for salt thin unexpectedly exists:\n ' + stderr,
            ),
            (
                (),
                'sudo: sorry, you must have a tty to run sudo',
                'sudo is configured with requiretty'
            ),
            (
                (),
                'Failed to open log file',
                perm_error_fmt.format(stderr)
            ),
            (
                (),
                'Permission denied:.*/salt',
                perm_error_fmt.format(stderr)
            ),
            (
                (),
                'Failed to create directory path.*/salt',
                perm_error_fmt.format(stderr)
            ),
            (
                (salt.defaults.exitcodes.EX_SOFTWARE,),
                'exists but is not',
                'An internal error occurred with the shim, please investigate:\n ' + stderr,
            ),
        ]
        for error in errors:
            if retcode in error[0] or re.search(error[1], stderr):
                return error[2]
        return None
    def check_refresh(self, data, ret):
        '''
        Stub out check_refresh
        '''
        return
    def module_refresh(self):
        '''
        Module refresh is not needed, stub it out
        '''
        return
def lowstate_file_refs(chunks):
    '''
    Create a list of file ref objects to reconcile
    '''
    refs = {}
    for chunk in chunks:
        env = 'base'
        chunk_refs = []
        for key in chunk:
            if key in ('__env__', 'saltenv'):
                env = chunk[key]
            elif key.startswith('__'):
                continue
            # Intentionally also reached for '__env__'/'saltenv' keys; their
            # string values simply yield no salt:// refs.
            chunk_refs.extend(salt_refs(chunk[key]))
        if chunk_refs:
            refs.setdefault(env, []).append(chunk_refs)
    return refs
def salt_refs(data):
    '''
    Pull salt file references out of the states
    '''
    proto = 'salt://'
    if isinstance(data, str) and data.startswith(proto):
        return [data]
    if isinstance(data, list):
        return [item for item in data
                if isinstance(item, str) and item.startswith(proto)]
    return []
def mod_data(fsclient):
    '''
    Generate the module arguments for the shim data

    Walks every fileserver environment for custom module directories
    (_modules, _states, ...), caches the files locally, and bundles them
    into a version-stamped ext_mods tarball.  Returns a dict with
    'version' and 'file' keys, or {} when no custom modules exist.
    '''
    # TODO, change out for a fileserver backend
    sync_refs = [
        'modules',
        'states',
        'grains',
        'renderers',
        'returners',
    ]
    ret = {}
    envs = fsclient.envs()
    # ver_base accumulates per-file hashes; its sha1 becomes the bundle version.
    ver_base = ''
    for env in envs:
        files = fsclient.file_list(env)
        for ref in sync_refs:
            mods_data = {}
            # Custom module dirs are prefixed with an underscore, e.g. _modules.
            pref = '_{0}'.format(ref)
            for fn_ in sorted(files):
                if fn_.startswith(pref):
                    if fn_.endswith(('.py', '.so', '.pyx')):
                        full = salt.utils.url.create(fn_)
                        mod_path = fsclient.cache_file(full, env)
                        if not os.path.isfile(mod_path):
                            continue
                        mods_data[os.path.basename(fn_)] = mod_path
                        chunk = salt.utils.get_hash(mod_path)
                        ver_base += chunk
            if mods_data:
                if ref in ret:
                    ret[ref].update(mods_data)
                else:
                    ret[ref] = mods_data
    if not ret:
        return {}
    # NOTE(review): ver_base is a str; hashlib.sha1() requires bytes on
    # Python 3 -- confirm this module only runs under Python 2.
    ver = hashlib.sha1(ver_base).hexdigest()
    ext_tar_path = os.path.join(
        fsclient.opts['cachedir'],
        'ext_mods.{0}.tgz'.format(ver))
    mods = {'version': ver,
            'file': ext_tar_path}
    # A tarball with this version already exists; reuse it.
    if os.path.isfile(ext_tar_path):
        return mods
    tfp = tarfile.open(ext_tar_path, 'w:gz')
    verfile = os.path.join(fsclient.opts['cachedir'], 'ext_mods.ver')
    with salt.utils.fopen(verfile, 'w+') as fp_:
        fp_.write(ver)
    tfp.add(verfile, 'ext_version')
    for ref in ret:
        for fn_ in ret[ref]:
            tfp.add(ret[ref][fn_], os.path.join(ref, fn_))
    tfp.close()
    return mods
def ssh_version():
    '''
    Returns the version of the installed ssh command
    '''
    # This function needs more granular checks and to be validated against
    # older versions of ssh
    proc = subprocess.Popen(
        ['ssh', '-V'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    # ssh prints its version banner on stderr, e.g. 'OpenSSH_7.4p1, ...'.
    stderr = proc.communicate()[1]
    try:
        return stderr.split(b',')[0].split(b'_')[1]
    except IndexError:
        # Could not parse the banner; assume a reasonable default.
        return '2.0'
def _convert_args(args):
'''
Take a list of args, and convert any dicts inside the list to keyword
args in the form of `key=value`, ready to be passed to salt-ssh
'''
converted = []
for arg in args:
if isinstance(arg, dict):
for key in list(arg.keys()):
if key == '__kwarg__':
continue
converted.append('{0}={1}'.format(key, arg[key]))
else:
converted.append(arg)
return converted
| |
from __future__ import unicode_literals
import logging
from django.contrib.auth.models import User
from django.template import Context, RequestContext, Template
from django.test import RequestFactory
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.accounts.trophies import TrophyType, trophies_registry
from reviewboard.deprecation import RemovedInReviewBoard40Warning
from reviewboard.reviews.fields import (BaseReviewRequestField,
BaseReviewRequestFieldSet,
register_review_request_fieldset,
unregister_review_request_fieldset)
from reviewboard.reviews.models import Comment
from reviewboard.testing import TestCase
class DisplayReviewRequestTrophiesTests(TestCase):
    """Unit tests for the {% display_review_request_trophies %} template tag."""
    fixtures = ['test_users']
    @classmethod
    def setUpClass(cls):
        """Set up state shared across all tests in this class."""
        super(DisplayReviewRequestTrophiesTests, cls).setUpClass()
        cls._request_factory = RequestFactory()
    def tearDown(self):
        """Reset the trophies registry so tests don't leak registrations."""
        super(DisplayReviewRequestTrophiesTests, self).tearDown()
        trophies_registry.reset()
    def test_new_style_trophy(self):
        """Testing {% display_review_request_trophies %} for new-style
        TrophyType
        """
        class SomeTrophy(TrophyType):
            category = 'trophy'
            image_width = 1
            image_height = 1
            display_format_str = 'Trophy get!'
            def qualifies(self, review_request):
                # Always qualify so the trophy is rendered.
                return True
        trophies_registry.register(SomeTrophy)
        review_request = self.create_review_request(publish=True)
        t = Template(
            '{% load reviewtags %}'
            '{% display_review_request_trophies review_request %}')
        request = self._request_factory.get('/')
        request.user = review_request.submitter
        text = t.render(RequestContext(request, {
            'review_request': review_request,
        }))
        self.assertIn('Trophy get!', text)
class ForReviewRequestFieldTests(SpyAgency, TestCase):
    """Tests for the for_review_request_field template tag."""
    @add_fixtures(['test_users'])
    def test_render_instantiated_fields(self):
        """Testing for_review_request_field does not try to render
        uninstantiated fields
        """
        # exception_id will be a unique value (the ID of the field set) that
        # causes the exception; no other exception should have this value.
        exception_id = None
        class TestField(BaseReviewRequestField):
            field_id = 'test_field'
            def __init__(self, *args, **kwargs):
                # Fail instantiation so the tag has to skip this field.
                raise Exception(exception_id)
        class TestFieldSet(BaseReviewRequestFieldSet):
            fieldset_id = 'test_fieldset'
        register_review_request_fieldset(TestFieldSet)
        TestFieldSet.add_field(TestField)
        review_request = self.create_review_request()
        from reviewboard.reviews.templatetags.reviewtags import logger
        self.spy_on(logger.exception)
        fieldset = TestFieldSet(review_request)
        exception_id = id(fieldset)
        try:
            t = Template(
                '{% load reviewtags %}'
                '{% for_review_request_field review_request fieldset %}'
                'Never reached.'
                '{% end_for_review_request_field %}'
            )
            result = t.render(Context({
                'review_request': review_request,
                'fieldset': TestFieldSet(review_request),
            }))
            # The field never instantiated, so nothing is rendered.
            self.assertEqual(result, '')
        finally:
            # Always clean up the registration, even if assertions fail.
            unregister_review_request_fieldset(TestFieldSet)
        # There should only be one logging.exception call, from the failed
        # instantiation of the TestField.
        self.assertEqual(len(logger.exception.spy.calls), 1)
        self.assertEqual(len(logger.exception.spy.calls[0].args), 3)
        # The logged exception is the one our TestField raised.
        self.assertEqual(
            logger.exception.spy.calls[0].args[2].args,
            (exception_id,))
class DiffCommentLineNumbersTests(TestCase):
    """Tests for the diff_comment_line_numbers template tag.

    Each entry in a chunk's ``lines`` list is an 8-tuple; judging from the
    usage in these tests it appears to be (row, orig_linenum, orig_text,
    orig_regions, patched_linenum, patched_text, patched_regions, is_moved)
    -- confirm against the diffviewer chunk generator.
    """
    def test_delete_single_lines(self):
        """Testing diff_comment_line_numbers with delete chunk and single
        commented line
        """
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=1),
            'chunks': [
                {
                    'change': 'delete',
                    'lines': [
                        (10, 20, 'deleted line', [], '', '', [], False),
                        # ...
                        (50, 60, 'deleted line', [], '', '', [], False),
                    ],
                },
            ],
        }))
        self.assertEqual(result, 'Line 30 (original)')
    def test_delete_mutiple_lines(self):
        """Testing diff_comment_line_numbers with delete chunk and multiple
        commented lines
        """
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=2),
            'chunks': [
                {
                    'change': 'delete',
                    'lines': [
                        (10, 20, 'deleted line', [], '', '', [], False),
                        # ...
                        (50, 60, 'deleted line', [], '', '', [], False),
                    ],
                },
            ],
        }))
        self.assertEqual(result, 'Lines 30-31 (original)')
    def test_replace_single_line(self):
        """Testing diff_comment_line_numbers with replace chunk and single
        commented line
        """
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=1),
            'chunks': [
                {
                    'change': 'replace',
                    'lines': [
                        (10, 20, 'foo', [], 20, 'replaced line', [], False),
                        # ...
                        (50, 60, 'foo', [], 60, 'replaced line', [], False),
                    ],
                },
            ],
        }))
        self.assertEqual(result,
                         'Line 30 (original), 30 (patched)')
    def test_replace_multiple_lines(self):
        """Testing diff_comment_line_numbers with replace chunk and multiple
        commented lines
        """
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=2),
            'chunks': [
                {
                    'change': 'replace',
                    'lines': [
                        (10, 20, 'foo', [], 20, 'replaced line', [], False),
                        # ...
                        (50, 60, 'foo', [], 60, 'replaced line', [], False),
                    ],
                },
            ],
        }))
        self.assertEqual(result,
                         'Lines 30-31 (original), 30-31 (patched)')
    def test_insert_single_line(self):
        """Testing diff_comment_line_numbers with insert chunk and single
        commented line
        """
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=1),
            'chunks': [
                {
                    'change': 'insert',
                    'lines': [
                        (10, '', '', [], 20, 'inserted line', [], False),
                        # ...
                        (50, '', '', [], 60, 'inserted line', [], False),
                    ],
                },
            ],
        }))
        # NOTE(review): 'Lines' (plural) for one line looks like a tag quirk;
        # this pins the current behavior.
        self.assertEqual(result, 'Lines 30 (patched)')
    def test_insert_multiple_lines(self):
        """Testing diff_comment_line_numbers with insert chunk and multiple
        commented lines
        """
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=2),
            'chunks': [
                {
                    'change': 'insert',
                    'lines': [
                        (10, '', '', [], 20, 'inserted line', [], False),
                        # ...
                        (50, '', '', [], 60, 'inserted line', [], False),
                    ],
                },
            ],
        }))
        self.assertEqual(result, 'Lines 30-31 (patched)')
    def test_fake_equal_orig(self):
        """Testing diff_comment_line_numbers with fake equal from original
        side of interdiff
        """
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=2),
            'chunks': [
                {
                    'change': 'equal',
                    'lines': [
                        # Original side is blank, so only patched numbers show.
                        (10, '', '', [], 20, 'inserted line', [], False),
                        # ...
                        (50, '', '', [], 60, 'inserted line', [], False),
                    ],
                },
            ],
        }))
        self.assertEqual(result, 'Lines 30-31 (patched)')
    def test_fake_equal_patched(self):
        """Testing diff_comment_line_numbers with fake equal from patched
        side of interdiff
        """
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=2),
            'chunks': [
                {
                    'change': 'equal',
                    'lines': [
                        # Patched side is blank, so only original numbers show.
                        (10, 20, 'deleted line', [], '', '', [], False),
                        # ...
                        (50, 60, 'deleted line', [], '', '', [], False),
                    ],
                },
            ],
        }))
        self.assertEqual(result, 'Lines 30-31 (original)')
    def test_spanning_inserts_deletes(self):
        """Testing diff_comment_line_numbers with spanning delete and insert"""
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=50),
            'chunks': [
                {
                    'change': 'delete',
                    'lines': [
                        (10, 20, 'deleted line', [], '', '', [], False),
                        # ...
                        (50, 60, 'deleted line', [], '', '', [], False),
                    ],
                },
                {
                    'change': 'insert',
                    'lines': [
                        (51, '', '', [], 61, 'inserted line', [], False),
                        # ...
                        (100, '', '', [], 110, 'inserted line', [], False),
                    ],
                },
                {
                    'change': 'equal',
                    'lines': [
                        (101, 61, 'equal line', [], 111, 'equal line', [],
                         False),
                        # ...
                        (200, 160, 'equal line', [], 210, 'equal line', [],
                         False),
                    ],
                },
            ],
        }))
        self.assertEqual(result, 'Lines 30-60 (original), 61-79 (patched)')
    def test_spanning_deletes_inserts(self):
        """Testing diff_comment_line_numbers with spanning insert and delete"""
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=50),
            'chunks': [
                {
                    'change': 'insert',
                    'lines': [
                        (10, '', '', [], 20, 'inserted line', [], False),
                        # ...
                        (50, '', '', [], 60, 'inserted line', [], False),
                    ],
                },
                {
                    'change': 'delete',
                    'lines': [
                        (51, 61, 'inserted line', [], '', '', [], False),
                        # ...
                        (100, 110, 'inserted line', [], '', '', [], False),
                    ],
                },
                {
                    'change': 'equal',
                    'lines': [
                        (101, 111, 'equal line', [], 61, 'equal line', [],
                         False),
                        # ...
                        (200, 210, 'equal line', [], 160, 'equal line', [],
                         False),
                    ],
                },
            ],
        }))
        self.assertEqual(result, 'Lines 61-79 (original), 30-60 (patched)')
    def test_spanning_last_chunk(self):
        """Testing diff_comment_line_numbers with spanning chunks through last
        chunk
        """
        t = Template(
            '{% load reviewtags %}'
            '{% diff_comment_line_numbers chunks comment %}'
        )
        result = t.render(Context({
            'comment': Comment(first_line=20, num_lines=50),
            'chunks': [
                {
                    'change': 'delete',
                    'lines': [
                        (10, 20, 'deleted line', [], '', '', [], False),
                        # ...
                        (50, 60, 'deleted line', [], '', '', [], False),
                    ],
                },
                {
                    'change': 'insert',
                    'lines': [
                        (51, '', '', [], 61, 'inserted line', [], False),
                        # ...
                        (100, '', '', [], 110, 'inserted line', [], False),
                    ],
                },
            ],
        }))
        self.assertEqual(result, 'Lines 30-60 (original), 61-79 (patched)')
class ReplySectionTests(TestCase):
    """Unit tests for the {% reply_section %} template tag."""
    fixtures = ['test_users']
    def test_with_body_top(self):
        """Testing {% reply_section %} with body_top"""
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)
        self.create_reply(review,
                          body_top_reply_to=review,
                          publish=True)
        self._test_reply_section(context_type='body_top',
                                 context_id='rcbt',
                                 review=review,
                                 expected_context_id='rcbt',
                                 expected_reply_anchor_prefix='header-reply')
    def test_with_body_bottom(self):
        """Testing {% reply_section %} with body_bottom"""
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)
        self.create_reply(review,
                          body_bottom_reply_to=review,
                          publish=True)
        self._test_reply_section(context_type='body_bottom',
                                 context_id='rcbb',
                                 review=review,
                                 expected_context_id='rcbb',
                                 expected_reply_anchor_prefix='footer-reply')
    @add_fixtures(['test_scmtools'])
    def test_with_diff_comment(self):
        """Testing {% reply_section %} with diff comment"""
        review_request = self.create_review_request(publish=True,
                                                    create_repository=True)
        diffset = self.create_diffset(review_request)
        filediff = self.create_filediff(diffset)
        review = self.create_review(review_request, publish=True)
        comment = self.create_diff_comment(review, filediff)
        reply = self.create_reply(review, publish=True)
        self.create_diff_comment(reply, filediff,
                                 reply_to=comment)
        # For comment replies, the rendered context ID has the comment's pk
        # appended.
        self._test_reply_section(context_type='diff_comments',
                                 context_id='rc',
                                 review=review,
                                 comment=comment,
                                 expected_context_id='rc%s' % comment.pk,
                                 expected_reply_anchor_prefix='comment')
    def test_with_general_comment(self):
        """Testing {% reply_section %} with general comment"""
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)
        comment = self.create_general_comment(review)
        reply = self.create_reply(review, publish=True)
        self.create_general_comment(reply,
                                    reply_to=comment)
        self._test_reply_section(context_type='general_comments',
                                 context_id='rc',
                                 review=review,
                                 comment=comment,
                                 expected_context_id='rcg%s' % comment.pk,
                                 expected_reply_anchor_prefix='gcomment')
    def test_with_file_attachment_comment(self):
        """Testing {% reply_section %} with file attachment comment"""
        review_request = self.create_review_request(publish=True)
        file_attachment = self.create_file_attachment(review_request)
        review = self.create_review(review_request, publish=True)
        comment = self.create_file_attachment_comment(review, file_attachment)
        reply = self.create_reply(review, publish=True)
        self.create_file_attachment_comment(reply, file_attachment,
                                            reply_to=comment)
        self._test_reply_section(context_type='file_attachment_comments',
                                 context_id='rc',
                                 review=review,
                                 comment=comment,
                                 expected_context_id='rcf%s' % comment.pk,
                                 expected_reply_anchor_prefix='fcomment')
    def test_with_screenshot_comment(self):
        """Testing {% reply_section %} with screenshot comment"""
        review_request = self.create_review_request(publish=True)
        screenshot = self.create_screenshot(review_request)
        review = self.create_review(review_request, publish=True)
        comment = self.create_screenshot_comment(review, screenshot)
        reply = self.create_reply(review, publish=True)
        self.create_screenshot_comment(reply, screenshot,
                                       reply_to=comment)
        self._test_reply_section(context_type='screenshot_comments',
                                 context_id='rc',
                                 review=review,
                                 comment=comment,
                                 expected_context_id='rcs%s' % comment.pk,
                                 expected_reply_anchor_prefix='scomment')
    def _test_reply_section(self, context_type, context_id, review,
                            expected_context_id, expected_reply_anchor_prefix,
                            comment=None):
        """Render the template tag and check the output.
        Args:
            context_type (unicode):
                The context type to pass to the template tag.
            context_id (unicode):
                The context ID to pass to the template tag.
            review (reviewboard.reviews.models.review.Review):
                The review being replied to.
            expected_context_id (unicode):
                The expected rendered context ID (found in the element ID).
            expected_reply_anchor_prefix (unicode):
                The expected reply anchor (found in the
                ``data-reply-anchor-prefix=`` attribute).
            comment (reviewboard.reviews.models.base_comment.BaseComment,
                     optional):
                The comment being replied to, if replying to a comment.
        Raises:
            AssertionError:
                The rendered content didn't match the expected criteria.
        """
        request = self.create_http_request()
        t = Template(
            r'{% load reviewtags %}'
            r'{% reply_section review comment context_type context_id %}'
        )
        html = t.render(RequestContext(request, {
            'review': review,
            'comment': comment,
            'context_type': context_type,
            'context_id': context_id,
        }))
        # Build a regex matching the expected opening tag of the reply
        # section, attribute by attribute.
        s = [
            '<div id="%s-%s"\\s+'
            'class="comment-section"\\s+'
            'data-context-type="%s"\\s+'
            'data-reply-anchor-prefix="%s"\\s+'
            % (expected_context_id, review.pk, context_type,
               expected_reply_anchor_prefix)
        ]
        if comment:
            # Comment replies additionally carry the comment's ID.
            s.append('data-context-id="%s"' % comment.pk)
        s.append('>')
        self.assertRegexpMatches(html, ''.join(s))
class CommentRepliesTests(TestCase):
    """Unit tests for the comment_replies template tag."""

    fixtures = ['test_users']

    @add_fixtures(['test_scmtools'])
    def test_diff_comments(self):
        """Testing comment_replies for diff comments"""
        self._test_diff_comments(user_is_owner=False)

    @add_fixtures(['test_scmtools'])
    def test_diff_comments_with_draft(self):
        """Testing comment_replies for diff comments with draft"""
        self._test_diff_comments(user_is_owner=True)

    def test_general_comments(self):
        """Testing comment_replies for general comments"""
        self._test_general_comments(user_is_owner=False)

    def test_general_comments_with_draft(self):
        """Testing comment_replies for general comments with draft"""
        self._test_general_comments(user_is_owner=True)

    def test_file_attachment_comments(self):
        """Testing comment_replies for file attachment comments"""
        self._test_file_attachment_comments(user_is_owner=False)

    def test_file_attachment_comments_with_draft(self):
        """Testing comment_replies for file attachment comments with draft"""
        self._test_file_attachment_comments(user_is_owner=True)

    def test_screenshot_comments(self):
        """Testing comment_replies for screenshot comments"""
        self._test_screenshot_comments(user_is_owner=False)

    def test_screenshot_comments_with_draft(self):
        """Testing comment_replies for screenshot comments with draft"""
        self._test_screenshot_comments(user_is_owner=True)

    def _test_diff_comments(self, user_is_owner):
        """Set up a published diff comment and check its rendered replies."""
        review_request = self.create_review_request(publish=True,
                                                    create_repository=True)
        diffset = self.create_diffset(review_request)
        filediff = self.create_filediff(diffset)
        review = self.create_review(review_request, publish=True)
        comment = self.create_diff_comment(review, filediff)

        self._check_replies(
            review,
            comment,
            self.create_diff_comment,
            {
                'filediff': filediff,
            },
            user_is_owner)

    def _test_general_comments(self, user_is_owner):
        """Set up a published general comment and check its replies."""
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)
        comment = self.create_general_comment(review)

        self._check_replies(
            review,
            comment,
            self.create_general_comment,
            {},
            user_is_owner)

    def _test_file_attachment_comments(self, user_is_owner):
        """Set up a published file attachment comment and check replies."""
        review_request = self.create_review_request(publish=True)
        file_attachment = self.create_file_attachment(review_request)
        review = self.create_review(review_request, publish=True)
        comment = self.create_file_attachment_comment(review, file_attachment)

        self._check_replies(
            review,
            comment,
            self.create_file_attachment_comment,
            {
                'file_attachment': file_attachment,
            },
            user_is_owner)

    def _test_screenshot_comments(self, user_is_owner):
        """Set up a published screenshot comment and check its replies."""
        review_request = self.create_review_request(publish=True)
        screenshot = self.create_screenshot(review_request)
        review = self.create_review(review_request, publish=True)
        comment = self.create_screenshot_comment(review, screenshot)

        self._check_replies(
            review,
            comment,
            self.create_screenshot_comment,
            {
                'screenshot': screenshot,
            },
            user_is_owner)

    def _check_replies(self, review, comment, create_comment_func,
                       create_comment_kwargs, user_is_owner):
        """Render comment_replies and verify which reply comments appear.

        Creates two published replies (holding three reply comments) and
        one draft reply, then checks that published reply comments are
        always rendered and that the draft reply comment is visible only
        to its owner.
        """
        reply_kwargs = {
            'review': review,
            'user': review.user,
        }

        # Every reply comment below replies to the same original comment.
        create_comment_kwargs['reply_to'] = comment

        reply1 = self.create_reply(publish=True, **reply_kwargs)
        reply_comment1 = create_comment_func(reply1, **create_comment_kwargs)
        reply_comment2 = create_comment_func(reply1, **create_comment_kwargs)

        reply2 = self.create_reply(publish=True, **reply_kwargs)
        reply_comment3 = create_comment_func(reply2, **create_comment_kwargs)

        # The draft reply should only be rendered for its owner.
        reply3 = self.create_reply(publish=False, **reply_kwargs)
        reply_comment4 = create_comment_func(reply3, **create_comment_kwargs)

        t = Template(
            '{% load reviewtags %}'
            '{% comment_replies review comment "123" %}'
        )

        request = RequestFactory().request()

        if user_is_owner:
            request.user = review.user
        else:
            request.user = User.objects.create_user(username='test-user',
                                                    email='user@example.com')

        html = t.render(RequestContext(request, {
            'comment': comment,
            'review': review,
        }))

        # Published reply comments must always be present.
        self.assertIn('data-comment-id="%s"' % reply_comment1.pk, html)
        self.assertIn('data-comment-id="%s"' % reply_comment2.pk, html)
        self.assertIn('data-comment-id="%s"' % reply_comment3.pk, html)

        if user_is_owner:
            self.assertIn('<li class="draft" data-comment-id="%s"'
                          % reply_comment4.pk,
                          html)
        else:
            self.assertNotIn('data-comment-id="%s"' % reply_comment4.pk, html)
class ReviewBodyRepliesTests(TestCase):
    """Unit tests for the review_body_replies template tag."""

    fixtures = ['test_users']

    def test_body_top(self):
        """Testing review_body_replies for body_top"""
        self._test_body_field('body_top', user_is_owner=False)

    def test_body_top_with_draft(self):
        """Testing review_body_replies for body_top with draft"""
        self._test_body_field('body_top', user_is_owner=True)

    def test_body_bottom(self):
        """Testing review_body_replies for body_bottom"""
        self._test_body_field('body_bottom', user_is_owner=False)

    def test_body_bottom_with_draft(self):
        """Testing review_body_replies for body_bottom with draft"""
        self._test_body_field('body_bottom', user_is_owner=True)

    def _test_body_field(self, body_field, user_is_owner):
        """Render review_body_replies for a body field and verify output.

        Creates two published replies and one draft reply to the given
        body field, then checks that published replies always render and
        that the draft reply renders only for its owner.
        """
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)

        reply_kwargs = {
            'review': review,
            'user': review.user,
            '%s_reply_to' % body_field: review,
            body_field: 'Some reply',
        }

        reply1 = self.create_reply(publish=True, **reply_kwargs)
        reply2 = self.create_reply(publish=True, **reply_kwargs)

        # The draft reply should only be rendered for its owner.
        reply3 = self.create_reply(publish=False, **reply_kwargs)

        t = Template(
            '{%% load reviewtags %%}'
            '{%% review_body_replies review "%s" "123" %%}'
            % body_field
        )

        request = RequestFactory().request()

        if user_is_owner:
            request.user = review.user
        else:
            request.user = User.objects.create_user(username='test-user',
                                                    email='user@example.com')

        html = t.render(RequestContext(request, {
            'review': review,
        }))

        self.assertIn('id="comment_123-%s"' % reply1.pk, html)
        self.assertIn('id="comment_123-%s"' % reply2.pk, html)

        if user_is_owner:
            self.assertIn('id="draftcomment_123-%s"' % reply3.pk, html)
        else:
            self.assertNotIn('id="comment_123-%s"' % reply3.pk, html)
            self.assertNotIn('id="draftcomment_123-%s"' % reply3.pk, html)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
import uuid
from lxml import etree
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import security_groups
from nova.api.openstack import xmlutil
from nova import compute
from nova import context
import nova.db
from nova import exception
from nova.network import quantumv2
from nova.network.quantumv2 import api as quantum_api
from nova.network.security_group import quantum_driver
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack.compute.contrib import test_security_groups
from nova.tests.api.openstack import fakes
from quantumclient.common import exceptions as q_exc
class TestQuantumSecurityGroupsTestCase(test.TestCase):
    """Base test case that routes nova's security group API to Quantum.

    setUp() switches the configured security group API to 'quantum' and
    replaces the Quantum client factory with the in-memory MockClient;
    tearDown() restores both so later tests are unaffected.
    """

    def setUp(self):
        super(TestQuantumSecurityGroupsTestCase, self).setUp()
        cfg.CONF.set_override('security_group_api', 'quantum')
        # Save the real factory so tearDown() can restore it.
        self.original_client = quantumv2.get_client
        quantumv2.get_client = get_client

    def tearDown(self):
        quantumv2.get_client = self.original_client
        get_client()._reset()
        # Fix: clear the override set in setUp() so the 'quantum' security
        # group API setting does not leak into unrelated tests.
        cfg.CONF.clear_override('security_group_api')
        super(TestQuantumSecurityGroupsTestCase, self).tearDown()
class TestQuantumSecurityGroups(
        test_security_groups.TestSecurityGroups,
        TestQuantumSecurityGroupsTestCase):
    """Runs the generic security group tests against the Quantum driver.

    Inherited tests that do not apply to Quantum's semantics are
    overridden below with empty bodies explaining why.
    """

    def _create_sg_template(self, **kwargs):
        """Create a security group from a template and return the result."""
        sg = test_security_groups.security_group_template(**kwargs)
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
        return self.controller.create(req, {'security_group': sg})

    def _create_network(self):
        """Create a fake network with a single 10.0.0.0/24 subnet."""
        body = {'network': {'name': 'net1'}}
        quantum = get_client()
        net = quantum.create_network(body)
        body = {'subnet': {'network_id': net['network']['id'],
                           'cidr': '10.0.0.0/24'}}
        quantum.create_subnet(body)
        return net

    def _create_port(self, **kwargs):
        """Create a fake port, forwarding only the recognized fields."""
        body = {'port': {}}
        fields = ['security_groups', 'device_id', 'network_id',
                  'port_security_enabled']
        for field in fields:
            if field in kwargs:
                body['port'][field] = kwargs[field]
        quantum = get_client()
        return quantum.create_port(body)

    def test_create_security_group_with_no_description(self):
        # Quantum's security group description field is optional.
        pass

    def test_create_security_group_with_blank_name(self):
        # Quantum's security group name field is optional.
        pass

    def test_create_security_group_with_whitespace_name(self):
        # Quantum allows security group name to be whitespace.
        pass

    def test_create_security_group_with_blank_description(self):
        # Quantum's security group description field is optional.
        pass

    def test_create_security_group_with_whitespace_description(self):
        # Quantum allows description to be whitespace.
        pass

    def test_create_security_group_with_duplicate_name(self):
        # Quantum allows duplicate names for security groups.
        pass

    def test_create_security_group_non_string_name(self):
        # Quantum allows security group name to be non string.
        pass

    def test_create_security_group_non_string_description(self):
        # Quantum allows non string description.
        pass

    def test_create_security_group_quota_limit(self):
        # Enforced by Quantum server.
        pass

    def test_update_security_group(self):
        # Enforced by Quantum server.
        pass

    def test_get_security_group_list(self):
        self._create_sg_template().get('security_group')
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
        list_dict = self.controller.index(req)
        # Two groups: the default group plus the one created above.
        self.assertEquals(len(list_dict['security_groups']), 2)

    def test_get_security_group_list_all_tenants(self):
        pass

    def test_get_security_group_by_instance(self):
        sg = self._create_sg_template().get('security_group')
        net = self._create_network()
        self._create_port(
            network_id=net['network']['id'], security_groups=[sg['id']],
            device_id=test_security_groups.FAKE_UUID1)
        expected = [{'rules': [], 'tenant_id': 'fake_tenant', 'id': sg['id'],
                     'name': 'test', 'description': 'test-description'}]
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       test_security_groups.return_server_by_uuid)
        req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/os-security-groups'
                                      % test_security_groups.FAKE_UUID1)
        res_dict = self.server_controller.index(
            req, test_security_groups.FAKE_UUID1)['security_groups']
        self.assertEquals(expected, res_dict)

    def test_get_security_group_by_id(self):
        sg = self._create_sg_template().get('security_group')
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
                                      % sg['id'])
        res_dict = self.controller.show(req, sg['id'])
        expected = {'security_group': sg}
        self.assertEquals(res_dict, expected)

    def test_delete_security_group_by_id(self):
        sg = self._create_sg_template().get('security_group')
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
                                      sg['id'])
        self.controller.delete(req, sg['id'])

    def test_delete_security_group_in_use(self):
        sg = self._create_sg_template().get('security_group')
        self._create_network()
        fake_instance = {'project_id': 'fake_tenant',
                         'availability_zone': 'zone_one',
                         'security_groups': [],
                         'uuid': str(uuid.uuid4()),
                         'display_name': 'test_instance'}
        quantum = quantum_api.API()
        quantum.allocate_for_instance(context.get_admin_context(),
                                      fake_instance,
                                      security_groups=[sg['id']])
        # Deleting a group still attached to a port must be a 400.
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
                                      % sg['id'])
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          req, sg['id'])

    def test_associate_non_running_instance(self):
        # Quantum does not care if the instance is running or not. When the
        # instance is detected by quantum it will push down the security
        # group policy to it.
        pass

    def test_associate_already_associated_security_group_to_instance(self):
        # Quantum security groups does not raise an error if you update a
        # port adding a security group to it that was already associated
        # to the port. This is because PUT semantics are used.
        pass

    def test_associate(self):
        sg = self._create_sg_template().get('security_group')
        net = self._create_network()
        self._create_port(
            network_id=net['network']['id'], security_groups=[sg['id']],
            device_id=test_security_groups.FAKE_UUID1)

        self.stubs.Set(nova.db, 'instance_get',
                       test_security_groups.return_server)
        body = dict(addSecurityGroup=dict(name="test"))

        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.manager._addSecurityGroup(req, '1', body)

    def test_associate_port_security_enabled_true(self):
        sg = self._create_sg_template().get('security_group')
        net = self._create_network()
        self._create_port(
            network_id=net['network']['id'], security_groups=[sg['id']],
            port_security_enabled=True,
            device_id=test_security_groups.FAKE_UUID1)

        self.stubs.Set(nova.db, 'instance_get',
                       test_security_groups.return_server)
        body = dict(addSecurityGroup=dict(name="test"))

        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.manager._addSecurityGroup(req, '1', body)

    def test_associate_port_security_enabled_false(self):
        # Associating a security group to a port with port security
        # disabled must fail with a 400.
        self._create_sg_template().get('security_group')
        net = self._create_network()
        self._create_port(
            network_id=net['network']['id'], port_security_enabled=False,
            device_id=test_security_groups.FAKE_UUID1)

        self.stubs.Set(nova.db, 'instance_get',
                       test_security_groups.return_server)
        body = dict(addSecurityGroup=dict(name="test"))

        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._addSecurityGroup,
                          req, '1', body)

    def test_disassociate_by_non_existing_security_group_name(self):
        self.stubs.Set(nova.db, 'instance_get',
                       test_security_groups.return_server)
        body = dict(removeSecurityGroup=dict(name='non-existing'))

        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._removeSecurityGroup, req, '1', body)

    def test_disassociate_non_running_instance(self):
        # Quantum does not care if the instance is running or not. When the
        # instance is detected by quantum it will push down the security
        # group policy to it.
        pass

    def test_disassociate_already_associated_security_group_to_instance(self):
        # Quantum security groups does not raise an error if you update a
        # port adding a security group to it that was already associated
        # to the port. This is because PUT semantics are used.
        pass

    def test_disassociate(self):
        sg = self._create_sg_template().get('security_group')
        net = self._create_network()
        self._create_port(
            network_id=net['network']['id'], security_groups=[sg['id']],
            device_id=test_security_groups.FAKE_UUID1)

        self.stubs.Set(nova.db, 'instance_get',
                       test_security_groups.return_server)
        body = dict(removeSecurityGroup=dict(name="test"))

        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.manager._removeSecurityGroup(req, '1', body)

    def test_get_instances_security_groups_bindings(self):
        sg1 = self._create_sg_template(name='test1').get('security_group')
        sg2 = self._create_sg_template(name='test2').get('security_group')
        # test name='' is replaced with id
        sg3 = self._create_sg_template(name='').get('security_group')
        net = self._create_network()
        self._create_port(
            network_id=net['network']['id'], security_groups=[sg1['id'],
                                                              sg2['id']],
            device_id=test_security_groups.FAKE_UUID1)
        self._create_port(
            network_id=net['network']['id'], security_groups=[sg2['id'],
                                                              sg3['id']],
            device_id=test_security_groups.FAKE_UUID2)
        expected = {test_security_groups.FAKE_UUID1: [{'name': sg1['name']},
                                                      {'name': sg2['name']}],
                    test_security_groups.FAKE_UUID2: [{'name': sg2['name']},
                                                      {'name': sg3['id']}]}
        security_group_api = self.controller.security_group_api
        bindings = (
            security_group_api.get_instances_security_groups_bindings(
                context.get_admin_context()))
        self.assertEquals(bindings, expected)

    def test_get_instance_security_groups(self):
        sg1 = self._create_sg_template(name='test1').get('security_group')
        sg2 = self._create_sg_template(name='test2').get('security_group')
        # test name='' is replaced with id
        sg3 = self._create_sg_template(name='').get('security_group')
        net = self._create_network()
        self._create_port(
            network_id=net['network']['id'], security_groups=[sg1['id'],
                                                              sg2['id'],
                                                              sg3['id']],
            device_id=test_security_groups.FAKE_UUID1)

        expected = [{'name': sg1['name']}, {'name': sg2['name']},
                    {'name': sg3['id']}]
        security_group_api = self.controller.security_group_api
        sgs = security_group_api.get_instance_security_groups(
            context.get_admin_context(), test_security_groups.FAKE_UUID1)
        self.assertEquals(sgs, expected)

    def test_create_port_with_sg_and_port_security_enabled_true(self):
        sg1 = self._create_sg_template(name='test1').get('security_group')
        net = self._create_network()
        self._create_port(
            network_id=net['network']['id'], security_groups=[sg1['id']],
            port_security_enabled=True,
            device_id=test_security_groups.FAKE_UUID1)
        security_group_api = self.controller.security_group_api
        sgs = security_group_api.get_instance_security_groups(
            context.get_admin_context(), test_security_groups.FAKE_UUID1)
        self.assertEquals(sgs, [{'name': 'test1'}])

    def test_create_port_with_sg_and_port_security_enabled_false(self):
        # Security groups cannot be applied to a port with port security
        # disabled.
        sg1 = self._create_sg_template(name='test1').get('security_group')
        net = self._create_network()
        self.assertRaises(exception.SecurityGroupCannotBeApplied,
                          self._create_port,
                          network_id=net['network']['id'],
                          security_groups=[sg1['id']],
                          port_security_enabled=False,
                          device_id=test_security_groups.FAKE_UUID1)
class TestQuantumSecurityGroupRulesTestCase(TestQuantumSecurityGroupsTestCase):
    """Base test case that pre-populates two security group templates."""

    def setUp(self):
        super(TestQuantumSecurityGroupRulesTestCase, self).setUp()
        id1 = '11111111-1111-1111-1111-111111111111'
        sg_template1 = test_security_groups.security_group_template(
            security_group_rules=[], id=id1)
        id2 = '22222222-2222-2222-2222-222222222222'
        sg_template2 = test_security_groups.security_group_template(
            security_group_rules=[], id=id2)
        self.controller_sg = security_groups.SecurityGroupController()
        quantum = get_client()
        quantum._fake_security_groups[id1] = sg_template1
        quantum._fake_security_groups[id2] = sg_template2

    def tearDown(self):
        quantumv2.get_client = self.original_client
        get_client()._reset()
        # Fix: this previously named TestQuantumSecurityGroupsTestCase
        # (the parent) in super(), which skipped the direct parent's
        # tearDown() in the MRO. Name this class instead so the full
        # cleanup chain runs.
        super(TestQuantumSecurityGroupRulesTestCase, self).tearDown()
class TestQuantumSecurityGroupRules(
        test_security_groups.TestSecurityGroupRules,
        TestQuantumSecurityGroupRulesTestCase):
    """Security group rule tests run against the Quantum driver."""

    def test_create_add_existing_rules_by_cidr(self):
        """Re-creating an identical CIDR-based rule must be rejected."""
        group = test_security_groups.security_group_template()
        request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
        self.controller_sg.create(request, {'security_group': group})

        rule = test_security_groups.security_group_rule_template(
            cidr='15.0.0.0/8', parent_group_id=self.sg2['id'])
        request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.controller.create(request, {'security_group_rule': rule})

        # The second, identical create must fail with a 400.
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          request, {'security_group_rule': rule})

    def test_create_add_existing_rules_by_group_id(self):
        """Re-creating an identical source-group rule must be rejected."""
        group = test_security_groups.security_group_template()
        request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
        self.controller_sg.create(request, {'security_group': group})

        rule = test_security_groups.security_group_rule_template(
            group=self.sg1['id'], parent_group_id=self.sg2['id'])
        request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.controller.create(request, {'security_group_rule': rule})

        # The second, identical create must fail with a 400.
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          request, {'security_group_rule': rule})

    def test_delete(self):
        """A created rule can be deleted by its ID."""
        rule = test_security_groups.security_group_rule_template(
            parent_group_id=self.sg2['id'])

        request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        created = self.controller.create(request,
                                         {'security_group_rule': rule})
        new_rule = created['security_group_rule']

        request = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-rules/%s' % new_rule['id'])
        self.controller.delete(request, new_rule['id'])

    def test_create_rule_quota_limit(self):
        # Enforced by quantum
        pass
class TestQuantumSecurityGroupsXMLDeserializer(
        test_security_groups.TestSecurityGroupXMLDeserializer,
        TestQuantumSecurityGroupsTestCase):
    """Re-runs the XML deserializer tests with the Quantum driver active."""
    pass
class TestQuantumSecurityGroupsXMLSerializer(
        test_security_groups.TestSecurityGroupXMLSerializer,
        TestQuantumSecurityGroupsTestCase):
    """Re-runs the XML serializer tests with the Quantum driver active."""
    pass
class TestQuantumSecurityGroupsOutputTest(TestQuantumSecurityGroupsTestCase):
    """Tests security group information in server API responses (JSON)."""

    content_type = 'application/json'

    def setUp(self):
        super(TestQuantumSecurityGroupsOutputTest, self).setUp()
        fakes.stub_out_nw_api(self.stubs)
        self.controller = security_groups.SecurityGroupController()
        self.stubs.Set(compute.api.API, 'get',
                       test_security_groups.fake_compute_get)
        self.stubs.Set(compute.api.API, 'get_all',
                       test_security_groups.fake_compute_get_all)
        self.stubs.Set(compute.api.API, 'create',
                       test_security_groups.fake_compute_create)
        self.stubs.Set(quantum_driver.SecurityGroupAPI,
                       'get_instances_security_groups_bindings',
                       (test_security_groups.
                        fake_get_instances_security_groups_bindings))
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Security_groups'])

    def _make_request(self, url, body=None):
        """Issue a GET (or POST when a body is given) against the fake app."""
        req = webob.Request.blank(url)
        if body:
            req.method = 'POST'
            req.body = self._encode_body(body)
        req.content_type = self.content_type
        req.headers['Accept'] = self.content_type
        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
        return res

    def _encode_body(self, body):
        return jsonutils.dumps(body)

    def _get_server(self, body):
        return jsonutils.loads(body).get('server')

    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')

    def _get_groups(self, server):
        return server.get('security_groups')

    def test_create(self):
        url = '/v2/fake/servers'
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
        # Fix: renamed from `security_groups` to avoid shadowing the
        # `security_groups` extension module imported at the top of this
        # file.
        requested_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
        for security_group in requested_groups:
            sg = test_security_groups.security_group_template(
                name=security_group['name'])
            self.controller.create(req, {'security_group': sg})

        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
                      security_groups=requested_groups)
        res = self._make_request(url, {'server': server})
        self.assertEqual(res.status_int, 202)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)

    def test_create_server_get_default_security_group(self):
        url = '/v2/fake/servers'
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        res = self._make_request(url, {'server': server})
        self.assertEqual(res.status_int, 202)
        server = self._get_server(res.body)
        group = self._get_groups(server)[0]
        self.assertEquals(group.get('name'), 'default')

    def test_show(self):
        def fake_get_instance_security_groups(inst, context, id):
            return [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]

        self.stubs.Set(quantum_driver.SecurityGroupAPI,
                       'get_instance_security_groups',
                       fake_get_instance_security_groups)

        url = '/v2/fake/servers'
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
        # Fix: renamed from `security_groups` to avoid shadowing the
        # imported extension module (see test_create).
        requested_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
        for security_group in requested_groups:
            sg = test_security_groups.security_group_template(
                name=security_group['name'])
            self.controller.create(req, {'security_group': sg})
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
                      security_groups=requested_groups)

        res = self._make_request(url, {'server': server})
        self.assertEqual(res.status_int, 202)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)

        # Test that show (GET) returns the same information as create (POST)
        url = '/v2/fake/servers/' + test_security_groups.UUID3
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)

    def test_detail(self):
        url = '/v2/fake/servers/detail'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        for i, server in enumerate(self._get_servers(res.body)):
            for j, group in enumerate(self._get_groups(server)):
                name = 'fake-%s-%s' % (i, j)
                self.assertEqual(group.get('name'), name)

    def test_no_instance_passthrough_404(self):
        def fake_compute_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')

        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 404)
class TestQuantumSecurityGroupsOutputXMLTest(
        TestQuantumSecurityGroupsOutputTest):
    """Re-runs the security group output tests with XML payloads."""

    content_type = 'application/xml'

    class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
        # Just enough of a server template to serialize the create
        # requests issued by these tests.
        def construct(self):
            root = xmlutil.TemplateElement('server', selector='server')
            root.set('name')
            root.set('id')
            root.set('imageRef')
            root.set('flavorRef')
            elem = xmlutil.SubTemplateElement(root, 'security_groups')
            sg = xmlutil.SubTemplateElement(elem, 'security_group',
                                            selector='security_groups')
            sg.set('name')
            return xmlutil.MasterTemplate(root, 1,
                                          nsmap={None: xmlutil.XMLNS_V11})

    def _encode_body(self, body):
        # Serialize request bodies as XML instead of JSON.
        serializer = self.MinimalCreateServerTemplate()
        return serializer.serialize(body)

    def _get_server(self, body):
        return etree.XML(body)

    def _get_servers(self, body):
        return etree.XML(body).getchildren()

    def _get_groups(self, server):
        # NOTE(vish): we are adding security groups without an extension
        #             namespace so we don't break people using the existing
        #             functionality, but that means we need to use find with
        #             the existing server namespace.
        namespace = server.nsmap[None]
        return server.find('{%s}security_groups' % namespace).getchildren()
def get_client(context=None, admin=False):
    """Return the shared in-memory Quantum client stub.

    The signature mirrors quantumv2.get_client(); both arguments are
    accepted for compatibility and ignored.
    """
    return MockClient()
class MockClient(object):
    """In-memory stand-in for the Quantum client used by these tests."""

    # Needs to be global to survive multiple calls to get_client.
    _fake_security_groups = {}
    _fake_ports = {}
    _fake_networks = {}
    _fake_subnets = {}
    _fake_security_group_rules = {}

    def __init__(self):
        # add default security group
        if not len(self._fake_security_groups):
            ret = {'name': 'default', 'description': 'default',
                   'tenant_id': 'fake_tenant', 'security_group_rules': [],
                   'id': str(uuid.uuid4())}
            self._fake_security_groups[ret['id']] = ret

    def _reset(self):
        """Clear all shared state between tests."""
        self._fake_security_groups.clear()
        self._fake_ports.clear()
        self._fake_networks.clear()
        self._fake_subnets.clear()
        self._fake_security_group_rules.clear()

    def create_security_group(self, body=None):
        s = body.get('security_group')
        if len(s.get('name')) > 255 or len(s.get('description')) > 255:
            # Fix: message previously read 'name great than 255' even
            # though the description is validated too.
            msg = 'Security Group name or description greater than 255'
            raise q_exc.QuantumClientException(message=msg, status_code=401)
        ret = {'name': s.get('name'), 'description': s.get('description'),
               'tenant_id': 'fake_tenant', 'security_group_rules': [],
               'id': str(uuid.uuid4())}
        self._fake_security_groups[ret['id']] = ret
        return {'security_group': ret}

    def create_network(self, body):
        n = body.get('network')
        ret = {'status': 'ACTIVE', 'subnets': [], 'name': n.get('name'),
               'admin_state_up': n.get('admin_state_up', True),
               'tenant_id': 'fake_tenant',
               'id': str(uuid.uuid4())}
        if 'port_security_enabled' in n:
            ret['port_security_enabled'] = n['port_security_enabled']
        self._fake_networks[ret['id']] = ret
        return {'network': ret}

    def create_subnet(self, body):
        s = body.get('subnet')
        try:
            net = self._fake_networks[s.get('network_id')]
        except KeyError:
            msg = 'Network %s not found' % s.get('network_id')
            raise q_exc.QuantumClientException(message=msg, status_code=404)
        ret = {'name': s.get('name'), 'network_id': s.get('network_id'),
               'tenant_id': 'fake_tenant', 'cidr': s.get('cidr'),
               'id': str(uuid.uuid4()), 'gateway_ip': '10.0.0.1'}
        net['subnets'].append(ret['id'])
        self._fake_networks[net['id']] = net
        self._fake_subnets[ret['id']] = ret
        return {'subnet': ret}

    def create_port(self, body):
        p = body.get('port')
        ret = {'status': 'ACTIVE', 'id': str(uuid.uuid4()),
               'mac_address': p.get('mac_address', 'fa:16:3e:b8:f5:fb'),
               'device_id': p.get('device_id', str(uuid.uuid4())),
               'admin_state_up': p.get('admin_state_up', True),
               'security_groups': p.get('security_groups', []),
               'network_id': p.get('network_id')}

        network = self._fake_networks[p['network_id']]
        # Port-level port_security_enabled wins over the network-level one.
        if 'port_security_enabled' in p:
            ret['port_security_enabled'] = p['port_security_enabled']
        elif 'port_security_enabled' in network:
            ret['port_security_enabled'] = network['port_security_enabled']

        port_security = ret.get('port_security_enabled', True)
        # port_security must be True if security groups are present
        if not port_security and ret['security_groups']:
            raise exception.SecurityGroupCannotBeApplied()

        if network['subnets']:
            ret['fixed_ips'] = [{'subnet_id': network['subnets'][0],
                                 'ip_address': '10.0.0.1'}]
        if not ret['security_groups'] and (port_security is None or
                                           port_security is True):
            # Fall back to the 'default' security group, if one exists.
            for security_group in self._fake_security_groups.values():
                if security_group['name'] == 'default':
                    ret['security_groups'] = [security_group['id']]
                    break
        self._fake_ports[ret['id']] = ret
        return {'port': ret}

    def create_security_group_rule(self, body):
        # does not handle bulk case so just picks rule[0]
        r = body.get('security_group_rules')[0]
        fields = ['direction', 'protocol', 'port_range_min', 'port_range_max',
                  'ethertype', 'remote_ip_prefix', 'tenant_id',
                  'security_group_id', 'remote_group_id']
        ret = {}
        for field in fields:
            ret[field] = r.get(field)
        ret['id'] = str(uuid.uuid4())
        self._fake_security_group_rules[ret['id']] = ret
        return {'security_group_rules': [ret]}

    def show_security_group(self, security_group, **_params):
        try:
            sg = self._fake_security_groups[security_group]
        except KeyError:
            msg = 'Security Group %s not found' % security_group
            raise q_exc.QuantumClientException(message=msg, status_code=404)
        for security_group_rule in self._fake_security_group_rules.values():
            # Fix: guard against appending the same rule again on every
            # show call, which duplicated rules on repeated lookups.
            if (security_group_rule['security_group_id'] == sg['id'] and
                    security_group_rule not in sg['security_group_rules']):
                sg['security_group_rules'].append(security_group_rule)
        return {'security_group': sg}

    def show_security_group_rule(self, security_group_rule, **_params):
        try:
            return {'security_group_rule':
                    self._fake_security_group_rules[security_group_rule]}
        except KeyError:
            msg = 'Security Group rule %s not found' % security_group_rule
            raise q_exc.QuantumClientException(message=msg, status_code=404)

    def show_network(self, network, **_params):
        try:
            return {'network':
                    self._fake_networks[network]}
        except KeyError:
            msg = 'Network %s not found' % network
            raise q_exc.QuantumClientException(message=msg, status_code=404)

    def show_port(self, port, **_params):
        try:
            return {'port':
                    self._fake_ports[port]}
        except KeyError:
            msg = 'Port %s not found' % port
            raise q_exc.QuantumClientException(message=msg, status_code=404)

    def show_subnet(self, subnet, **_params):
        try:
            return {'subnet':
                    self._fake_subnets[subnet]}
        except KeyError:
            # Fix: message previously said 'Port ... not found'.
            msg = 'Subnet %s not found' % subnet
            raise q_exc.QuantumClientException(message=msg, status_code=404)

    def list_security_groups(self, **_params):
        ret = []
        for security_group in self._fake_security_groups.values():
            names = _params.get('name')
            if names:
                if not isinstance(names, list):
                    names = [names]
                for name in names:
                    if security_group.get('name') == name:
                        ret.append(security_group)
            ids = _params.get('id')
            if ids:
                if not isinstance(ids, list):
                    ids = [ids]
                for id in ids:
                    if security_group.get('id') == id:
                        ret.append(security_group)
            elif not (names or ids):
                ret.append(security_group)
        return {'security_groups': ret}

    def list_networks(self, **_params):
        return {'networks':
                [network for network in self._fake_networks.values()]}

    def list_ports(self, **_params):
        ret = []
        device_id = _params.get('device_id')
        for port in self._fake_ports.values():
            if device_id:
                if device_id == port['device_id']:
                    ret.append(port)
            else:
                ret.append(port)
        return {'ports': ret}

    def list_subnets(self, **_params):
        return {'subnets':
                [subnet for subnet in self._fake_subnets.values()]}

    def list_floatingips(self, **_params):
        return {'floatingips': []}

    def delete_security_group(self, security_group):
        self.show_security_group(security_group)
        ports = self.list_ports()
        for port in ports.get('ports'):
            for sg_port in port['security_groups']:
                if sg_port == security_group:
                    msg = ('Unable to delete Security group %s in use'
                           % security_group)
                    raise q_exc.QuantumClientException(message=msg,
                                                       status_code=409)
        del self._fake_security_groups[security_group]

    def delete_security_group_rule(self, security_group_rule):
        self.show_security_group_rule(security_group_rule)
        del self._fake_security_group_rules[security_group_rule]

    def delete_network(self, network):
        self.show_network(network)
        self._check_ports_on_network(network)
        # Fix: iterate over a copy, since we delete from _fake_subnets
        # inside the loop (mutating a dict during values() iteration
        # raises RuntimeError on Python 3).
        for subnet in list(self._fake_subnets.values()):
            if subnet['network_id'] == network:
                del self._fake_subnets[subnet['id']]
        del self._fake_networks[network]

    def delete_subnet(self, subnet):
        subnet = self.show_subnet(subnet).get('subnet')
        self._check_ports_on_network(subnet['network_id'])
        # Fix: was `del self._fake_subnet[subnet]` — a nonexistent
        # attribute, and the subnet dict itself used as the key.
        del self._fake_subnets[subnet['id']]

    def delete_port(self, port):
        self.show_port(port)
        del self._fake_ports[port]

    def update_port(self, port, body=None):
        self.show_port(port)
        self._fake_ports[port].update(body['port'])
        return {'port': self._fake_ports[port]}

    def list_extensions(self, **_parms):
        return {'extensions': []}

    def _check_ports_on_network(self, network):
        """Raise a 409 if any port still exists on the given network."""
        ports = self.list_ports()
        # Fix: previously iterated the wrapper dict itself (yielding the
        # string key 'ports'), which raised TypeError on every call.
        for port in ports.get('ports'):
            if port['network_id'] == network:
                msg = ('Unable to complete operation on network %s. There is '
                       'one or more ports still in use on the network'
                       % network)
                raise q_exc.QuantumClientException(message=msg,
                                                   status_code=409)
| |
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the controllers that communicate with VM for training
classifiers.
"""
from __future__ import annotations
import datetime
import json
import os
from core import feconf
from core import python_utils
from core.constants import constants
from core.domain import classifier_services
from core.domain import config_domain
from core.domain import email_manager
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_services
from core.platform import models
from core.tests import test_utils
from proto_files import text_classifier_pb2
from proto_files import training_job_response_payload_pb2
(classifier_models,) = models.Registry.import_models([models.NAMES.classifier])
class TrainedClassifierHandlerTests(test_utils.ClassifierTestBase):
    """Test the handler for storing job result of training job."""
    def setUp(self):
        """Set up an exploration with a trainable 'Home' state, mark its
        training job as pending, and build a signed protobuf payload that
        mimics what the training VM posts back.
        """
        super(TrainedClassifierHandlerTests, self).setUp()
        self.exp_id = 'exp_id1'
        self.title = 'Testing Classifier storing'
        self.category = 'Test'
        yaml_path = os.path.join(
            feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
        with python_utils.open_file(yaml_path, 'r') as yaml_file:
            self.yaml_content = yaml_file.read()
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.signup('moderator@example.com', 'mod')
        assets_list = []
        # ML classifiers must be enabled so that saving the exploration
        # creates a classifier training job for the 'Home' state.
        with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
            exp_services.save_new_exploration_from_yaml_and_assets(
                feconf.SYSTEM_COMMITTER_ID, self.yaml_content, self.exp_id,
                assets_list)
        self.exploration = exp_fetchers.get_exploration_by_id(self.exp_id)
        self.algorithm_id = feconf.INTERACTION_CLASSIFIER_MAPPING[
            self.exploration.states['Home'].interaction.id]['algorithm_id']
        self.algorithm_version = feconf.INTERACTION_CLASSIFIER_MAPPING[
            self.exploration.states['Home'].interaction.id]['algorithm_version']
        # Model parameters echoed back by the fake VM; the handler is
        # expected to store these verbatim as the trained model JSON.
        self.classifier_data = {
            '_alpha': 0.1,
            '_beta': 0.001,
            '_prediction_threshold': 0.5,
            '_training_iterations': 25,
            '_prediction_iterations': 5,
            '_num_labels': 10,
            '_num_docs': 12,
            '_num_words': 20,
            '_label_to_id': {'text': 1},
            '_word_to_id': {'hello': 2},
            '_w_dp': [],
            '_b_dl': [],
            '_l_dp': [],
            '_c_dl': [],
            '_c_lw': [],
            '_c_l': [],
        }
        classifier_training_job = (
            classifier_services.get_classifier_training_job(
                self.exp_id, self.exploration.version, 'Home',
                self.algorithm_id))
        self.assertIsNotNone(classifier_training_job)
        self.job_id = classifier_training_job.job_id
        # TODO(pranavsid98): Replace the three commands below with
        # mark_training_job_pending after Giritheja's PR gets merged.
        classifier_training_job_model = (
            classifier_models.ClassifierTrainingJobModel.get(
                self.job_id, strict=False))
        classifier_training_job_model.status = (
            feconf.TRAINING_JOB_STATUS_PENDING)
        classifier_training_job_model.update_timestamps()
        classifier_training_job_model.put()
        # Build the protobuf payload the VM would send, including a
        # signature over the serialized job result.
        self.job_result = (
            training_job_response_payload_pb2.TrainingJobResponsePayload.
            JobResult())
        self.job_result.job_id = self.job_id
        classifier_frozen_model = (
            text_classifier_pb2.TextClassifierFrozenModel())
        classifier_frozen_model.model_json = json.dumps(self.classifier_data)
        self.job_result.text_classifier.CopyFrom(classifier_frozen_model)
        self.payload_proto = (
            training_job_response_payload_pb2.TrainingJobResponsePayload())
        self.payload_proto.job_result.CopyFrom(self.job_result)
        self.payload_proto.vm_id = feconf.DEFAULT_VM_ID
        self.secret = feconf.DEFAULT_VM_SHARED_SECRET
        self.payload_proto.signature = classifier_services.generate_signature(
            self.secret.encode('utf-8'),
            self.payload_proto.job_result.SerializeToString(),
            self.payload_proto.vm_id)
        # Separate signed JSON payload used when fetching the next job.
        self.payload_for_fetching_next_job_request = {
            'vm_id': feconf.DEFAULT_VM_ID,
            'message': json.dumps({})
        }
        self.payload_for_fetching_next_job_request['signature'] = (
            classifier_services.generate_signature(
                self.secret.encode('utf-8'),
                self.payload_for_fetching_next_job_request['message'].encode(
                    'utf-8'),
                self.payload_for_fetching_next_job_request['vm_id']))
    def test_trained_classifier_handler(self):
        """Posting a valid job result stores the model and completes the
        job.
        """
        # Normal end-to-end test.
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=200)
        classifier_training_job = (
            classifier_services.get_classifier_training_job(
                self.exp_id, self.exploration.version, 'Home',
                self.algorithm_id))
        self.assertIsNotNone(classifier_training_job)
        classifier_data = (
            self._get_classifier_data_from_classifier_training_job(
                classifier_training_job))
        self.assertEqual(
            json.loads(classifier_data.model_json), self.classifier_data)
        self.assertEqual(
            classifier_training_job.status,
            feconf.TRAINING_JOB_STATUS_COMPLETE)
    def test_email_sent_on_failed_job(self):
        """A failed training job triggers notification emails to the
        configured admins.
        """
        class FakeTrainingJob:
            """Fake training class to invoke failed job functions."""
            def __init__(self):
                self.status = feconf.TRAINING_JOB_STATUS_FAILED
        def mock_get_classifier_training_job_by_id(_):
            """Always return a job already marked as failed."""
            return FakeTrainingJob()
        can_send_emails_ctx = self.swap(
            feconf, 'CAN_SEND_EMAILS', True)
        can_send_feedback_email_ctx = self.swap(
            feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
        fail_training_job = self.swap(
            classifier_services,
            'get_classifier_training_job_by_id',
            mock_get_classifier_training_job_by_id)
        config_property = config_domain.Registry.get_config_property(
            'notification_user_ids_for_failed_tasks')
        config_property.set_value(
            'committer_id',
            [self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)])
        with can_send_emails_ctx, can_send_feedback_email_ctx:
            with fail_training_job:
                # Adding moderator email to admin config page
                # for sending emails for failed training jobs.
                self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True)
                response_dict = self.get_json('/adminhandler')
                response_config_properties = response_dict['config_properties']
                expected_email_list = {
                    'value': [self.get_user_id_from_email(
                        self.CURRICULUM_ADMIN_EMAIL)]}
                sys_config_list = response_config_properties[
                    email_manager.NOTIFICATION_USER_IDS_FOR_FAILED_TASKS.name]
                self.assertDictContainsSubset(
                    expected_email_list, sys_config_list)
                # Check that there are no sent emails to either
                # email address before posting json.
                messages = self._get_sent_email_messages(
                    feconf.ADMIN_EMAIL_ADDRESS)
                self.assertEqual(len(messages), 0)
                messages = self._get_sent_email_messages(
                    'moderator@example.com')
                self.assertEqual(len(messages), 0)
                # Post ML Job.
                self.post_blob(
                    '/ml/trainedclassifierhandler',
                    self.payload_proto.SerializeToString(),
                    expected_status_int=500)
                # Check that there are now emails sent.
                messages = self._get_sent_email_messages(
                    feconf.ADMIN_EMAIL_ADDRESS)
                expected_subject = 'Failed ML Job'
                self.assertEqual(len(messages), 1)
                self.assertEqual(messages[0].subject, expected_subject)
                messages = (
                    self._get_sent_email_messages(self.CURRICULUM_ADMIN_EMAIL))
                self.assertEqual(len(messages), 1)
                self.assertEqual(messages[0].subject, expected_subject)
    def test_error_on_prod_mode_and_default_vm_id(self):
        """The default VM id is rejected outside of DEV_MODE."""
        # Turn off DEV_MODE.
        with self.swap(constants, 'DEV_MODE', False):
            self.post_blob(
                '/ml/trainedclassifierhandler',
                self.payload_proto.SerializeToString(), expected_status_int=401)
    def test_error_on_different_signatures(self):
        """A payload whose signature no longer matches is rejected."""
        # Altering data to result in different signatures.
        self.payload_proto.job_result.job_id = 'different_job_id'
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=401)
    def test_error_on_invalid_classifier_data_in_message(self):
        """A correctly signed payload without classifier data is a 400."""
        # Altering message dict to result in invalid dict.
        self.payload_proto.job_result.ClearField('classifier_frozen_model')
        self.payload_proto.signature = classifier_services.generate_signature(
            self.secret.encode('utf-8'),
            self.payload_proto.job_result.SerializeToString(),
            self.payload_proto.vm_id)
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=400)
    def test_error_on_failed_training_job_status(self):
        """Posting results for a job already marked failed is a 500."""
        classifier_training_job_model = (
            classifier_models.ClassifierTrainingJobModel.get(
                self.job_id, strict=False))
        classifier_training_job_model.status = (
            feconf.TRAINING_JOB_STATUS_FAILED)
        classifier_training_job_model.update_timestamps()
        classifier_training_job_model.put()
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=500)
    def test_error_on_exception_in_store_classifier_data(self):
        """An exception while storing classifier data surfaces as a 500."""
        classifier_training_job_model = (
            classifier_models.ClassifierTrainingJobModel.get(
                self.job_id, strict=False))
        classifier_training_job_model.state_name = 'invalid_state'
        classifier_training_job_model.update_timestamps()
        classifier_training_job_model.put()
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=500)
    def test_get_trained_classifier_handler(self):
        """GET returns the GCS filename of the stored trained model."""
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=200)
        classifier_training_job = (
            classifier_services.get_classifier_training_job(
                self.exp_id, self.exploration.version, 'Home',
                self.algorithm_id))
        params = {
            'exploration_id': self.exp_id,
            'exploration_version': self.exploration.version,
            'state_name': 'Home',
        }
        response = self.get_json(
            '/ml/trainedclassifierhandler', params=params,
            expected_status_int=200)
        self.assertEqual(
            response['gcs_filename'],
            classifier_training_job.classifier_data_filename)
    def test_error_on_incorrect_exploration_id_for_retrieving_model(self):
        """An unknown exploration id yields a 400 on retrieval."""
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=200)
        params = {
            'exploration_id': 'fake_exp',
            'exploration_version': self.exploration.version,
            'state_name': 'Home',
        }
        self.get_json(
            '/ml/trainedclassifierhandler', params=params,
            expected_status_int=400)
    def test_error_on_incorrect_state_name_for_retrieving_model(self):
        """An unknown state name yields a 400 on retrieval."""
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=200)
        params = {
            'exploration_id': self.exp_id,
            'exploration_version': self.exploration.version,
            'state_name': 'fake_state',
        }
        self.get_json(
            '/ml/trainedclassifierhandler', params=params,
            expected_status_int=400)
    def test_error_on_incorrect_exp_version_for_retrieving_model(self):
        """A non-existent exploration version yields a 400 on retrieval."""
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=200)
        params = {
            'exploration_id': self.exp_id,
            'exploration_version': 3,
            'state_name': 'fake_state',
        }
        self.get_json(
            '/ml/trainedclassifierhandler', params=params,
            expected_status_int=400)
    def test_error_on_incomplete_training_job_for_retrieving_model(self):
        """Retrieving a model for a still-pending job yields a 404."""
        params = {
            'exploration_id': self.exp_id,
            'exploration_version': self.exploration.version,
            'state_name': 'Home',
        }
        self.get_json(
            '/ml/trainedclassifierhandler', params=params,
            expected_status_int=404)
    def test_error_on_no_training_job_mapping_for_retrieving_model(self):
        """A state whose interaction has no classifier job mapping yields
        a 400 on retrieval.
        """
        new_exp_id = 'new_exp'
        new_exp = self.save_new_default_exploration(
            new_exp_id, feconf.SYSTEM_COMMITTER_ID, title='New title')
        change_list = [
            exp_domain.ExplorationChange({
                'cmd': 'edit_state_property',
                'state_name': new_exp.init_state_name,
                'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
                'new_value': 'TextInput'
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name':
                    exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS,
                'state_name': new_exp.init_state_name,
                'new_value': {
                    'placeholder': {
                        'value': {
                            'content_id': 'ca_placeholder_0',
                            'unicode_str': ''
                        }
                    },
                    'rows': {'value': 1}
                }
            })]
        with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
            exp_services.update_exploration(
                feconf.SYSTEM_COMMITTER_ID, new_exp_id, change_list, '')
        params = {
            'exploration_id': new_exp_id,
            'exploration_version': 2,
            'state_name': new_exp.init_state_name,
        }
        self.get_json(
            '/ml/trainedclassifierhandler', params=params,
            expected_status_int=400)
    def test_error_on_no_training_job_for_retrieving_model(self):
        """An interaction without classifier support has no job, so
        retrieval yields a 404.
        """
        new_exp_id = 'new_exp'
        new_exp = self.save_new_default_exploration(
            new_exp_id, feconf.SYSTEM_COMMITTER_ID, title='New title')
        change_list = [
            exp_domain.ExplorationChange({
                'cmd': 'edit_state_property',
                'state_name': new_exp.init_state_name,
                'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
                'new_value': 'NumericInput'
            }),
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name':
                    exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS,
                'state_name': new_exp.init_state_name,
                'new_value': {
                    'requireNonnegativeInput': {
                        'value': False
                    }
                }
            })]
        with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
            exp_services.update_exploration(
                feconf.SYSTEM_COMMITTER_ID, new_exp_id, change_list, '')
        params = {
            'exploration_id': new_exp_id,
            'exploration_version': new_exp.version,
            'state_name': new_exp.init_state_name,
        }
        self.get_json(
            '/ml/trainedclassifierhandler', params=params,
            expected_status_int=404)
    def test_training_job_migration_on_algorithm_id_change(self):
        """Changing the algorithm id migrates the state's training job to
        the new algorithm and serves it via the next-job handler.
        """
        params = {
            'exploration_id': self.exp_id,
            'exploration_version': self.exploration.version,
            'state_name': 'Home',
        }
        interaction_classifier_mapping = {
            'TextInput': {
                'algorithm_id': 'NewTextClassifier',
                'algorithm_version': 1
            },
        }
        with self.swap(
            feconf, 'INTERACTION_CLASSIFIER_MAPPING',
            interaction_classifier_mapping):
            self.get_json(
                '/ml/trainedclassifierhandler', params=params,
                expected_status_int=404)
        state_training_jobs_mapping = (
            classifier_services.get_state_training_jobs_mapping(
                self.exp_id, self.exploration.version, 'Home'))
        self.assertIn(
            'NewTextClassifier',
            state_training_jobs_mapping.algorithm_ids_to_job_ids)
        with self.swap(
            feconf, 'INTERACTION_CLASSIFIER_MAPPING',
            interaction_classifier_mapping):
            json_response = self.post_json(
                '/ml/nextjobhandler',
                self.payload_for_fetching_next_job_request,
                expected_status_int=200)
        self.assertEqual(
            state_training_jobs_mapping.algorithm_ids_to_job_ids[
                'NewTextClassifier'],
            json_response['job_id']
        )
        self.assertEqual(json_response['algorithm_id'], 'NewTextClassifier')
        self.assertEqual(json_response['algorithm_version'], 1)
    def test_training_job_migration_on_algorithm_version_change(self):
        """Bumping the algorithm version migrates the training job and
        serves it via the next-job handler.
        """
        self.post_blob(
            '/ml/trainedclassifierhandler',
            self.payload_proto.SerializeToString(), expected_status_int=200)
        params = {
            'exploration_id': self.exp_id,
            'exploration_version': self.exploration.version,
            'state_name': 'Home',
        }
        interaction_classifier_mapping = {
            'TextInput': {
                'algorithm_id': 'TextClassifier',
                'algorithm_version': 2
            },
        }
        with self.swap(
            feconf, 'INTERACTION_CLASSIFIER_MAPPING',
            interaction_classifier_mapping):
            self.get_json(
                '/ml/trainedclassifierhandler', params=params,
                expected_status_int=404)
        state_training_jobs_mapping = (
            classifier_services.get_state_training_jobs_mapping(
                self.exp_id, self.exploration.version, 'Home'))
        self.assertIn(
            'TextClassifier',
            state_training_jobs_mapping.algorithm_ids_to_job_ids)
        with self.swap(
            feconf, 'INTERACTION_CLASSIFIER_MAPPING',
            interaction_classifier_mapping):
            json_response = self.post_json(
                '/ml/nextjobhandler',
                self.payload_for_fetching_next_job_request,
                expected_status_int=200)
        self.assertEqual(
            state_training_jobs_mapping.algorithm_ids_to_job_ids[
                'TextClassifier'],
            json_response['job_id']
        )
        self.assertEqual(json_response['algorithm_id'], 'TextClassifier')
        self.assertEqual(json_response['algorithm_version'], 2)
class NextJobHandlerTest(test_utils.GenericTestBase):
    """Test the handler for fetching next training job."""
    def setUp(self):
        """Create a NEW-status training job with saved classifier data and
        a signed request payload for the next-job handler.
        """
        super(NextJobHandlerTest, self).setUp()
        self.exp_id = 'exp_id1'
        self.title = 'Testing Classifier storing'
        self.category = 'Test'
        interaction_id = 'TextInput'
        self.algorithm_id = feconf.INTERACTION_CLASSIFIER_MAPPING[
            interaction_id]['algorithm_id']
        self.algorithm_version = feconf.INTERACTION_CLASSIFIER_MAPPING[
            interaction_id]['algorithm_version']
        self.training_data = [
            {
                u'answer_group_index': 1,
                u'answers': [u'a1', u'a2']
            },
            {
                u'answer_group_index': 2,
                u'answers': [u'a2', u'a3']
            }
        ]
        self.job_id = classifier_models.ClassifierTrainingJobModel.create(
            self.algorithm_id, interaction_id, self.exp_id, 1,
            datetime.datetime.utcnow(), self.training_data, 'Home',
            feconf.TRAINING_JOB_STATUS_NEW, 1)
        self.classifier_data = text_classifier_pb2.TextClassifierFrozenModel()
        self.classifier_data.model_json = ''
        fs_services.save_classifier_data(
            self.exp_id, self.job_id, self.classifier_data)
        # What the handler should return while the job is still NEW.
        self.expected_response = {
            u'job_id': self.job_id,
            u'training_data': self.training_data,
            u'algorithm_id': self.algorithm_id,
            u'algorithm_version': self.algorithm_version
        }
        # Signed request payload identifying the default training VM.
        self.payload = {}
        self.payload['vm_id'] = feconf.DEFAULT_VM_ID
        secret = feconf.DEFAULT_VM_SHARED_SECRET
        self.payload['message'] = json.dumps({})
        self.payload['signature'] = classifier_services.generate_signature(
            secret.encode('utf-8'),
            self.payload['message'].encode('utf-8'),
            self.payload['vm_id'])
    def test_next_job_handler(self):
        """The handler serves the pending job once, then nothing after the
        job is marked failed.
        """
        json_response = self.post_json(
            '/ml/nextjobhandler', self.payload, expected_status_int=200)
        self.assertEqual(json_response, self.expected_response)
        classifier_services.mark_training_jobs_failed([self.job_id])
        json_response = self.post_json(
            '/ml/nextjobhandler', self.payload, expected_status_int=200)
        self.assertEqual(json_response, {})
| |
# Copyright 2017-present Kensho Technologies, LLC.
"""Common test data and helper functions."""
from collections import namedtuple
from inspect import getmembers, isfunction
from pprint import pformat
import re
from typing import Dict, List, Optional, Set, Tuple, Union, cast
from unittest import TestCase
from graphql import GraphQLList, build_schema, lexicographic_sort_schema, print_schema
from graphql.type.definition import GraphQLInterfaceType, GraphQLObjectType, GraphQLUnionType
from graphql.type.schema import GraphQLSchema
from pyorient.orient import OrientDB
import six
import sqlalchemy
from sqlalchemy.dialects import mssql, postgresql
from ..compiler.compiler_entities import BasicBlock
from ..compiler.subclass import compute_subclass_sets
from ..debugging_utils import pretty_print_gremlin, pretty_print_match
from ..global_utils import is_same_type
from ..macros import MacroRegistry, create_macro_registry, register_macro_edge
from ..query_formatting.graphql_formatting import pretty_print_graphql
from ..schema import (
ClassToFieldTypeOverridesType,
GraphQLSchemaFieldType,
TypeEquivalenceHintsType,
is_vertex_field_name,
)
from ..schema.schema_info import (
CommonSchemaInfo,
CompositeJoinDescriptor,
DirectJoinDescriptor,
JoinDescriptor,
SQLAlchemySchemaInfo,
make_sqlalchemy_schema_info,
)
from ..schema_generation.orientdb import get_graphql_schema_from_orientdb_schema_data
from ..schema_generation.orientdb.schema_graph_builder import get_orientdb_schema_graph
from ..schema_generation.orientdb.utils import (
ORIENTDB_INDEX_RECORDS_QUERY,
ORIENTDB_SCHEMA_RECORDS_QUERY,
)
from ..schema_generation.schema_graph import SchemaGraph
# The strings which we will be comparing have newlines and spaces we'd like to get rid of,
# so we can compare expected and produced emitted code irrespective of whitespace.
# The pattern matches runs of tabs, newlines and spaces (including empty runs);
# it is applied with .sub("") in transform() below.
WHITESPACE_PATTERN = re.compile("[\t\n ]*", flags=re.UNICODE)
# flag to indicate a test component should be skipped
SKIP_TEST = "SKIP"
# Text representation of a GraphQL schema generated from OrientDB.
# This schema isn't meant to be a paragon of good schema design.
# Instead, it aims to capture as many real-world edge cases as possible,
# without requiring a massive number of types and interfaces.
SCHEMA_TEXT = """
schema {
query: RootSchemaQuery
}
directive @filter(
\"\"\"Name of the filter operation to perform.\"\"\"
op_name: String!
\"\"\"List of string operands for the operator.\"\"\"
value: [String!]
) repeatable on FIELD | INLINE_FRAGMENT
directive @tag(
\"\"\"Name to apply to the given property field.\"\"\"
tag_name: String!
) on FIELD
directive @output(
\"\"\"What to designate the output field generated from this property field.\"\"\"
out_name: String!
) on FIELD
directive @output_source on FIELD
directive @optional on FIELD
directive @recurse(
\"\"\"
Recurse up to this many times on this edge. A depth of 1 produces the current \
vertex and its immediate neighbors along the given edge.
\"\"\"
depth: Int!
) on FIELD
directive @fold on FIELD
directive @macro_edge on FIELD_DEFINITION
directive @stitch(source_field: String!, sink_field: String!) on FIELD_DEFINITION
type Animal implements Entity & UniquelyIdentifiable {
_x_count: Int
alias: [String]
birthday: Date
color: String
description: String
in_Animal_ParentOf: [Animal]
in_Entity_Related: [Entity]
name: String
net_worth: Decimal
out_Animal_BornAt: [BirthEvent]
out_Animal_FedAt: [FeedingEvent]
out_Animal_ImportantEvent: [Union__BirthEvent__Event__FeedingEvent]
out_Animal_LivesIn: [Location]
out_Animal_OfSpecies: [Species]
out_Animal_ParentOf: [Animal]
out_Entity_Related: [Entity]
uuid: ID
}
type BirthEvent implements Entity & UniquelyIdentifiable {
_x_count: Int
alias: [String]
description: String
event_date: DateTime
in_Animal_BornAt: [Animal]
in_Animal_ImportantEvent: [Animal]
in_Entity_Related: [Entity]
in_Event_RelatedEvent: [Union__BirthEvent__Event__FeedingEvent]
name: String
out_Entity_Related: [Entity]
out_Event_RelatedEvent: [Union__BirthEvent__Event__FeedingEvent]
uuid: ID
}
\"\"\"
The `Date` scalar type represents day-accuracy date objects.
Values are serialized following the ISO-8601 datetime format specification,
for example "2017-03-21". Serialization and parsing support is guaranteed for the format
described here, with the year, month and day fields included and separated by dashes as
in the example. Implementations are allowed to support additional serialization formats,
if they so choose.
\"\"\"
scalar Date
\"\"\"
The `DateTime` scalar type represents timezone-naive timestamps with up to microsecond
accuracy. Values are serialized following the ISO-8601 datetime format specification,
for example "2017-03-21T12:34:56.012345" or "2017-03-21T12:34:56". Serialization and
parsing support is guaranteed for the format described here, with all fields down to
and including seconds required to be included, and fractional seconds optional, as in
the example. Implementations are allowed to support additional serialization formats,
if they so choose.
\"\"\"
scalar DateTime
\"\"\"
The `Decimal` scalar type is an arbitrary-precision decimal number object useful
for representing values that should never be rounded, such as currency amounts.
Values are allowed to be transported as either a native Decimal type, if the
underlying transport allows that, or serialized as strings in decimal format,
without thousands separators and using a "." as the decimal separator: for
example, "12345678.012345".
\"\"\"
scalar Decimal
interface Entity {
_x_count: Int
alias: [String]
description: String
in_Entity_Related: [Entity]
name: String
out_Entity_Related: [Entity]
uuid: ID
}
type Event implements Entity & UniquelyIdentifiable {
_x_count: Int
alias: [String]
description: String
event_date: DateTime
in_Animal_ImportantEvent: [Animal]
in_Entity_Related: [Entity]
in_Event_RelatedEvent: [Union__BirthEvent__Event__FeedingEvent]
name: String
out_Entity_Related: [Entity]
out_Event_RelatedEvent: [Union__BirthEvent__Event__FeedingEvent]
uuid: ID
}
type FeedingEvent implements Entity & UniquelyIdentifiable {
_x_count: Int
alias: [String]
description: String
event_date: DateTime
in_Animal_FedAt: [Animal]
in_Animal_ImportantEvent: [Animal]
in_Entity_Related: [Entity]
in_Event_RelatedEvent: [Union__BirthEvent__Event__FeedingEvent]
name: String
out_Entity_Related: [Entity]
out_Event_RelatedEvent: [Union__BirthEvent__Event__FeedingEvent]
uuid: ID
}
type Food implements Entity & UniquelyIdentifiable {
_x_count: Int
alias: [String]
description: String
in_Entity_Related: [Entity]
in_Species_Eats: [Species]
name: String
out_Entity_Related: [Entity]
uuid: ID
}
type FoodOrSpecies implements Entity & UniquelyIdentifiable {
_x_count: Int
alias: [String]
description: String
in_Entity_Related: [Entity]
in_Species_Eats: [Species]
name: String
out_Entity_Related: [Entity]
uuid: ID
}
type Location implements Entity & UniquelyIdentifiable {
_x_count: Int
alias: [String]
description: String
in_Animal_LivesIn: [Animal]
in_Entity_Related: [Entity]
name: String
out_Entity_Related: [Entity]
uuid: ID
}
type RootSchemaQuery {
Animal: [Animal]
BirthEvent: [BirthEvent]
Entity: [Entity]
Event: [Event]
FeedingEvent: [FeedingEvent]
Food: [Food]
FoodOrSpecies: [FoodOrSpecies]
Location: [Location]
Species: [Species]
UniquelyIdentifiable: [UniquelyIdentifiable]
}
type Species implements Entity & UniquelyIdentifiable {
_x_count: Int
alias: [String]
description: String
in_Animal_OfSpecies: [Animal]
in_Entity_Related: [Entity]
in_Species_Eats: [Species]
limbs: Int
name: String
out_Entity_Related: [Entity]
out_Species_Eats: [Union__Food__FoodOrSpecies__Species]
uuid: ID
}
union Union__BirthEvent__Event__FeedingEvent = BirthEvent | Event | FeedingEvent
union Union__Food__FoodOrSpecies__Species = Food | FoodOrSpecies | Species
interface UniquelyIdentifiable {
_x_count: Int
uuid: ID
}
"""
VALID_MACROS_TEXT = [
(
"""\
{
Entity @macro_edge_definition(name: "out_Entity_AlmostRelated") {
out_Entity_Related {
out_Entity_Related @macro_edge_target{
uuid
}
}
}
}
""",
{},
),
(
"""\
{
Animal @macro_edge_definition(name: "out_Animal_GrandparentOf") {
out_Animal_ParentOf {
out_Animal_ParentOf @macro_edge_target {
uuid
}
}
}
}""",
{},
),
(
"""\
{
Animal @macro_edge_definition(name: "out_Animal_GrandchildrenCalledNate") {
out_Animal_ParentOf {
out_Animal_ParentOf @filter(op_name: "name_or_alias", value: ["$wanted"])
@macro_edge_target {
uuid
}
}
}
}""",
{
"wanted": "Nate",
},
),
(
"""\
{
Animal @macro_edge_definition(name: "out_Animal_RichSiblings") {
in_Animal_ParentOf {
net_worth @tag(tag_name: "parent_net_worth")
out_Animal_ParentOf @macro_edge_target {
net_worth @filter(op_name: ">", value: ["%parent_net_worth"])
}
}
}
}""",
{},
),
(
"""\
{
Location @macro_edge_definition(name: "out_Location_Orphans") {
in_Animal_LivesIn @macro_edge_target {
in_Animal_ParentOf @filter(op_name: "has_edge_degree", value: ["$num_parents"])
@optional {
uuid
}
}
}
}""",
{
"num_parents": 0,
},
),
# Testing that @optional that doesn't include @macro_edge_target is okay.
(
"""\
{
Animal @macro_edge_definition(name: "out_Animal_MaybeYoungerSiblings") {
out_Animal_BornAt @optional {
event_date @tag(tag_name: "birthday")
}
in_Animal_ParentOf {
out_Animal_ParentOf @macro_edge_target {
out_Animal_BornAt @optional {
event_date @filter(op_name: ">", value: ["%birthday"])
}
}
}
}
}""",
{},
),
(
"""\
{
Animal @macro_edge_definition(name: "out_Animal_RichYoungerSiblings") {
net_worth @tag(tag_name: "net_worth")
out_Animal_BornAt {
event_date @tag(tag_name: "birthday")
}
in_Animal_ParentOf {
out_Animal_ParentOf @macro_edge_target {
net_worth @filter(op_name: ">", value: ["%net_worth"])
out_Animal_BornAt {
event_date @filter(op_name: "<", value: ["%birthday"])
}
}
}
}
}""",
{},
),
# The same as out_AnimalRichYoungerSiblings, but with a filter after the target.
(
"""\
{
Animal @macro_edge_definition(name: "out_Animal_RichYoungerSiblings_2") {
net_worth @tag(tag_name: "net_worth")
in_Animal_ParentOf {
out_Animal_ParentOf @macro_edge_target {
net_worth @filter(op_name: ">", value: ["%net_worth"])
out_Animal_BornAt {
event_date @tag(tag_name: "birthday")
}
}
}
out_Animal_BornAt {
event_date @filter(op_name: ">", value: ["%birthday"])
}
}
}""",
{},
),
(
"""\
{
Animal @macro_edge_definition(name: "out_Animal_RelatedFood") {
in_Entity_Related {
... on Food @macro_edge_target {
uuid
}
}
}
}""",
{},
),
(
"""\
{
Animal @macro_edge_definition(name: "out_Animal_RelatedEntity") {
in_Entity_Related {
... on Entity @macro_edge_target {
uuid
}
}
}
}""",
{},
),
]
# A class holding all necessary backend-specific testing utilities.
# Bug fix: the original field tuple was missing commas after "backend" and
# "setup_schema", so implicit string concatenation fused adjacent field
# names into "backendcompare_queries" and "setup_schemasetup_data".
BackendTester = namedtuple(
    "BackendTester",
    (
        # Backend to be tested
        "backend",
        # Returns whether two emitted queries are the same, up to differences in syntax/whitespace
        "compare_queries",
        # An instance of backend.SchemaInfoClass consistent with the standard testing schema.
        "schema_info",
        # Given a SchemaInfo and a connection pool to a database, install the given schema into
        # the database, erasing content if necessary.
        "setup_schema",
        # Given a SchemaInfo, a dict representation of data fitting the schema,
        # and a connection pool to a database with the same schema,
        # install the given data into the database, erasing any existing data.
        "setup_data",
    ),
)
def get_function_names_from_module(module):
    """Return a set of function names present in a given module."""
    names = set()
    for member_name, member_value in getmembers(module):
        if isfunction(member_value):
            names.add(member_name)
    return names
def get_test_function_names_from_class(test_class):
    """Return a set of test function names present in a given TestCase class."""
    if not issubclass(test_class, TestCase):
        raise AssertionError("Received non-test class {} as input.".format(test_class))
    return {
        name
        for name, value in test_class.__dict__.items()
        if isfunction(value) and name.startswith("test_")
    }
def transform(emitted_output: str) -> str:
    """Collapse tabs, newlines and spaces so comparisons ignore layout."""
    stripped = WHITESPACE_PATTERN.sub("", emitted_output)
    return stripped
def _get_mismatch_message(
    expected_blocks: List[BasicBlock], received_blocks: List[BasicBlock]
) -> str:
    """Build a well-formatted error message showing two mismatched block lists."""
    return "{}\n\n!=\n\n{}".format(pformat(expected_blocks), pformat(received_blocks))
def compare_ir_blocks(
    test_case: TestCase, expected_blocks: List[BasicBlock], received_blocks: List[BasicBlock]
) -> None:
    """Assert that the expected and received IR block lists are pairwise identical."""
    mismatch_message = _get_mismatch_message(expected_blocks, received_blocks)
    if len(expected_blocks) != len(received_blocks):
        test_case.fail("Not the same number of blocks:\n\n{}".format(mismatch_message))
    for position, (expected, received) in enumerate(zip(expected_blocks, received_blocks)):
        failure_details = "Blocks at position {} were different: {} vs {}\n\n{}".format(
            position, expected, received, mismatch_message
        )
        test_case.assertEqual(expected, received, msg=failure_details)
def compare_graphql(test_case: TestCase, expected: str, received: str) -> None:
    """Compare expected and received GraphQL code, ignoring whitespace differences."""
    pretty_expected = pretty_print_graphql(expected)
    pretty_received = pretty_print_graphql(received)
    msg = "\n{}\n\n!=\n\n{}".format(pretty_expected, pretty_received)
    compare_ignoring_whitespace(test_case, expected, received, msg)
def compare_match(
    test_case: TestCase,
    expected: str,
    received: str,
    parameterized: bool = True,
) -> None:
    """Compare expected and received MATCH code, ignoring whitespace differences."""
    pretty_expected = pretty_print_match(expected, parameterized=parameterized)
    pretty_received = pretty_print_match(received, parameterized=parameterized)
    msg = "\n{}\n\n!=\n\n{}".format(pretty_expected, pretty_received)
    compare_ignoring_whitespace(test_case, expected, received, msg)
def compare_sql(test_case: TestCase, expected: str, received: str) -> None:
    """Compare the expected and received SQL queries, ignoring whitespace differences."""
    compare_ignoring_whitespace(
        test_case, expected, received, "\n{}\n\n!=\n\n{}".format(expected, received)
    )
def compare_gremlin(
    test_case: TestCase,
    expected: str,
    received: str,
) -> None:
    """Compare expected and received Gremlin code, ignoring whitespace differences."""
    pretty_expected = pretty_print_gremlin(expected)
    pretty_received = pretty_print_gremlin(received)
    compare_ignoring_whitespace(
        test_case, expected, received, "\n{}\n\n!=\n\n{}".format(pretty_expected, pretty_received)
    )
def compare_cypher(test_case: TestCase, expected: str, received: str) -> None:
    """Compare the expected and received Cypher queries, ignoring whitespace differences."""
    compare_ignoring_whitespace(
        test_case, expected, received, "\n{}\n\n!=\n\n{}".format(expected, received)
    )
def compare_input_metadata(
    test_case: TestCase,
    expected: Dict[str, GraphQLSchemaFieldType],
    received: Dict[str, GraphQLSchemaFieldType],
) -> None:
    """Compare two input-metadata dicts using GraphQL type equality semantics."""
    # The key sets must match exactly before per-key types are compared.
    test_case.assertEqual(set(six.iterkeys(expected)), set(six.iterkeys(received)))
    for key in six.iterkeys(expected):
        expected_type = expected[key]
        received_type = received[key]
        failure_msg = "{} != {}".format(str(expected_type), str(received_type))
        test_case.assertTrue(is_same_type(expected_type, received_type), msg=failure_msg)
def compare_ignoring_whitespace(
    test_case: TestCase, expected: str, received: str, msg: Optional[str]
) -> None:
    """Assert the two snippets are equal once all whitespace is stripped, with the given message."""
    normalized_expected = transform(expected)
    normalized_received = transform(received)
    test_case.assertEqual(normalized_expected, normalized_received, msg=msg)
def _lexicographic_sort_schema_text(schema_text: str) -> str:
    """Return the schema text with its types and fields sorted lexicographically."""
    sorted_schema = lexicographic_sort_schema(build_schema(schema_text))
    return print_schema(sorted_schema)
def compare_schema_texts_order_independently(
    test_case: TestCase,
    expected_schema_text: str,
    received_schema_text: str,
) -> None:
    """Compare two schema texts without regard to the order of their definitions."""
    sorted_expected = _lexicographic_sort_schema_text(expected_schema_text)
    sorted_received = _lexicographic_sort_schema_text(received_schema_text)
    compare_ignoring_whitespace(
        test_case,
        sorted_expected,
        sorted_received,
        "\n{}\n\n!=\n\n{}".format(sorted_expected, sorted_received),
    )
def get_schema() -> GraphQLSchema:
    """Construct and return the standard schema object used by the test suite."""
    return build_schema(SCHEMA_TEXT)
def get_type_equivalence_hints() -> TypeEquivalenceHintsType:
    """Build the default type_equivalence_hints mapping used across the tests."""
    schema = get_schema()
    hints: Dict[Union[GraphQLInterfaceType, GraphQLObjectType], GraphQLUnionType] = {}
    equivalence_pairs = [
        ("Event", "Union__BirthEvent__Event__FeedingEvent"),
        ("FoodOrSpecies", "Union__Food__FoodOrSpecies__Species"),
    ]
    for key_name, union_name in equivalence_pairs:
        key_type = schema.get_type(key_name)
        union_type = schema.get_type(union_name)
        # Only record the hint when both names resolved to types of the expected kinds.
        key_is_composite = isinstance(key_type, (GraphQLInterfaceType, GraphQLObjectType))
        union_is_union = isinstance(union_type, GraphQLUnionType)
        if key_type and union_type and key_is_composite and union_is_union:
            hints[key_type] = union_type
    return hints
def get_common_schema_info() -> CommonSchemaInfo:
    """Build the default CommonSchemaInfo used for testing."""
    schema = get_schema()
    equivalence_hints = get_type_equivalence_hints()
    return CommonSchemaInfo(schema, equivalence_hints)
def _get_schema_without_list_valued_property_fields() -> GraphQLSchema:
    """Return the default testing schema with all list-valued property fields removed."""
    schema = get_schema()
    composite_types = (GraphQLInterfaceType, GraphQLObjectType)
    for type_name, graphql_type in six.iteritems(schema.type_map):
        if not isinstance(graphql_type, composite_types):
            continue
        # Leave the root query type and GraphQL's built-in "__"-prefixed types intact.
        if type_name == "RootSchemaQuery" or type_name.startswith("__"):
            continue
        # Collect first, then pop, to avoid mutating the dict while iterating it.
        list_valued_property_fields = [
            field_name
            for field_name, field_type in six.iteritems(graphql_type.fields)
            if not is_vertex_field_name(field_name) and isinstance(field_type.type, GraphQLList)
        ]
        for field_name in list_valued_property_fields:
            graphql_type.fields.pop(field_name)
    return schema
def get_sqlalchemy_schema_info(dialect: str = "mssql") -> SQLAlchemySchemaInfo:
    """Get a SQLAlchemySchemaInfo for testing.

    Args:
        dialect: "mssql" (default) or "postgresql". Selects the SQLAlchemy compiler
                 dialect; for MSSQL, each table's schema is additionally qualified
                 with a database name (e.g. "db_1.schema_1" instead of "schema_1").

    Returns:
        SQLAlchemySchemaInfo combining the standard testing schema (minus its
        list-valued property fields), the testing tables defined below, and
        join descriptors derived from the edge definitions below.

    Raises:
        AssertionError: if dialect is not one of the two recognized values.
    """
    # We don't support list-valued property fields in SQL for now.
    schema = _get_schema_without_list_valued_property_fields()
    type_equivalence_hints = get_type_equivalence_hints()
    sqlalchemy_metadata = sqlalchemy.MetaData()
    uuid_type = sqlalchemy.String(36)
    # One SQLAlchemy table per concrete type. Joins between tables are expressed
    # through the "edges" definitions below rather than SQL ForeignKey constraints.
    tables = {
        "Animal": sqlalchemy.Table(
            "Animal",
            sqlalchemy_metadata,
            sqlalchemy.Column("birthday", sqlalchemy.DateTime, nullable=False),
            sqlalchemy.Column("color", sqlalchemy.String(40), nullable=True),
            sqlalchemy.Column("description", sqlalchemy.String(40), nullable=True),
            sqlalchemy.Column("parent", sqlalchemy.String(40), nullable=True),
            sqlalchemy.Column("related_entity", sqlalchemy.String(40), nullable=True),
            sqlalchemy.Column("name", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("net_worth", sqlalchemy.Integer, nullable=True),
            sqlalchemy.Column("fed_at", uuid_type, nullable=True),
            sqlalchemy.Column("birth_date", sqlalchemy.DateTime, nullable=True),
            sqlalchemy.Column("birth_uuid", uuid_type, nullable=True),
            sqlalchemy.Column("lives_in", uuid_type, nullable=True),
            sqlalchemy.Column("important_event", sqlalchemy.String(40), nullable=True),
            sqlalchemy.Column("species", sqlalchemy.String(40), nullable=True),
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            schema=("db_1." if dialect == "mssql" else "") + "schema_1",
        ),
        "BirthEvent": sqlalchemy.Table(
            "BirthEvent",
            sqlalchemy_metadata,
            sqlalchemy.Column("description", sqlalchemy.String(40), nullable=True),
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            sqlalchemy.Column("name", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("event_date", sqlalchemy.DateTime, nullable=False, primary_key=True),
            sqlalchemy.Column("related_event", uuid_type, primary_key=False),
            schema=("db_1." if dialect == "mssql" else "") + "schema_1",
        ),
        "Entity": sqlalchemy.Table(
            "Entity",
            sqlalchemy_metadata,
            sqlalchemy.Column("description", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            sqlalchemy.Column("name", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("related_entity", uuid_type, nullable=True),
            schema=("db_1." if dialect == "mssql" else "") + "schema_1",
        ),
        "Event": sqlalchemy.Table(
            "Event",
            sqlalchemy_metadata,
            sqlalchemy.Column("description", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            sqlalchemy.Column("name", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("event_date", sqlalchemy.DateTime, nullable=False),
            sqlalchemy.Column("related_event", uuid_type, primary_key=False),
            schema=("db_2." if dialect == "mssql" else "") + "schema_1",
        ),
        "FeedingEvent": sqlalchemy.Table(
            "FeedingEvent",
            sqlalchemy_metadata,
            sqlalchemy.Column("description", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            sqlalchemy.Column("name", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("event_date", sqlalchemy.DateTime, nullable=False),
            sqlalchemy.Column("related_event", uuid_type, primary_key=False),
            schema=("db_2." if dialect == "mssql" else "") + "schema_1",
        ),
        "Food": sqlalchemy.Table(
            "Food",
            sqlalchemy_metadata,
            sqlalchemy.Column("description", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            sqlalchemy.Column("name", sqlalchemy.String(40), nullable=False),
            schema=("db_2." if dialect == "mssql" else "") + "schema_2",
        ),
        "FoodOrSpecies": sqlalchemy.Table(
            "FoodOrSpecies",
            sqlalchemy_metadata,
            sqlalchemy.Column("description", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            sqlalchemy.Column("name", sqlalchemy.String(40), nullable=False),
            schema=("db_2." if dialect == "mssql" else "") + "schema_2",
        ),
        "Location": sqlalchemy.Table(
            "Location",
            sqlalchemy_metadata,
            sqlalchemy.Column("description", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            sqlalchemy.Column("name", sqlalchemy.String(40), nullable=False),
            schema=("db_1." if dialect == "mssql" else "") + "schema_1",
        ),
        "Species": sqlalchemy.Table(
            "Species",
            sqlalchemy_metadata,
            sqlalchemy.Column("description", sqlalchemy.String(40), nullable=True),
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            sqlalchemy.Column("name", sqlalchemy.String(40), nullable=False),
            sqlalchemy.Column("eats", uuid_type, nullable=True),
            sqlalchemy.Column("limbs", sqlalchemy.Integer, nullable=True),
            sqlalchemy.Column("related_entity", uuid_type, nullable=True),
            schema=("db_1." if dialect == "mssql" else "") + "schema_1",
        ),
        "UniquelyIdentifiable": sqlalchemy.Table(
            "UniquelyIdentifiable",
            sqlalchemy_metadata,
            sqlalchemy.Column("uuid", uuid_type, primary_key=True),
            schema=("db_1." if dialect == "mssql" else "") + "schema_1",
        ),
    }
    # Compute the subclass sets, including union types
    subclasses = compute_subclass_sets(schema, type_equivalence_hints=type_equivalence_hints)
    for object_type, equivalent_union_type in six.iteritems(type_equivalence_hints):
        subclasses[equivalent_union_type.name] = subclasses[object_type.name]
        subclasses[equivalent_union_type.name].add(object_type.name)
    # HACK(bojanserafimov): Some of these edges are many-to-many, but I've represented them
    #                       as many-to-one edges. If I didn't, I'd have to implement many-to-many
    #                       edges before I can get any tests to run, because most tests use
    #                       these edges.
    edges = [
        {
            "name": "Animal_ParentOf",
            "from_table": "Animal",
            "to_table": "Animal",
            "column_pairs": {("uuid", "parent")},
        },
        {
            "name": "Animal_OfSpecies",
            "from_table": "Animal",
            "to_table": "Species",
            "column_pairs": {("species", "uuid")},
        },
        {
            "name": "Animal_FedAt",
            "from_table": "Animal",
            "to_table": "FeedingEvent",
            "column_pairs": {("fed_at", "uuid")},
        },
        {
            # The only multi-column edge; it produces a CompositeJoinDescriptor below.
            "name": "Animal_BornAt",
            "from_table": "Animal",
            "to_table": "BirthEvent",
            "column_pairs": {
                ("birth_uuid", "uuid"),
                ("birth_date", "event_date"),
            },
        },
        {
            "name": "Animal_LivesIn",
            "from_table": "Animal",
            "to_table": "Location",
            "column_pairs": {("lives_in", "uuid")},
        },
        {
            "name": "Animal_ImportantEvent",
            "from_table": "Animal",
            "to_table": "Union__BirthEvent__Event__FeedingEvent",
            "column_pairs": {("important_event", "uuid")},
        },
        {
            "name": "Species_Eats",
            "from_table": "Species",
            "to_table": "Union__Food__FoodOrSpecies__Species",
            "column_pairs": {("eats", "uuid")},
        },
        {
            "name": "Entity_Related",
            "from_table": "Entity",
            "to_table": "Entity",
            "column_pairs": {("related_entity", "uuid")},
        },
        {
            "name": "Event_RelatedEvent",
            "from_table": "Union__BirthEvent__Event__FeedingEvent",
            "to_table": "Union__BirthEvent__Event__FeedingEvent",
            "column_pairs": {("related_event", "uuid")},
        },
        {
            # NOTE(review): "Alias" does not appear in the tables dict above —
            # presumably it is provided elsewhere; verify before relying on this edge.
            "name": "Entity_Alias",
            "from_table": "Entity",
            "to_table": "Alias",
            "column_pairs": {("uuid", "alias_for")},
        },
    ]
    # Create the appropriate JoinDescriptor for each specified edge, in both the
    # in and out directions.
    join_descriptors: Dict[str, Dict[str, JoinDescriptor]] = {}
    for edge in edges:
        column_pairs = cast(Set[Tuple[str, str]], edge["column_pairs"])
        from_table = cast(str, edge["from_table"])
        to_table = cast(str, edge["to_table"])
        if len(column_pairs) > 1:
            # Multi-column edge: the "in" direction reverses each (from, to) pair.
            join_descriptors.setdefault(from_table, {})[
                "out_{}".format(edge["name"])
            ] = CompositeJoinDescriptor(column_pairs)
            join_descriptors.setdefault(to_table, {})[
                "in_{}".format(edge["name"])
            ] = CompositeJoinDescriptor(
                {(to_column, from_column) for from_column, to_column in column_pairs}
            )
        elif len(column_pairs) == 1:
            # Single-column edge: one DirectJoinDescriptor per direction.
            from_column, to_column = next(iter(column_pairs))
            join_descriptors.setdefault(from_table, {})[
                "out_{}".format(edge["name"])
            ] = DirectJoinDescriptor(from_column, to_column)
            join_descriptors.setdefault(to_table, {})[
                "in_{}".format(edge["name"])
            ] = DirectJoinDescriptor(to_column, from_column)
    # Inherit join_descriptors from superclasses
    # TODO(bojanserafimov): Properties can be inferred too, instead of being explicitly inherited.
    for class_name, subclass_set in six.iteritems(subclasses):
        for subclass in subclass_set:
            for edge_name, join_info in six.iteritems(join_descriptors.get(class_name, {})):
                join_descriptors.setdefault(subclass, {})[edge_name] = join_info
    # Pick the SQLAlchemy compiler dialect matching the requested dialect name.
    if dialect == "postgresql":
        sqlalchemy_compiler_dialect = postgresql.dialect()
    elif dialect == "mssql":
        sqlalchemy_compiler_dialect = mssql.dialect()
    else:
        raise AssertionError("Unrecognized dialect {}".format(dialect))
    return make_sqlalchemy_schema_info(
        schema, type_equivalence_hints, sqlalchemy_compiler_dialect, tables, join_descriptors
    )
def generate_schema_graph(orientdb_client: OrientDB) -> SchemaGraph:
    """Build a SchemaGraph by querying an OrientDB client for schema and index records."""
    raw_schema_records = orientdb_client.command(ORIENTDB_SCHEMA_RECORDS_QUERY)
    schema_data = [record.oRecordData for record in raw_schema_records]
    raw_index_records = orientdb_client.command(ORIENTDB_INDEX_RECORDS_QUERY)
    index_query_data = [record.oRecordData for record in raw_index_records]
    return get_orientdb_schema_graph(schema_data, index_query_data)
def generate_schema(
    orientdb_client: OrientDB,
    class_to_field_type_overrides: Optional[ClassToFieldTypeOverridesType] = None,
    hidden_classes: Optional[Set[str]] = None,
) -> Tuple[GraphQLSchema, TypeEquivalenceHintsType]:
    """Build a GraphQL schema and type-equivalence mapping from an OrientDB client."""
    raw_records = orientdb_client.command(ORIENTDB_SCHEMA_RECORDS_QUERY)
    schema_data = [record.oRecordData for record in raw_records]
    return get_graphql_schema_from_orientdb_schema_data(
        schema_data, class_to_field_type_overrides, hidden_classes
    )
def get_empty_test_macro_registry() -> MacroRegistry:
    """Create an empty MacroRegistry with the standard type-equivalence hints and subclass sets."""
    schema = get_schema()
    event_type = schema.get_type("Event")
    event_union_type = schema.get_type("Union__BirthEvent__Event__FeedingEvent")
    type_equivalence_hints = cast(TypeEquivalenceHintsType, {event_type: event_union_type})
    subclass_sets = compute_subclass_sets(schema, type_equivalence_hints)
    return create_macro_registry(schema, type_equivalence_hints, subclass_sets)
def get_test_macro_registry() -> MacroRegistry:
    """Create a MacroRegistry pre-loaded with every macro used throughout the tests."""
    macro_registry = get_empty_test_macro_registry()
    for macro_graphql, macro_args in VALID_MACROS_TEXT:
        register_macro_edge(macro_registry, macro_graphql, macro_args)
    return macro_registry
| |
"""
Main module of Anatomize.
"""
# implemented by Qiyuan Gong
# qiyuangong@gmail.com
# @INPROCEEDINGS{
# author = {Xiao, Xiaokui and Tao, Yufei},
# title = {Anatomy: simple and effective privacy preservation},
# booktitle = {Proceedings of the 32nd international conference on Very large data
# bases},
# year = {2006},
# series = {VLDB '06},
# pages = {139--150},
# publisher = {VLDB Endowment},
# acmid = {1164141},
# location = {Seoul, Korea},
# numpages = {12}
# }
import random
import heapq
_DEBUG = False
class SABucket(object):
    """
    A bucket of records sharing one sensitive-attribute (SA) value,
    used during the bucketize stage of Anatomize.
    """

    def __init__(self, data, index):
        # Copy the records so the bucket owns its own list.
        self.member = data[:]
        self.value = ""
        self.index = index

    def pop_element(self):
        """Remove and return one record from this bucket."""
        return self.member.pop()

    def __len__(self):
        """Return the number of records currently in this bucket."""
        return len(self.member)
class Group(object):
    """
    An equivalence class of records assembled during the grouping stage.
    """

    def __init__(self):
        self.index = 0
        self.member = []
        # Indices of the SA buckets that already contributed a record here.
        self.checklist = set()

    def add_element(self, record, index):
        """Store a copy of record and remember which bucket (index) it came from."""
        self.member.append(record[:])
        self.checklist.add(index)

    def check_index(self, index):
        """Return True if a record from bucket index is already in this group."""
        return index in self.checklist

    def __len__(self):
        """Return the number of records in this group."""
        return len(self.member)
def build_SA_bucket(data):
    """
    Partition records by SA value into buckets and build a heap of buckets.

    Returns (buckets, bucket_heap): buckets maps each SA value (last field of
    a record) to its records; bucket_heap holds (-len(bucket), SABucket) pairs
    so that heapq's min-heap yields the largest buckets first.
    """
    buckets = {}
    for record in data:
        sa_value = record[-1]
        buckets.setdefault(sa_value, []).append(record)
    # Shuffle within each bucket so pop_element() effectively returns a
    # random record later on.
    for bucket_records in buckets.values():
        random.shuffle(bucket_records)
    # Group stage preparation: lengths are negated because heapq is a
    # min-heap and the grouping stage wants the l largest buckets per round.
    bucket_heap = []
    for bucket_index, bucket_records in enumerate(buckets.values()):
        negative_length = -len(bucket_records)
        if negative_length == 0:
            continue
        heapq.heappush(bucket_heap, (negative_length, SABucket(bucket_records, bucket_index)))
    return buckets, bucket_heap
def assign_to_groups(buckets, bucket_heap, L):
    """
    Group stage: while at least L buckets remain, pop one record from each of
    the L largest buckets to form a group with L distinct SA values.
    """
    groups = []
    while len(bucket_heap) >= L:
        newgroup = Group()
        # Take the L largest buckets off the heap (remember: lengths are negated).
        chosen = [heapq.heappop(bucket_heap) for _ in range(L)]
        # Move one record from each chosen bucket into the new group, then
        # push every still-non-empty bucket back with its updated length.
        for negative_length, bucket in chosen:
            newgroup.add_element(bucket.pop_element(), bucket.index)
            negative_length += 1
            if negative_length == 0:
                continue
            heapq.heappush(bucket_heap, (negative_length, bucket))
        groups.append(newgroup)
    return groups
def residue_assign(groups, bucket_heap):
    """
    Residue-assign stage.

    If the dataset is evenly distributed on SA, at most one record remains
    after the group stage. Most datasets are skewed, however, so each leftover
    bucket's records are re-assigned to groups that do not yet contain that
    bucket's SA value. Records that cannot be placed in any group are
    suppressed (deleted).

    Returns (groups, suppress), where suppress lists the deleted records.
    """
    suppress = []
    while len(bucket_heap):
        (_, bucket) = heapq.heappop(bucket_heap)
        index = bucket.index
        # Groups that do not yet contain a record from this bucket.
        candidate_set = [group for group in groups if group.check_index(index) is False]
        # Hand out records while both records and candidate groups remain;
        # each candidate group receives at most one record from this bucket.
        while bucket.member and candidate_set:
            current_record = bucket.pop_element()
            group_index = random.randrange(len(candidate_set))
            group = candidate_set.pop(group_index)
            group.add_element(current_record, index)
        # Bug fix: the original code extended suppress twice when no candidate
        # group existed (once up front and once more via an always-true
        # "len(bucket) >= 0" check), duplicating the suppressed records.
        # Suppress each leftover record exactly once.
        suppress.extend(bucket.member[:])
    return groups, suppress
def split_table(groups):
    """
    Split the grouped records into the Anatomy output tables.

    Returns (qi_table, sa_table, result): qi_table rows are [qi..., gid],
    sa_table rows are [gid, sa], and result holds the raw grouped records.
    """
    qi_table = []
    sa_table = []
    result = []
    for gid, group in enumerate(groups):
        group.index = gid
        result.append(group.member[:])
        # Emit one QIT row and one SAT row per record, linked by the group id.
        for record in group.member:
            qi_row = record[:-1]
            qi_row.append(gid)
            qi_table.append(qi_row)
            sa_table.append([gid, record[-1]])
    return qi_table, sa_table, result
def anatomize(data, L):
    """
    Run Anatomy; only a single SA column is supported.

    Separates grouped records into a QI table (QIT) and an SA table (SAT),
    using a heap to repeatedly pick the l largest SA buckets.

    Args:
        data: list of records, each shaped [qi1, qi2, ..., sa]
        L: the l in l-diversity; each group receives L distinct SA values.

    Returns:
        the raw records grouped (list of groups of records).
    """
    # Bug fix: the original Python-2-only "print" statements were a syntax
    # error under Python 3; single-argument print(...) calls behave
    # identically on both Python 2 and Python 3.
    if _DEBUG:
        print('*' * 10)
        print("Begin Anatomizer!")
        print("L=%d" % L)
    # build SA buckets
    buckets, bucket_heap = build_SA_bucket(data)
    # assign records to groups
    groups = assign_to_groups(buckets, bucket_heap, L)
    # handle residue records
    groups, suppress = residue_assign(groups, bucket_heap)
    # transform and print result
    qi_table, sa_table, result = split_table(groups)
    if _DEBUG:
        print('NO. of Suppress after anatomy = %d' % len(suppress))
        print('NO. of groups = %d' % len(result))
        for i in range(len(qi_table)):
            print(qi_table[i] + sa_table[i])
    return result
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """
    South schema migration: add ``IntrinsicImagesAlgorithm.task_version``
    (an ``IntegerField`` defaulting to 0) to the ``intrinsic`` app.

    The ``models`` dict below is South's auto-generated frozen ORM state
    at this point in migration history -- do not edit it by hand.
    """

    def forwards(self, orm):
        # Adding field 'IntrinsicImagesAlgorithm.task_version'
        db.add_column(u'intrinsic_intrinsicimagesalgorithm', 'task_version',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'IntrinsicImagesAlgorithm.task_version'
        db.delete_column(u'intrinsic_intrinsicimagesalgorithm', 'task_version')

    # Auto-generated frozen model definitions (South ORM freeze).
    models = {
        u'accounts.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'always_approve': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'blocked_reason': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'exclude_from_aggregation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'mturk_worker_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'intrinsic.intrinsicimagesalgorithm': {
            'Meta': {'unique_together': "(('slug', 'parameters'),)", 'object_name': 'IntrinsicImagesAlgorithm'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'baseline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameters': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'task_version': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'intrinsic.intrinsicimagesdecomposition': {
            'Meta': {'ordering': "['mean_error', '-id']", 'unique_together': "(('photo', 'algorithm'),)", 'object_name': 'IntrinsicImagesDecomposition'},
            'algorithm': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'intrinsic_images_decompositions'", 'to': u"orm['intrinsic.IntrinsicImagesAlgorithm']"}),
            'error_comparison_thresh': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mean_dense_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'mean_eq_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'mean_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'mean_neq_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'mean_sparse_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'mean_sum_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_dense': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_eq': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_neq': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_sparse': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'photo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'intrinsic_images_decompositions'", 'to': u"orm['photos.Photo']"}),
            'reflectance_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'runtime': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'shading_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'intrinsic.intrinsicpoint': {
            'Meta': {'ordering': "['-id']", 'object_name': 'IntrinsicPoint'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_separation': ('django.db.models.fields.DecimalField', [], {'default': "'0.07'", 'max_digits': '7', 'decimal_places': '5'}),
            'opaque': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'opaque_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'opaque_score': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'photo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'intrinsic_points'", 'to': u"orm['photos.Photo']"}),
            'sRGB': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
            'synthetic_diff_cv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'synthetic_diff_fraction': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'synthetic_diff_intensity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'x': ('django.db.models.fields.FloatField', [], {}),
            'y': ('django.db.models.fields.FloatField', [], {})
        },
        u'intrinsic.intrinsicpointcomparison': {
            'Meta': {'ordering': "['photo', '-darker_score']", 'unique_together': "(('point1', 'point2'),)", 'object_name': 'IntrinsicPointComparison'},
            'darker': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'darker_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'darker_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'photo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'intrinsic_comparisons'", 'to': u"orm['photos.Photo']"}),
            'point1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comparison_point1'", 'to': u"orm['intrinsic.IntrinsicPoint']"}),
            'point1_image_darker': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'point2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comparison_point2'", 'to': u"orm['intrinsic.IntrinsicPoint']"}),
            'reflectance_dd': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'reflectance_dd_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'reflectance_eq': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'reflectance_eq_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'synthetic_diff_intensity_ratio': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'intrinsic.intrinsicpointcomparisonresponse': {
            'Meta': {'ordering': "['-time_ms']", 'object_name': 'IntrinsicPointComparisonResponse'},
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'comparison': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': u"orm['intrinsic.IntrinsicPointComparison']"}),
            'confidence': ('django.db.models.fields.IntegerField', [], {}),
            'darker': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
            'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'reflectance_dd': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'reflectance_eq': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
            'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
        },
        u'intrinsic.intrinsicpointopacityresponse': {
            'Meta': {'ordering': "['-time_ms']", 'object_name': 'IntrinsicPointOpacityResponse'},
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
            'opaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'point': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'opacities'", 'to': u"orm['intrinsic.IntrinsicPoint']"}),
            'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
            'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
            'zoom': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'intrinsic.intrinsicsyntheticdecomposition': {
            'Meta': {'ordering': "['-id']", 'object_name': 'IntrinsicSyntheticDecomposition'},
            'depth_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'diff_col_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'diff_dir_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'diff_ind_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'emit_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'env_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'gloss_col_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'gloss_dir_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'gloss_ind_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'md5': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'multilayer_exr': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'normal_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'photo': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'intrinsic_synthetic'", 'unique': 'True', 'to': u"orm['photos.Photo']"}),
            'scene_artist': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'scene_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'trans_col_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'trans_dir_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'trans_ind_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'licenses.license': {
            'Meta': {'object_name': 'License'},
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'cc_attribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cc_no_deriv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cc_noncommercial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cc_share_alike': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'creative_commons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'publishable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'mturk.experiment': {
            'Meta': {'ordering': "['slug', 'variant']", 'unique_together': "(('slug', 'variant'),)", 'object_name': 'Experiment'},
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'completed_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'cubam_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'examples_group_attr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'has_tutorial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'module': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'new_hit_settings': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiments'", 'null': 'True', 'to': u"orm['mturk.ExperimentSettings']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'template_dir': ('django.db.models.fields.CharField', [], {'default': "'mturk/experiments'", 'max_length': '255'}),
            'test_contents_per_assignment': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'variant': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        u'mturk.experimentsettings': {
            'Meta': {'object_name': 'ExperimentSettings'},
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'auto_add_hits': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'auto_approval_delay': ('django.db.models.fields.IntegerField', [], {'default': '2592000'}),
            'content_filter': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'experiment_settings_in'", 'to': u"orm['contenttypes.ContentType']"}),
            'contents_per_hit': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'duration': ('django.db.models.fields.IntegerField', [], {'default': '1800'}),
            'feedback_bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
            'frame_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keywords': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'lifetime': ('django.db.models.fields.IntegerField', [], {'default': '2678400'}),
            'max_active_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'max_total_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'min_output_consensus': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
            'num_outputs_max': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'out_content_attr': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'out_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_settings_out'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'out_count_ratio': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'qualifications': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'requirements': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'reward': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '4'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'mturk.experimenttestcontent': {
            'Meta': {'ordering': "['-id']", 'object_name': 'ExperimentTestContent'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_contents'", 'to': u"orm['mturk.Experiment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'priority': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'})
        },
        u'mturk.mtassignment': {
            'Meta': {'object_name': 'MtAssignment'},
            'accept_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'action_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'approval_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'approve_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'auto_approval_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            'bonus_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'feedback': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'feedback_bonus_given': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'has_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['mturk.MtHit']"}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
            'manually_rejected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'num_test_contents': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_test_correct': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_test_incorrect': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'partially_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'post_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'post_meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'reject_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'rejection_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'screen_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'screen_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'submission_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'submit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'test_contents': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'assignments'", 'symmetrical': 'False', 'to': u"orm['mturk.ExperimentTestContent']"}),
            'time_active_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'time_load_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'time_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user_agent': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'wage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'worker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']", 'null': 'True', 'blank': 'True'})
        },
        u'mturk.mthit': {
            'Meta': {'object_name': 'MtHit'},
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'all_submitted_assignments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'any_submitted_assignments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'compatible_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hit_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'hit_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hits'", 'to': u"orm['mturk.MtHitType']"}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
            'incompatible_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'lifetime': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'max_assignments': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'num_assignments_available': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_assignments_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_assignments_pending': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_contents': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'out_count_ratio': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'review_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'mturk.mthittype': {
            'Meta': {'object_name': 'MtHitType'},
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'auto_approval_delay': ('django.db.models.fields.IntegerField', [], {'default': '2592000'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'duration': ('django.db.models.fields.IntegerField', [], {'default': '3600'}),
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hit_types'", 'to': u"orm['mturk.Experiment']"}),
            'experiment_settings': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hit_types'", 'to': u"orm['mturk.ExperimentSettings']"}),
            'external_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'feedback_bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            'frame_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
            'keywords': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
            'reward': ('django.db.models.fields.DecimalField', [], {'default': "'0.01'", 'max_digits': '8', 'decimal_places': '4'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'photos.flickruser': {
            'Meta': {'ordering': "['-id']", 'object_name': 'FlickrUser'},
            'blacklisted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '127'})
        },
        u'photos.photo': {
            'Meta': {'ordering': "['aspect_ratio', '-id']", 'object_name': 'Photo'},
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'aspect_ratio': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'exif': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'flickr_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'flickr_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.FlickrUser']"}),
            'focal_y': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'fov': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_orig': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'inappropriate': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['licenses.License']"}),
            'light_stack': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.PhotoLightStack']"}),
            'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'median_intrinsic_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'nonperspective': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'num_intrinsic_comparisons': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_intrinsic_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_shapes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_vertices': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'orig_height': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'orig_width': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'rotated': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'scene_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.PhotoSceneCategory']"}),
            'scene_category_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'scene_category_correct_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'scene_category_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'stylized': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'synthetic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
            'vanishing_length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'vanishing_lines': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'vanishing_points': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'whitebalanced': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'whitebalanced_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'photos.photolightstack': {
            'Meta': {'ordering': "['-id']", 'object_name': 'PhotoLightStack'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'photos.photoscenecategory': {
            'Meta': {'ordering': "['name']", 'object_name': 'PhotoSceneCategory'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '127'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['photos.PhotoSceneCategory']", 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['intrinsic']
| |
from core.himesis import Himesis
import uuid
class HCompositeState2ProcDef(Himesis):

    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule
        CompositeState2ProcDef.

        The graph is assembled in three passes: vertices (the match/apply
        models plus their classes and associations), the edges wiring them
        together, and finally the attribute equations applied when the
        rule fires.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HCompositeState2ProcDef, self).__init__(
            name='HCompositeState2ProcDef', num_nodes=0, edges=[])

        # Graph-level attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = 'CompositeState2ProcDef'
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, 'CompositeState2ProcDef')

        # One (mm__, attr1) pair per vertex, in creation order.  An attr1
        # of None means the vertex carries no attr1 value.
        node_specs = [
            ('MatchModel', None),                       # 0: match model (only one supported)
            ('ApplyModel', None),                       # 1: apply model
            ('paired_with', 'CompositeState2ProcDef'),  # 2: relation between 0 and 1
            ('State', '+'),                             # 3: match class State()
            ('ProcDef', '1'),                           # 4: apply class ProcDef()
            ('LocalDef', '1'),                          # 5: apply class LocalDef()
            ('Name', '1'),                              # 6: apply class Name()
            ('New', '1'),                               # 7: apply class New()
            ('Name', '1'),                              # 8
            ('Name', '1'),                              # 9
            ('Name', '1'),                              # 10
            ('Par', '1'),                               # 11: apply class Par()
            ('Inst', '1'),                              # 12: apply class Inst()
            ('Inst', '1'),                              # 13: apply class Inst()
            ('Name', '1'),                              # 14
            ('Name', '1'),                              # 15
            ('Name', '1'),                              # 16
            ('Name', '1'),                              # 17
            ('Name', '1'),                              # 18
            ('Name', '1'),                              # 19
            ('Name', '1'),                              # 20
            ('directLink_T', 'p'),                      # 21: ProcDef --p--> LocalDef
            ('directLink_T', 'channelNames'),           # 22: ProcDef --channelNames--> Name
            ('directLink_T', 'p'),                      # 23: LocalDef --p--> New
            ('directLink_T', 'channelNames'),           # 24: New --channelNames--> Name
            ('directLink_T', 'channelNames'),           # 25
            ('directLink_T', 'channelNames'),           # 26
            ('directLink_T', 'p'),                      # 27: New --p--> Par
            ('directLink_T', 'p'),                      # 28: Par --p--> Inst
            ('directLink_T', 'p'),                      # 29: Par --p--> Inst
            ('directLink_T', 'channelNames'),           # 30: Inst --channelNames--> Name
            ('directLink_T', 'channelNames'),           # 31
            ('directLink_T', 'channelNames'),           # 32
            ('directLink_T', 'channelNames'),           # 33
            ('directLink_T', 'channelNames'),           # 34
            ('directLink_T', 'channelNames'),           # 35
            ('directLink_T', 'channelNames'),           # 36
            ('backward_link', None),                    # 37: ProcDef back to match State
        ]
        for idx, (meta_type, attr1) in enumerate(node_specs):
            self.add_node()
            self.vs[idx]["mm__"] = meta_type
            if attr1 is not None:
                self.vs[idx]["attr1"] = attr1

        # Wire the graph: model containment, association endpoints, the
        # backward link and the paired_with relation.
        self.add_edges([
            (0, 3),                                     # matchmodel -> match_class State()
            (1, 4), (1, 5), (1, 6), (1, 7), (1, 8),     # applymodel -> apply classes
            (1, 9), (1, 10), (1, 11), (1, 12), (1, 13),
            (1, 14), (1, 15), (1, 16), (1, 17), (1, 18),
            (1, 19), (1, 20),
            (4, 21), (21, 5),                           # ProcDef --p--> LocalDef
            (4, 22), (22, 6),                           # ProcDef --channelNames--> Name
            (5, 23), (23, 7),                           # LocalDef --p--> New
            (7, 24), (24, 8),                           # New --channelNames--> Name
            (7, 25), (25, 9),                           # New --channelNames--> Name
            (7, 26), (26, 10),                          # New --channelNames--> Name
            (7, 27), (27, 11),                          # New --p--> Par
            (11, 28), (28, 13),                         # Par --p--> Inst
            (11, 29), (29, 12),                         # Par --p--> Inst
            (12, 30), (30, 14),                         # Inst --channelNames--> Name
            (12, 31), (31, 15),                         # Inst --channelNames--> Name
            (12, 32), (32, 16),                         # Inst --channelNames--> Name
            (12, 33), (33, 17),                         # Inst --channelNames--> Name
            (13, 34), (34, 18),                         # Inst --channelNames--> Name
            (13, 35), (35, 19),                         # Inst --channelNames--> Name
            (13, 36), (36, 20),                         # Inst --channelNames--> Name
            (4, 37), (37, 3),                           # backward link ProcDef -> State
            (0, 2), (2, 1)                              # matchmodel -> paired_with -> applymodel
        ])

        # Attribute equations: (vertex, attribute) = ('constant', value)
        self["equations"] = [
            ((3, 'isComposite'), ('constant', 'true')),
            ((6, 'literal'), ('constant', 'sh')),
            ((8, 'literal'), ('constant', 'exit_in')),
            ((9, 'literal'), ('constant', 'exack_in')),
            ((10, 'literal'), ('constant', 'sh_in')),
            ((12, 'name'), ('constant', 'C')),
            ((13, 'name'), ('constant', 'H')),
            ((14, 'literal'), ('constant', 'enp')),
            ((15, 'literal'), ('constant', 'exit_in')),
            ((16, 'literal'), ('constant', 'exack_in')),
            ((17, 'literal'), ('constant', 'sh_in')),
            ((18, 'literal'), ('constant', 'exit_in')),
            ((19, 'literal'), ('constant', 'exack_in')),
            ((20, 'literal'), ('constant', 'sh_in')),
        ]
| |
"""
Please note that this is the historic version, and now we have moved to parsing the JSON
files directly. See model_data.py
A parser for the SEED biochemistry modules that are available on Github
at https://github.com/ModelSEED/ModelSEEDDatabase. We have also included
them in our repo as a submodule.
We parse compounds from the compounds file in Biochemistry. Locations
are currently hardcoded because the ModelSeedDirectory does not contain
a mapping for compartments (the mapping files do not have the integers
used in the reactions file!).
"""
import copy
import os
import re
import sys
import io
import PyFBA
from .model_seed import location
def template_reactions(modeltype='microbial'):
    """
    Load the template reactions to adjust the model. Returns a hash of some altered parameters for the model

    :param modeltype: which type of model to load e.g. GramNegative, GramPositive, Microbial
    :type modeltype: str
    :return: A hash of the new model parameters that should be used to update the reactions object
    :rtype: dict
    :raises NotImplementedError: if modeltype is not a known template type
    :raises IOError: if the template file cannot be found under MODELSEED_DIR
    """
    # map the accepted (lower-cased) model type spellings to their files
    template_files = {
        'microbial': "Templates/Microbial/Reactions.tsv",
        'gramnegative': "Templates/GramNegative/Reactions.tsv",
        'gram_negative': "Templates/GramNegative/Reactions.tsv",
        'grampositive': "Templates/GramPositive/Reactions.tsv",
        'gram_positive': "Templates/GramPositive/Reactions.tsv",
        'mycobacteria': "Templates/Mycobacteria/Reactions.tsv",
        'plant': "Templates/Plant/Reactions.tsv",
    }
    try:
        inputfile = template_files[modeltype.lower()]
    except KeyError:
        # BUGFIX: the original raised with the (empty) inputfile string,
        # producing a message with no model type in it. Report modeltype.
        raise NotImplementedError("Parsing data for " + modeltype + " has not been implemented!")
    # NOTE(review): MODELSEED_DIR is not defined in this module's visible
    # imports — presumably provided elsewhere in the package; verify.
    if not os.path.exists(os.path.join(MODELSEED_DIR, inputfile)):
        raise IOError(os.path.join(MODELSEED_DIR, inputfile) +
                      " was not found. Please check your model SEED directory (" + MODELSEED_DIR + ")")
    new_enz = {}
    with open(os.path.join(MODELSEED_DIR, inputfile), 'r') as f:
        for l in f:
            if l.startswith('id'):
                # header row
                continue
            p = l.strip().split("\t")
            # column 2 is the direction, last column the pipe-separated enzymes
            new_enz[p[0]] = {}
            new_enz[p[0]]['direction'] = p[2]
            new_enz[p[0]]['enzymes'] = set(p[-1].split("|"))
    return new_enz
def compounds(compounds_file=None):
    """
    Load the compounds mapping from cpd id to name.

    We use the name in our reactions, but parse the model seed database by
    cpd id to avoid ambiguities.  If no file is given, the default in
    MODELSEED_DIR/Biochemistry/compounds.master.tsv is used.

    :param compounds_file: An optional filename of a compounds file to parse
    :type compounds_file: str
    :return: A hash of compounds with the str(compound) as the key and the compound object as the value
    :rtype: dict
    """
    cpds = {}
    if not compounds_file:
        compounds_file = os.path.join(MODELSEED_DIR, 'Biochemistry/compounds.master.tsv')
    try:
        with open(compounds_file, 'r') as fin:
            rows = iter(fin)
            next(rows, None)  # drop the header line
            for row in rows:
                cols = row.strip().split("\t")
                cpd = PyFBA.metabolism.Compound(cols[2], '')
                cpd.model_seed_id = cols[0]
                cpd.abbreviation = cols[1]
                cpd.formula = cols[3]
                cpd.mw = cols[4]
                key = str(cpd)
                if key in cpds:
                    # some compounds (like D-Glucose and Fe2+) appear more
                    # than once; remember the extra seed id on the original
                    cpds[key].alternate_seed_ids.add(cols[0])
                else:
                    cpds[key] = cpd
    except IOError as e:
        sys.exit("There was an error parsing " +
                 compounds_file + "\n" + "I/O error({0}): {1}".format(e.errno, e.strerror))
    return cpds
def reactions(organism_type="", rctf='Biochemistry/reactions.master.tsv', verbose=False):
    """
    Parse the reaction information in Biochemistry/reactions.master.tsv.

    One reaction ID is associated with one equation and thus many
    compounds and parts.  You can supply an alternative reactions file
    (rctf) if you don't like the default.

    :param organism_type: The type of organism, eg. microbial, gram_negative, gram_positive
    :type organism_type: str
    :param rctf: The optional reaction file to provide
    :type rctf: str
    :param verbose: Print more output
    :type verbose: bool
    :return: Two components, a dict of the reactions and a dict of all the compounds used in the reactions.
    :rtype: dict, dict
    """
    locations = location()
    cpds = compounds()
    # Index compounds by every ModelSEED id (primary and alternates) so the
    # ids in reaction equations can be resolved to compound names.
    cpds_by_id = {}
    for c in cpds:
        cpds_by_id[cpds[c].model_seed_id] = cpds[c]
        for asi in cpds[c].alternate_seed_ids:
            cpds_by_id[asi] = cpds[c]
    all_reactions = {}
    # Matches "(quantity) compound[location]".  Compiled once outside the
    # loop; raw string avoids invalid-escape-sequence warnings.
    cpd_re = re.compile(r'\(([\d\.e-]+)\)\s+(.*?)\[(\d+)\]')
    try:
        with open(os.path.join(MODELSEED_DIR, rctf), 'r') as rxnf:
            for l in rxnf:
                # ignore the header and any comment lines
                if l.startswith('id') or l.startswith("#"):
                    continue
                pieces = l.strip().split("\t")
                if len(pieces) < 20:
                    sys.stderr.write("ERROR PARSING REACTION INFO: " + l)
                    continue
                rid = pieces[0]
                rxn = pieces[6]
                # normalise the "no value" spellings to None
                for i in range(len(pieces)):
                    if pieces[i] in ("none", "null"):
                        pieces[i] = None
                deltaG = float(pieces[14]) if pieces[14] else 0.0
                deltaG_error = float(pieces[15]) if pieces[15] else 0.0
                # Different reactions use different separators; "Not found"
                # is a sentinel left in `separator` when none matches.
                separator = ""
                for separator in [" <=> ", " => ", " <= ", " = ", " < ", " > ", "Not found"]:
                    if separator in rxn:
                        break
                if separator == "Not found":
                    if verbose:
                        sys.stderr.write("WARNING: Could not find a separator in " + rxn +
                                         ". This reaction was skipped. Please check it\n")
                    continue
                left, right = rxn.split(separator)
                left = left.strip()
                right = right.strip()
                # create a new reaction object to hold all the information
                r = PyFBA.metabolism.Reaction(rid)
                r.deltaG = deltaG
                r.deltaG_error = deltaG_error
                if pieces[5] != '0':
                    r.is_transport = True
                r.direction = pieces[9]
                # we rewrite the equation to accommodate the proper locations
                newleft = []
                newright = []
                # deal with the compounds on the left side of the equation
                m = cpd_re.findall(left)
                if m == [] and verbose:
                    sys.stderr.write("ERROR: Could not parse the compounds" + " on the left side of the reaction " +
                                     rid + ": " + rxn + "\n")
                for (q, cmpd, locval) in m:
                    if locval in locations:
                        loc = locations[locval]
                    else:
                        if verbose:
                            sys.stderr.write("WARNING: Could not get a location " + " for " + locval + "\n")
                        loc = locval
                    # look the compound up by id, then re-create it with the
                    # appropriate location
                    if cmpd in cpds_by_id:
                        nc = PyFBA.metabolism.Compound(cpds_by_id[cmpd].name, loc)
                    else:
                        if verbose:
                            sys.stderr.write("ERROR: Did not find " + cmpd + " in the compounds file.\n")
                        nc = PyFBA.metabolism.Compound(cmpd, loc)
                    ncstr = str(nc)
                    if ncstr in cpds:
                        nc = copy.copy(cpds[ncstr])
                    nc.add_reactions({rid})
                    cpds[ncstr] = nc
                    r.add_left_compounds({nc})
                    r.set_left_compound_abundance(nc, float(q))
                    newleft.append("(" + str(q) + ") " + nc.name + "[" + loc + "]")
                # deal with the right side of the equation
                m = cpd_re.findall(right)
                if m == [] and verbose:
                    sys.stderr.write("ERROR: Could not parse the compounds on the right side of the reaction " +
                                     rid + ": " + rxn + " >>" + right + "<<\n")
                for (q, cmpd, locval) in m:
                    if locval in locations:
                        loc = locations[locval]
                    else:
                        if verbose:
                            sys.stderr.write("WARNING: Could not get a location " + " for " + locval + "\n")
                        loc = locval
                    # we first look up to see whether we have the compound
                    # and then we need to create a new compound with the
                    # appropriate location
                    if cmpd in cpds_by_id:
                        nc = PyFBA.metabolism.Compound(cpds_by_id[cmpd].name, loc)
                    else:
                        if verbose:
                            sys.stderr.write("ERROR: Did not find " + cmpd + " in the compounds file.\n")
                        nc = PyFBA.metabolism.Compound(cmpd, loc)
                    ncstr = str(nc)
                    if ncstr in cpds:
                        nc = copy.copy(cpds[ncstr])
                    nc.add_reactions({rid})
                    cpds[ncstr] = nc
                    r.add_right_compounds({nc})
                    r.set_right_compound_abundance(nc, float(q))
                    newright.append("(" + str(q) + ") " + nc.name + "[" + loc + "]")
                r.equation = " + ".join(newleft) + " <=> " + " + ".join(newright)
                all_reactions[rid] = r
    except IOError as e:
        sys.exit("There was an error parsing " + rctf + "\n" + "I/O error({0}): {1}".format(e.errno, e.strerror))
    # finally, if we need to adjust the organism type based on Template reactions, we shall
    if organism_type:
        new_rcts = template_reactions(organism_type)
        for r in new_rcts:
            all_reactions[r].direction = new_rcts[r]['direction']
            all_reactions[r].enzymes = new_rcts[r]['enzymes']
    return cpds, all_reactions
def complexes(cf="SOLRDump/TemplateReactions.tsv", verbose=False):
    """
    Connection between complexes and reactions.

    A complex can be involved in many reactions and many complexes are
    involved in one reaction, so this is a many:many relationship.  The
    returned dict maps each complex id to the set of reaction ids it is
    involved in.  An alternate complexes file (cf) may be supplied.

    :param cf: An optional complexes file name
    :type cf: str
    :param verbose: Print more output
    :type verbose: bool
    :return A dict of the complexes where the key is the complex id and the value is the set of reactions
    :rtype: dict
    """
    cplxes = {}
    path = os.path.join(MODELSEED_DIR, cf)
    try:
        # io.open() gives us the encoding/errors arguments on Python2 too,
        # yielding unicode objects there (equivalent to Python3 str).
        with io.open(path, 'r', encoding='utf-8', errors='replace') as rin:
            for line in rin:
                if sys.version_info.major == 2:
                    # Python2: turn the unicode object back into a str
                    line = line.encode('utf-8', 'replace')
                if line.startswith(("#", 'id')):
                    # comments and the header row
                    continue
                cols = line.strip().split("\t")
                if len(cols) < 30:
                    if verbose:
                        sys.stderr.write("WARNING: Malformed line in " + cf + ": " + line + "\n")
                    continue
                if cols[28] == "":
                    continue
                for cmplx in cols[28].split(';'):
                    cplxes.setdefault(cmplx, set()).add(cols[1])
    except IOError as e:
        sys.stderr.write("There was an error parsing {}\n".format(path))
        sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
        sys.exit(-1)
    return cplxes
def roles_ec(rf="SOLRDump/ComplexRoles.tsv"):
    """
    Map role names and EC numbers to the complexes they are involved in.

    The key is the role name or EC number and the value is the set of
    complex IDs the role participates in; one role or EC can be involved
    in many complexes.  An alternate roles file (rf) may be supplied.

    :param rf: an alternate roles file
    :type rf: str
    :return: A dict of role name and complex ids that the roles is involved with
    :rtype: dict
    """
    rles_ec = {}
    # EC numbers look like 1.2.3.4 with '-' allowed as a wildcard digit
    ec_pattern = re.compile(r'[\d\-]+\.[\d\-]+\.[\d\-]+\.[\d\-]+')
    try:
        with open(os.path.join(MODELSEED_DIR, rf), 'r') as rin:
            for line in rin:
                if line.startswith(("#", 'complex_id')):
                    # comments and the header row
                    continue
                cols = line.strip().split("\t")
                rles_ec.setdefault(cols[5], set()).add(cols[0])
                # also index by any EC number embedded in the line
                for ecno in ec_pattern.findall(line):
                    rles_ec.setdefault(ecno, set()).add(cols[0])
    except IOError as e:
        sys.exit("There was an error parsing " + rf + "\n" + "I/O error({0}): {1}".format(e.errno, e.strerror))
    return rles_ec
def roles(rf="SOLRDump/ComplexRoles.tsv"):
    """
    Map each role name to the set of complex IDs it is involved in.

    One role can be involved in many complexes.  An alternate roles file
    (rf) may be supplied instead of the default.

    :param rf: an alternate roles file
    :type rf: str
    :return: A dict of role name and complex ids that the roles is involved with
    :rtype: dict
    """
    role_map = {}
    try:
        with open(os.path.join(MODELSEED_DIR, rf), 'r') as rin:
            for line in rin:
                if line.startswith(("#", 'complex_id')):
                    # comments and the header row
                    continue
                cols = line.strip().split("\t")
                # column 5 is the role name, column 0 the complex id
                role_map.setdefault(cols[5], set()).add(cols[0])
    except IOError as e:
        sys.exit("There was an error parsing " + rf + "\n" + "I/O error({0}): {1}".format(e.errno, e.strerror))
    return role_map
def enzymes(verbose=False):
    """
    Convert the roles and complexes into Enzyme objects connected to
    reactions, and return just the enzymes.

    You probably want compounds_reactions_enzymes; this is partly here as
    a test case to make sure that enzymes and complexes play well
    together.

    :param verbose: Print more output
    :type verbose: bool
    :return: A dict with complex id as key and the Enzyme object as value
    """
    roleset = roles()
    cmplxset = complexes()
    enzs = {}
    cpds, rcts = reactions()
    # roleset maps role name -> set of complex ids containing that role
    for rolename, complexids in roleset.items():
        for complexid in complexids:
            if complexid not in cmplxset:
                if verbose:
                    sys.stderr.write("WARNING: " + complexid + " is not in the complexes\n")
                continue
            if complexid not in enzs:
                enzs[complexid] = PyFBA.metabolism.Enzyme(complexid)
            enzs[complexid].add_roles({rolename})
            # attach any EC numbers embedded in the role name
            for ecno in re.findall(r'[\d\-]+\.[\d\-]+\.[\d\-]+\.[\d\-]+', rolename):
                enzs[complexid].add_ec(ecno)
    # cmplxset maps complex id -> set of reaction ids
    for complexid, reactids in cmplxset.items():
        if complexid not in enzs:
            if verbose:
                sys.stderr.write("WARNING: No roles found that are part of complex " + complexid + "\n")
            continue
        for reactid in reactids:
            if reactid in rcts:
                enzs[complexid].add_reaction(reactid)
                rcts[reactid].add_enzymes({complexid})
    return enzs
def compounds_reactions_enzymes(organism_type='', verbose=False):
    """
    Convert the roles and complexes into enzymes connected to reactions,
    and return the compounds, the reactions, and the enzymes.

    See the individual methods for the shape of each returned dict.

    :param organism_type: The type of organism, eg. Microbial, Gram_positive, Gram_negative
    :type organism_type:str
    :param verbose:Print more output
    :type verbose:bool
    :return: The compounds, the reactions, and the enzymes in that order
    :rtype: dict of Compound, dict of Reaction, dict of Enzyme
    """
    roleset = roles()
    cmplxset = complexes()
    cpds, rcts = reactions(organism_type, verbose=verbose)
    enzs = {}
    # roleset maps role name -> set of complex ids containing that role
    for rolename, complexids in roleset.items():
        for complexid in complexids:
            if complexid not in cmplxset:
                if verbose:
                    sys.stderr.write("WARNING: " + complexid + " is not in the complexes\n")
                continue
            if complexid not in enzs:
                enzs[complexid] = PyFBA.metabolism.Enzyme(complexid)
            enzs[complexid].add_roles({rolename})
            # attach any EC numbers embedded in the role name
            for ecno in re.findall(r'[\d\-]+\.[\d\-]+\.[\d\-]+\.[\d\-]+', rolename):
                enzs[complexid].add_ec(ecno)
    # cmplxset maps complex id -> set of reaction ids
    for complexid, reactids in cmplxset.items():
        if complexid not in enzs:
            if verbose:
                sys.stderr.write("WARNING: No roles found that are part of complex " + complexid + "\n")
            continue
        for reactid in reactids:
            if reactid in rcts:
                enzs[complexid].add_reaction(reactid)
                rcts[reactid].add_enzymes({complexid})
    return cpds, rcts, enzs
| |
import os
import django
from django.test.testcases import SimpleTestCase
from django.test.utils import override_settings, modify_settings
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from atlassian_jwt_auth.contrib.tests.utils import (
get_static_retriever_class,
)
from atlassian_jwt_auth.tests import utils
from atlassian_jwt_auth.tests.utils import (
create_token,
RS256KeyTestMixin,
)
class DjangoAsapMixin(object):
    """Shared setup for the Django ASAP tests: activates the test settings
    module and prepares a signing key pair plus a matching static key
    retriever for the 'client-app/key01' key id."""

    @classmethod
    def setUpClass(cls):
        # The settings module must be in place before django.setup().
        os.environ.setdefault(
            'DJANGO_SETTINGS_MODULE',
            'atlassian_jwt_auth.frameworks.django.tests.settings')
        django.setup()
        super(DjangoAsapMixin, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(DjangoAsapMixin, cls).tearDownClass()
        del os.environ['DJANGO_SETTINGS_MODULE']

    def setUp(self):
        super(DjangoAsapMixin, self).setUp()
        self._private_key_pem = self.get_new_private_key_in_pem_format()
        self._public_key_pem = utils.get_public_key_pem_for_private_key_pem(
            self._private_key_pem)
        # A retriever that only knows the public half of our test key.
        self.retriever = get_static_retriever_class(
            {'client-app/key01': self._public_key_pem})
        # Rebuilt per test, so individual tests may mutate it freely.
        self.test_settings = {'ASAP_KEY_RETRIEVER_CLASS': self.retriever}
@modify_settings(MIDDLEWARE={
    'prepend': 'atlassian_jwt_auth.frameworks.django.asap_middleware',
})
class TestAsapMiddleware(DjangoAsapMixin, RS256KeyTestMixin, SimpleTestCase):
    """End-to-end tests for the ASAP authentication middleware.

    The middleware is prepended to MIDDLEWARE, so every request issued
    through the test client is authenticated before reaching its view.
    """

    def check_response(self,
                       view_name,
                       response_content='',
                       status_code=200,
                       issuer='client-app',
                       audience='server-app',
                       key_id='client-app/key01',
                       subject=None,
                       private_key=None,
                       token=None,
                       authorization=None,
                       retriever_key=None):
        """GET *view_name* with an ASAP Authorization header and assert the
        response contains *response_content* with *status_code*.

        The header is derived in order of precedence: an explicit
        *authorization*, an explicit *token*, or a token freshly signed
        from issuer/audience/key_id/subject/private_key.  If
        *retriever_key* is given, the key retriever is swapped for one
        that only knows that key id.
        """
        if authorization is None:
            if token is None:
                if private_key is None:
                    private_key = self._private_key_pem
                token = create_token(issuer=issuer, audience=audience,
                                     key_id=key_id, private_key=private_key,
                                     subject=subject)
            authorization = b'Bearer ' + token
        # Copy so a per-test retriever override never leaks into
        # self.test_settings.
        test_settings = self.test_settings.copy()
        if retriever_key is not None:
            retriever = get_static_retriever_class({
                retriever_key: self._public_key_pem
            })
            test_settings['ASAP_KEY_RETRIEVER_CLASS'] = retriever
        with override_settings(**test_settings):
            response = self.client.get(reverse(view_name),
                                       HTTP_AUTHORIZATION=authorization)
        self.assertContains(response, response_content,
                            status_code=status_code)

    def test_request_with_valid_token_is_allowed(self):
        # All defaults: a valid client-app token reaches the view.
        self.check_response('needed', 'one', 200)

    def test_request_with_duplicate_jti_is_rejected_as_per_setting(self):
        # Mutating self.test_settings is safe here: setUp rebuilds it for
        # every test.
        self.test_settings['ASAP_CHECK_JTI_UNIQUENESS'] = True
        token = create_token(
            issuer='client-app', audience='server-app',
            key_id='client-app/key01', private_key=self._private_key_pem
        )
        str_auth = 'Bearer ' + token.decode(encoding='iso-8859-1')
        # First use succeeds; replaying the same jti must be rejected.
        self.check_response('needed', 'one', 200, authorization=str_auth)
        self.check_response('needed', 'duplicate jti', 401,
                            authorization=str_auth)

    def _assert_request_with_duplicate_jti_is_accepted(self):
        # Helper: replaying the same token twice succeeds both times.
        token = create_token(
            issuer='client-app', audience='server-app',
            key_id='client-app/key01', private_key=self._private_key_pem
        )
        str_auth = 'Bearer ' + token.decode(encoding='iso-8859-1')
        self.check_response('needed', 'one', 200, authorization=str_auth)
        self.check_response('needed', 'one', 200, authorization=str_auth)

    def test_request_with_duplicate_jti_is_accepted(self):
        # Default behaviour: jti uniqueness is not enforced.
        self._assert_request_with_duplicate_jti_is_accepted()

    def test_request_with_duplicate_jti_is_accepted_as_per_setting(self):
        self.test_settings['ASAP_CHECK_JTI_UNIQUENESS'] = False
        self._assert_request_with_duplicate_jti_is_accepted()

    def test_request_with_string_headers_is_allowed(self):
        # A str (not bytes) Authorization header must also be accepted.
        token = create_token(
            issuer='client-app', audience='server-app',
            key_id='client-app/key01', private_key=self._private_key_pem
        )
        str_auth = 'Bearer ' + token.decode(encoding='iso-8859-1')
        self.check_response('needed', 'one', 200, authorization=str_auth)

    def test_request_with_invalid_audience_is_rejected(self):
        self.check_response('needed', 'Unauthorized', 401,
                            audience='invalid')

    def test_request_with_invalid_token_is_rejected(self):
        self.check_response('needed', 'Unauthorized', 401,
                            authorization='Bearer invalid')

    def test_request_without_token_is_rejected(self):
        with override_settings(**self.test_settings):
            response = self.client.get(reverse('needed'))
        self.assertContains(response, 'Unauthorized',
                            status_code=401)

    def test_request_with_invalid_issuer_is_rejected(self):
        # Valid signature and known key, but the issuer is not allowed.
        self.check_response('needed', 'Forbidden', 403,
                            issuer='something-invalid',
                            key_id='something-invalid/key01',
                            retriever_key='something-invalid/key01')

    def test_request_non_whitelisted_decorated_issuer_is_rejected(self):
        self.check_response('needed', 'Forbidden', 403,
                            issuer='unexpected',
                            key_id='unexpected/key01',
                            retriever_key='unexpected/key01')

    def test_request_non_decorated_issuer_is_rejected(self):
        self.check_response('restricted_issuer', 'Forbidden', 403)

    def test_request_decorated_issuer_is_allowed(self):
        self.check_response('restricted_issuer', 'three',
                            issuer='whitelist',
                            key_id='whitelist/key01',
                            retriever_key='whitelist/key01')

    # TODO: modify JWTAuthSigner to allow non-issuer subjects and update the
    # decorated subject test cases
    def test_request_non_decorated_subject_is_rejected(self):
        self.check_response('restricted_subject', 'Forbidden', 403,
                            issuer='whitelist',
                            key_id='whitelist/key01',
                            retriever_key='whitelist/key01')

    def test_request_using_settings_only_is_allowed(self):
        self.check_response('unneeded', 'two')

    def test_request_subject_does_not_need_to_match_issuer_from_settings(self):
        self.test_settings['ASAP_SUBJECT_SHOULD_MATCH_ISSUER'] = False
        self.check_response('needed', 'one', 200, subject='different_than_is')

    def test_request_subject_and_issue_not_matching(self):
        # Default: subject must match issuer, otherwise 401.
        self.check_response(
            'needed',
            'Subject and Issuer do not match',
            401,
            subject='different_than_is',
        )
class TestAsapDecorator(DjangoAsapMixin, RS256KeyTestMixin, SimpleTestCase):
def test_request_with_valid_token_is_allowed(self):
    """A correctly signed token from a known issuer reaches the view."""
    tok = create_token(issuer='client-app',
                       audience='server-app',
                       key_id='client-app/key01',
                       private_key=self._private_key_pem)
    auth_header = b'Bearer ' + tok
    with override_settings(**self.test_settings):
        resp = self.client.get(reverse('expected'),
                               HTTP_AUTHORIZATION=auth_header)
    self.assertContains(resp, 'Greatest Success!', status_code=200)
def test_request_with_string_headers_is_allowed(self):
    """A str (rather than bytes) Authorization header is accepted too."""
    tok = create_token(issuer='client-app',
                       audience='server-app',
                       key_id='client-app/key01',
                       private_key=self._private_key_pem)
    auth_header = 'Bearer ' + tok.decode(encoding='iso-8859-1')
    with override_settings(**self.test_settings):
        resp = self.client.get(reverse('expected'),
                               HTTP_AUTHORIZATION=auth_header)
    self.assertContains(resp, 'Greatest Success!', status_code=200)
def test_request_with_invalid_audience_is_rejected(self):
    """Tokens minted for a different audience are rejected with 401."""
    tok = create_token(issuer='client-app',
                       audience='something-invalid',
                       key_id='client-app/key01',
                       private_key=self._private_key_pem)
    with override_settings(**self.test_settings):
        resp = self.client.get(reverse('expected'),
                               HTTP_AUTHORIZATION=b'Bearer ' + tok)
    self.assertContains(resp, 'Unauthorized: Invalid token',
                        status_code=401)
def test_request_with_invalid_token_is_rejected(self):
    """A garbage bearer token yields a 401 response."""
    bad_auth = b'Bearer notavalidtoken'
    with override_settings(**self.test_settings):
        resp = self.client.get(reverse('expected'),
                               HTTP_AUTHORIZATION=bad_auth)
    self.assertContains(resp, 'Unauthorized: Invalid token',
                        status_code=401)
def test_request_without_token_is_rejected(self):
    """A request with no Authorization header at all gets 401."""
    with override_settings(**self.test_settings):
        resp = self.client.get(reverse('expected'))
    self.assertContains(resp, 'Unauthorized', status_code=401)
def test_request_with_invalid_issuer_is_rejected(self):
    """A verifiable token from a non-allowed issuer yields 403."""
    key_map = {'something-invalid/key01': self._public_key_pem}
    retriever = get_static_retriever_class(key_map)
    tok = create_token(issuer='something-invalid',
                       audience='server-app',
                       key_id='something-invalid/key01',
                       private_key=self._private_key_pem)
    with override_settings(ASAP_KEY_RETRIEVER_CLASS=retriever):
        resp = self.client.get(reverse('expected'),
                               HTTP_AUTHORIZATION=b'Bearer ' + tok)
    self.assertContains(resp, 'Forbidden: Invalid token issuer',
                        status_code=403)
def test_request_non_decorated_issuer_is_rejected(self):
    """An issuer outside the view decorator's allow-list yields 403."""
    tok = create_token(issuer='client-app',
                       audience='server-app',
                       key_id='client-app/key01',
                       private_key=self._private_key_pem)
    with override_settings(**self.test_settings):
        resp = self.client.get(reverse('decorated'),
                               HTTP_AUTHORIZATION=b'Bearer ' + tok)
    self.assertContains(resp, 'Forbidden: Invalid token issuer',
                        status_code=403)
def test_request_decorated_issuer_is_allowed(self):
    """An issuer explicitly whitelisted by the decorator must be accepted."""
    retriever = get_static_retriever_class({
        'whitelist/key01': self._public_key_pem
    })
    whitelisted_token = create_token(
        issuer='whitelist',
        audience='server-app',
        key_id='whitelist/key01',
        private_key=self._private_key_pem,
    )
    with override_settings(ASAP_KEY_RETRIEVER_CLASS=retriever):
        response = self.client.get(reverse('decorated'),
                                   HTTP_AUTHORIZATION=b'Bearer ' + whitelisted_token)
        self.assertContains(response, 'Only the right issuer is allowed.')
def test_request_using_settings_only_is_allowed(self):
    """A view relying purely on Django settings must accept a valid issuer."""
    settings_token = create_token(
        issuer='client-app',
        audience='server-app',
        key_id='client-app/key01',
        private_key=self._private_key_pem,
    )
    with override_settings(**self.test_settings):
        response = self.client.get(reverse('settings'),
                                   HTTP_AUTHORIZATION=b'Bearer ' + settings_token)
        self.assertContains(response, 'Any settings issuer is allowed.')
def test_request_subject_does_not_need_to_match_issuer(self):
    """A mismatched subject is accepted when the view opts out of the check."""
    mismatched_token = create_token(
        issuer='client-app',
        audience='server-app',
        key_id='client-app/key01',
        private_key=self._private_key_pem,
        subject='not-client-app',
    )
    with override_settings(**self.test_settings):
        response = self.client.get(
            reverse('subject_does_not_need_to_match_issuer'),
            HTTP_AUTHORIZATION=b'Bearer ' + mismatched_token)
        self.assertContains(response, 'Subject does not need to match issuer.')
def test_request_subject_does_need_to_match_issuer_override_settings(self):
    """ tests that the with_asap decorator can override the
    ASAP_SUBJECT_SHOULD_MATCH_ISSUER setting.
    """
    mismatched_token = create_token(
        issuer='client-app',
        audience='server-app',
        key_id='client-app/key01',
        private_key=self._private_key_pem,
        subject='not-client-app',
    )
    # Globally relax the subject check; the decorator re-enables it.
    relaxed_settings = dict(self.test_settings,
                            ASAP_SUBJECT_SHOULD_MATCH_ISSUER=False)
    with override_settings(**relaxed_settings):
        response = self.client.get(
            reverse('subject_does_need_to_match_issuer'),
            HTTP_AUTHORIZATION=b'Bearer ' + mismatched_token)
        self.assertContains(
            response,
            'Unauthorized: Subject and Issuer do not match',
            status_code=401
        )
def test_request_subject_does_not_need_to_match_issuer_from_settings(self):
    """The settings-level opt-out of the subject check must be honoured."""
    mismatched_token = create_token(
        issuer='client-app',
        audience='server-app',
        key_id='client-app/key01',
        private_key=self._private_key_pem,
        subject='not-client-app',
    )
    relaxed_settings = dict(self.test_settings,
                            ASAP_SUBJECT_SHOULD_MATCH_ISSUER=False)
    with override_settings(**relaxed_settings):
        response = self.client.get(
            reverse('subject_does_not_need_to_match_issuer_from_settings'),
            HTTP_AUTHORIZATION=b'Bearer ' + mismatched_token)
        self.assertContains(
            response, 'Subject does not need to match issuer (settings).')
| |
# ------------- IMPORTS
import os, glob
import subprocess
import datetime
import sys
import time
import RPi.GPIO as GPIO
import picamera
import config
from PIL import Image
# -------------- set GPIO-Input
# Buttons are wired to ground with internal pull-ups enabled, so each input
# idles high and GPIO.input(...) reads False while the button is pressed.
GPIO.setmode(GPIO.BCM)
GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_UP) # start taken pics
GPIO.setup(25, GPIO.IN, pull_up_down=GPIO.PUD_UP) # exit program
# -------------- functions
def generate_collage(files):
    """Build a collage from the captured pictures, then save and display it.

    Two pictures are placed side by side; three pictures are laid out with
    two on top and the third centred below.

    Bug fixes vs. the original:
    * ``map(Image.open, files)`` returns a one-shot iterator on Python 3 that
      was fully consumed by the size query, so the paste loops never ran and
      the collage came out blank.  The images are now materialised in a list.
    * Size and offset arithmetic now uses integer division (``//``); on
      Python 3 plain ``/`` yields floats, which PIL rejects for image sizes
      and paste coordinates.
    """
    images = [Image.open(f) for f in files]
    widths, heights = zip(*(i.size for i in images))
    total_width = sum(widths)
    max_height = max(heights)
    margin = config.COLLAGE_MARGIN
    if config.NUMBER_OF_PICTURES == 2:
        # Two pictures side by side, separated by one margin.
        new_im = Image.new('RGB', (total_width + margin, max_height), (255, 255, 255))
        x_offset = 0
        for im in images:
            new_im.paste(im, (x_offset, 0))
            x_offset = x_offset + im.size[0] + margin
        # save & display
        display_image(save_image(new_im))
    if config.NUMBER_OF_PICTURES == 3:
        # Two pictures in the first row, the third centred in a second row.
        image_width = total_width // 3 * 2 + margin
        image_height = max_height * 2 + margin
        new_im = Image.new('RGB', (image_width, image_height), (255, 255, 255))
        x_offset = 0
        y_offset = 0
        counter = 1
        for im in images:
            if counter == 3:
                # third picture: centre it horizontally in the second row
                x_offset = (image_width // 2) - (im.size[0] // 2)
                y_offset = max_height + margin
            new_im.paste(im, (x_offset, y_offset))
            x_offset = x_offset + im.size[0] + margin
            counter = counter + 1
        # save & display
        display_image(save_image(new_im))
def save_taken_pics_to_usb(pictureList):
    """Copy every captured picture onto the USB stick.

    Each picture is re-painted onto a white RGB canvas of the same size
    before saving, so the USB copy is plain RGB JPEG.

    Bug fix: the original derived every file name from the current
    wall-clock second only, so pictures written within the same second
    silently overwrote each other; a per-picture index is now appended to
    keep the names unique.
    """
    for index, picture_path in enumerate(pictureList):
        currentImage = Image.open(picture_path)
        newUsbImage = Image.new("RGB", (currentImage.size[0], currentImage.size[1]), "white")
        newUsbImage.paste(currentImage, (0, 0))
        timestampAsString = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H-%M-%S')
        usbFileName = config.USB_PATH + '' + str(timestampAsString) + '_' + str(index) + '.jpg'
        newUsbImage.save(usbFileName)
#returns filename collage
#returns filename collage
def save_image(new_im):
    """Save the collage image and return the path it was written to.

    The collage goes to the web gallery when enabled, otherwise to the
    local pictures directory; a copy is also written to the USB stick
    when a USB path is configured.
    """
    timestampAsString = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H-%M-%S')
    if config.SAVE_TO_GALLERY:
        #webserver gallery enabled = true -> save to gallery
        collageFile = config.GALLERY_DIRECTORY + 'collage_' + str(timestampAsString) + '.jpg'
    else:
        #else -> save to local pictures directory
        collageFile = 'pictures/collage_' + str(timestampAsString) + '.jpg'
    new_im.save(collageFile)
    if config.USB_PATH != '':
        # mirror the collage onto the USB stick as well
        new_im.save(config.USB_PATH + 'collage_' + str(timestampAsString) + '.jpg')
    return collageFile
#displays one pic
def display_image(dis_im):
viewer = subprocess.Popen(['feh', '--fullscreen', dis_im])
time.sleep(3);
viewer.terminate()
viewer.kill()
#displays a list of pics
def display_taken_pics(takenPics):
pictureList = []
counter = 0
while (counter < len(takenPics)):
pictureList.append(subprocess.Popen(['feh', '--fullscreen', takenPics[counter]]))
time.sleep(3)
counter = counter + 1
counter2 = 0
while (counter2 < len(pictureList)):
pictureList[counter2].terminate()
pictureList[counter2].kill()
counter2 = counter2 + 1
# -------------- script
# Main photo-booth loop: waits for the "take pictures" button (GPIO 24)
# or the "quit" button (GPIO 25). With the pull-ups enabled above, a
# pressed button reads False.
print('Welcome to raspberry-pi-photo-booth')
# open background image
viewer = subprocess.Popen(['feh', '--fullscreen', config.BACKGROUND_IMAGE])
# wait for input
while True:
    # ----- take normal pics
    if (GPIO.input(24) == False):
        # get pi camera
        camera = picamera.PiCamera()
        takenPics = []
        try:
            camera.resolution = (config.RATIO_X, config.RATIO_Y)
            camera.start_preview()
            #take x pics
            countPictures = 0
            while countPictures < config.NUMBER_OF_PICTURES:
                countPictures += 1
                #show countdown
                # Counts INTERVAL_IN_SECONDS .. 1 overlaid on the preview.
                countSeconds = config.INTERVAL_IN_SECONDS + 1
                while countSeconds > 1:
                    countSeconds -= 1
                    camera.annotate_text = str(countSeconds)
                    time.sleep(1)
                #get current time as file name
                timestamp = time.time()
                timestampAsString = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d_%H-%M-%S')
                #save pic with timestamp overlay
                if config.TIMESTAMP == True:
                    camera.annotate_text = str(timestampAsString)
                else:
                    # a single space clears the countdown overlay
                    camera.annotate_text = ' '
                #create file path
                if (config.SAVE_TO_GALLERY):
                    #webserver gallery enabled = true -> save to gallery
                    newFilePath = config.GALLERY_DIRECTORY + '' + str(timestampAsString) + '.jpg'
                else:
                    #else -> save to local pictures directory
                    newFilePath = 'pictures/' + str(timestampAsString) + '.jpg'
                #take picture
                camera.capture(newFilePath)
                takenPics.append(newFilePath)
            #stop preview
            camera.stop_preview()
        finally:
            # always release the camera, even if capture failed
            camera.close()
        #save pics to usb
        if (config.USB_PATH != ''):
            save_taken_pics_to_usb(takenPics)
        #display taken pictures
        display_taken_pics(takenPics)
        #create collage
        #if(config.NUMBER_OF_PICTURES > 1):
            #generate_collage(takenPics)
        #else:
            #display_image(takenPics[0])
    # ----- exit program
    if (GPIO.input(25) == False):
        print('Bye bye!')
        viewer.terminate()
        viewer.kill()
        exit()
    # Poll the buttons roughly ten times per second.
    time.sleep(0.1);
| |
# Copyright (c) 2013, Luis Fernandes and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import os, json
from frappe import _
from frappe.model.document import Document
from jasper_erpnext_report.utils.file import get_image_name, JasperXmlReport,\
get_jasper_path
from jasper_erpnext_report.utils.jasper_file_jrxml import check_root_exists, get_jrxml_root
from jasper_erpnext_report.utils.utils import check_queryString_param, jaspersession_set_value, jaspersession_get_value,\
check_jasper_perm
from frappe.utils import cint
from jasper_erpnext_report.core.JasperRoot import JasperRoot
from jasper_erpnext_report.utils.cache import redis_transation
"""
HOOKS:
on_jasper_params_ids(data=None, params=None);
on_jasper_params(data=None, params=None);
jasper_before_run_report(data=None, docdata=None);
"""
#param_type_java_conversion = {"BigDecimal": "Float", "Integer": "Int", "String": "String", "Double": "Float", "Float": "Float", "Long": "Int", "Character":"String", "Boolean": "String", "Date": "Date"}
class JasperReports(Document):
    """Doctype controller for a Jasper report definition.

    Keeps the cached report list in sync, and (re)builds the report's
    query preview and parameter child rows from the imported jrxml file.
    """

    def on_update(self, method=None):
        """Refresh the cached report list and clean up when the jrxml is gone."""
        #if we are importing docs from jasperserver
        if not frappe.flags.in_import:
            #r_filters=["`tabJasper Reports`.jasper_doctype is NULL", "`tabJasper Reports`.report is NULL"]
            r_filters = {"jasper_doctype": "", "report": ""}
            jr = JasperRoot()
            data = jr._get_reports_list(filters_report=r_filters)
            #report_list_dirt_doc is not called from here
            cached = redis_transation(data, "report_list_all")
            if cached and data:
                # cached in redis: only the per-doctype list stays dirty
                jaspersession_set_value("report_list_dirt_all", False)
                jaspersession_set_value("report_list_dirt_doc", True)
            elif data:
                #redis not cache
                jaspersession_set_value("report_list_dirt_doc", True)
                jaspersession_set_value("report_list_dirt_all", True)
        if check_root_exists(self.doctype, self.name):
            # root jrxml still attached: nothing to clean up
            return
        #if jrxml file was removed then remove all associated images and params
        if self.jasper_report_origin.lower() == "localserver":
            # delete the parameter child rows belonging to this report
            frappe.db.sql("""delete from `tab%s` where %s=%s """ % ("Jasper Parameter", "parent", '%s'),(self.name), auto_commit=1)
            self.query = ""

    def before_save(self, method=None):
        """Normalise fields and rebuild query/parameters from the jrxml."""
        # treat whitespace-only values as unset
        self.jasper_doctype = None if not frappe.utils.strip(self.jasper_doctype) else self.jasper_doctype
        self.report = None if not frappe.utils.strip(self.report) else self.report
        if not self.jasper_param_message:
            try:
                self.jasper_param_message = frappe.db.get_values_from_single(["jasper_param_message"], None, "JasperServerConfig")[0][0].format(report=self.jasper_report_name, user=frappe.local.session['user'])
            except:
                self.jasper_param_message = ""
        #check if Jasper is configurated
        use_jasper_server = frappe.db.get_values_from_single(["use_jasper_server"], None, "JasperServerConfig")[0][0]
        if use_jasper_server == "None":
            frappe.throw(_("You need to configure Jasper first."))
            return
        if check_root_exists(self.doctype, self.name):
            rootquery = ''
            self.query = ''
            jrxml_path = _get_jrxml_root_path(self)
            xmldoc = JasperXmlReport(jrxml_path)
            # xPath data source requires its companion .xml to be imported
            xmlname = check_if_xPath_exists(xmldoc)
            if xmlname and not check_for_report_xPath(xmldoc, xmlname, self):
                frappe.throw(_("Import %s for report %s first." % (xmlname + ".xml",self.jasper_report_name)))
            subreportquerys = getSubReportsQuery(xmldoc, self)
            subquery = ''
            for subreportquery in subreportquerys:
                subquery = subquery + subreportquery.get("name") + ":\n" + subreportquery.get("query") + "\n"
            if xmldoc.queryString or subquery:
                # preview shown to the user: root query plus subreport queries
                self.query = xmldoc.name + ":\n" + xmldoc.queryString + "\n" + subquery
            #give feedback to the user shown related params
            params = xmldoc.get_params_from_xml()
            #get total number of parameters to concatenate with name of parameter
            is_copy = "Other"
            action_type = "Ask"
            for param in params:
                pname = param.xpath('./@name')
                pclass = param.xpath('./@class')
                # java class like "java.lang.String" -> keep last segment
                ptype = pclass[0].split(".")
                c = len(ptype) - 1
                if check_param_exists(self, pname[0]):
                    # params were already populated earlier; stop appending
                    break
                if check_queryString_param(xmldoc.queryString, pname[0]):
                    is_copy = "Is for where clause"
                    action_type = "Automatic"
                #param_type = param_type_java_conversion.get(ptype[c])
                self.append("jasper_parameters", {"__islocal": True, "jasper_param_name":pname[0], "jasper_param_type": ptype[c],#.lower().capitalize()
                    "jasper_param_action": action_type, "param_expression":"In", "is_copy":is_copy, "name": self.name + "_" + pname[0]})
            self.query = rootquery + self.query
            return
        #if jrxml file was removed then prepare to remove all associated images and params given feedback to the user
        if self.jasper_report_origin.lower() == "localserver":
            self.jasper_parameters = []
            return

    @property
    def jrxml_root_path(self):
        """URL of the root jrxml attachment; raises a msgprint if missing."""
        root_path = None
        docs = frappe.get_all("File", fields=["file_name", "file_url"], filters={"attached_to_name": self.name, "attached_to_doctype": self.doctype,
            "attached_to_report_name":"root"})
        try:
            root_path = docs[0].file_url
        except:
            frappe.msgprint(_("The report is missing."), raise_exception=True)
        return root_path
@frappe.whitelist()
def get_attachments(dn):
    """Return the File attachments of the given Jasper Reports document.

    Each entry carries the file name, a url relative to "/files", and the
    report it is attached to. Returns None when `dn` is empty.
    """
    if not dn:
        return
    rows = frappe.db.sql("""select name, file_name, file_url, attached_to_report_name from
        `tabFile` where attached_to_name=%s and attached_to_doctype=%s and is_folder=0""",
        (dn, "Jasper Reports"), as_dict=True)
    return [{
        'name': row.name,
        'file_url': "".join(row.file_url.split("/files")[-1]),#f.file_url,
        'file_name': row.file_name,
        'parent_report': row.attached_to_report_name
    } for row in rows]
def getSubReportsQuery(xmlroot, doc):
    """Collect the queries of all subreports referenced by `xmlroot`.

    Recursively descends into subreports of subreports. Returns a list of
    dicts with keys "name" and "query" (plus any dataset entries). Raises
    via frappe.msgprint(raise_exception=True) when a referenced subreport
    file cannot be loaded.
    """
    subquery = []
    # Also verify that all images referenced by this report were imported.
    check_for_report_images(xmlroot, doc)
    jasper_path = get_jasper_path(doc.jasper_all_sites_report)
    subreports = xmlroot.subreports
    for path_name in subreports:
        # subreport entries point at the compiled ".jasper"; load the source
        report_path = path_name[:-7] + ".jrxml"
        file_path = frappe.utils.get_path(doc.name, report_path, base=jasper_path)
        try:
            xmldoc = JasperXmlReport(file_path)
            subquery.append({"name": xmldoc.name, "query": xmldoc.queryString})
            subquery.extend(xmldoc.datasets)
            #check if the subreport has subreports too
            subquery.extend(getSubReportsQuery(xmldoc, doc))
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception
            frappe.msgprint(_("Subreport %s is missing." % (report_path)), raise_exception=True)
    return subquery
def check_for_report_images(xmldoc, doc):
    """Ensure every image referenced by the jrxml exists as an attachment.

    Compares the image names extracted from the jrxml against the File
    attachments linked to this report (via the jasperId property) and
    throws, listing the missing names, when any are absent.
    """
    image_names_not_found = []
    report_images_count = 0
    images = xmldoc.get_images_from_xml()
    if not images:
        return
    parent = xmldoc.getProperty("jasperId")
    docs = frappe.get_all("File", fields=["file_name", "file_url"], filters={"attached_to_name": doc.name, "attached_to_doctype": doc.doctype,
        "attached_to_report_name":parent})
    for image in images:
        found = False
        try:
            # image expressions may be JSON-encoded strings
            fimage = json.loads(image.text)
        except:
            fimage = image.text
        report_image_name = get_image_name(fimage)
        for f in docs:
            # attachment urls live under ".../compiled/<name>"
            list_img_name = f.file_url.split("compiled/",1)
            if len(list_img_name) > 1:
                img = list_img_name[1]
            else:
                img = list_img_name[0]
            if report_image_name == img:
                found = True
                report_images_count = report_images_count + 1
                break
        if not found:
            image_names_not_found.append(report_image_name)
    if not report_images_count == len(images):
        frappe.throw(_("Import %s image for report %s first." % (",".join(image_names_not_found),doc.jasper_report_name)))
def check_for_report_xPath(xmldoc, xmlname, doc):
    """Return True if the xPath data file `xmlname`.xml is attached.

    Returns None (falsy) when no matching attachment is found.
    """
    target_name = xmlname + ".xml"
    parent = xmldoc.getProperty("jasperId")
    docs = frappe.get_all("File", fields=["file_name", "file_url"], filters={"attached_to_name": doc.name, "attached_to_doctype": doc.doctype,
        "attached_to_report_name":parent})
    for attachment in docs:
        # attachment urls live under ".../compiled/<name>"
        parts = attachment.file_url.split("compiled/",1)
        candidate = parts[1] if len(parts) > 1 else parts[0]
        if target_name == candidate:
            return True
"""
Called from db_query.py method: def get_permission_query_conditions()
In this case is for check jasper permission on the documents to show to the client and the associated count
"""
def get_permission_query_conditions(user):
    """Return an SQL condition limiting Jasper Reports to the user's roles.

    An empty string means no restriction (Administrator, or permission
    checks globally disabled in JasperServerConfig).
    """
    if not user: user = frappe.local.session['user']
    if user=="Administrator":
        return ""
    if ignore_jasper_perm():
        return ""
    # NOTE(review): role names are %-interpolated into the SQL string; each
    # value goes through frappe.db.escape, but a parameterized query would
    # be safer still.
    return """(exists(select * from `tabJasper PermRole` where `tabJasper PermRole`.parent=`tabJasper Reports`.`name` and
    `tabJasper PermRole`.jasper_role in ('%(roles)s') and `tabJasper PermRole`.jasper_can_read = 1))
    """ % {
        "roles": "', '".join([frappe.db.escape(r) for r in frappe.get_roles(user)])
    }
"""
Called from frappe.has_permission as controller
Verify which docs pass jasper permissions
"""
def has_jasper_permission(doc, ptype, user):
    """Return True when `user` may perform `ptype` on the report `doc`.

    Always True when Jasper permission checks are globally disabled.
    """
    if ignore_jasper_perm():
        return True
    return check_jasper_perm(doc.jasper_roles, ptype, user)
def ignore_jasper_perm():
    """Return True when Jasper permission checks are globally disabled.

    The flag is read from the session cache first and lazily populated
    from the JasperServerConfig single doctype.
    """
    cached_flag = jaspersession_get_value("jasper_ignore_perm_roles")
    if cached_flag is None:
        cached_flag = frappe.db.get_single_value("JasperServerConfig", "jasper_ignore_perm_roles")
        jaspersession_set_value("jasper_ignore_perm_roles", cached_flag)
    return bool(cint(cached_flag))
def _get_jrxml_root_path(doc):
    """Return the filesystem path of the report's root jrxml file."""
    base_path = get_jasper_path(doc.jasper_all_sites_report)
    root_jrxml_name, _root_jrxml_url = get_jrxml_root(doc.doctype, doc.name)
    return os.path.join(base_path, doc.name, root_jrxml_name)
#jasper docs have the same params spread so don't let them repeat in doc parameter
def check_param_exists(doc, pname):
    """Return True when a parameter named `pname` is already on `doc`.

    A trailing ":"-suffix in `pname` is stripped before comparing, since
    jasper documents share the same parameters with suffixed names.
    """
    suffix_start = pname.rfind(":")
    if suffix_start != -1:
        pname = pname[0:suffix_start]
    return any(p.jasper_param_name == pname for p in doc.jasper_parameters)
def check_if_xPath_exists(xmldoc):
    """Return the report's XMLNAME property (its xPath data source), if any."""
    xml_name = xmldoc.getProperty("XMLNAME")
    return xml_name
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import collections
import logging
import os
import sys
import tempfile
from flexget import plugin
from flexget.event import event
# Module-level logger for the plugin.
log = logging.getLogger('subtitles')
try:
    # Prefer the provider list reported by the installed subliminal.
    from subliminal.extensions import provider_manager
    PROVIDERS = provider_manager.names()
except ImportError:
    # Fallback list when subliminal is not importable at schema-build time.
    PROVIDERS = ['opensubtitles', 'thesubdb', 'podnapisi', 'addic7ed', 'tvsubtitles']
# One free-form object per provider for the `authentication` config section.
AUTHENTICATION_SCHEMA = dict((provider, {'type': 'object'}) for provider in PROVIDERS)
class PluginSubliminal(object):
    r"""
    Search and download subtitles using Subliminal by Antoine Bertin
    (https://pypi.python.org/pypi/subliminal).

    Example (complete task)::

      subs:
        find:
          path:
            - d:\media\incoming
          regexp: '.*\.(avi|mkv|mp4)$'
          recursive: yes
        accept_all: yes
        subliminal:
          languages:
            - ita
          alternatives:
            - eng
          exact_match: no
          providers: addic7ed, opensubtitles
          single: no
          directory: /disk/subtitles
          hearing_impaired: yes
          authentication:
            addic7ed:
              username: myuser
              password: mypassword
    """

    # Bug fix: 'directory' previously used the key "type:" (stray colon),
    # so the option's type was never validated.
    schema = {
        'type': 'object',
        'properties': {
            'languages': {'type': 'array', 'items': {'type': 'string'}, 'minItems': 1},
            'alternatives': {'type': 'array', 'items': {'type': 'string'}},
            'exact_match': {'type': 'boolean', 'default': True},
            'providers': {'type': 'array', 'items': {'type': 'string', 'enum': PROVIDERS}},
            'single': {'type': 'boolean', 'default': True},
            'directory': {'type': 'string'},
            'hearing_impaired': {'type': 'boolean', 'default': False},
            'authentication': {'type': 'object', 'properties': AUTHENTICATION_SCHEMA},
        },
        'required': ['languages'],
        'additionalProperties': False,
    }

    def on_task_start(self, task, config):
        """Abort the task early when python/babelfish/subliminal are missing."""
        if list(sys.version_info) < [2, 7]:
            raise plugin.DependencyError(
                'subliminal', 'Python 2.7', 'Subliminal plugin requires python 2.7.'
            )
        try:
            import babelfish  # noqa
        except ImportError as e:
            log.debug('Error importing Babelfish: %s', e)
            raise plugin.DependencyError(
                'subliminal', 'babelfish', 'Babelfish module required. ImportError: %s' % e
            )
        try:
            import subliminal  # noqa
        except ImportError as e:
            log.debug('Error importing Subliminal: %s', e)
            raise plugin.DependencyError(
                'subliminal', 'subliminal', 'Subliminal module required. ImportError: %s' % e
            )

    def on_task_output(self, task, config):
        """
        Configuration::
            subliminal:
                languages: List of languages (as IETF codes) in order of preference. At least one is required.
                alternatives: List of second-choice languages; subs will be downloaded but entries rejected.
                exact_match: Use file hash only to search for subs, otherwise Subliminal will try to guess by filename.
                providers: List of providers from where to download subtitles.
                single: Download subtitles in single mode (no language code added to subtitle filename).
                directory: Path to directory where to save the subtitles, default is next to the video.
                hearing_impaired: Prefer subtitles for the hearing impaired when available
                authentication: >
                  Dictionary of configuration options for different providers.
                  Keys correspond to provider names, and values are dictionaries, usually specifying `username` and
                  `password`.
        """
        if not task.accepted:
            log.debug('nothing accepted, aborting')
            return
        # Imports are local so the plugin can be registered without subliminal.
        from babelfish import Language
        from dogpile.cache.exception import RegionAlreadyConfigured
        import subliminal
        from subliminal import scan_video, save_subtitles
        from subliminal.cli import MutexLock
        from subliminal.core import (
            ARCHIVE_EXTENSIONS,
            scan_archive,
            refine,
            search_external_subtitles,
        )
        from subliminal.score import episode_scores, movie_scores
        from subliminal.video import VIDEO_EXTENSIONS
        try:
            subliminal.region.configure(
                'dogpile.cache.dbm',
                arguments={
                    'filename': os.path.join(tempfile.gettempdir(), 'cachefile.dbm'),
                    'lock_factory': MutexLock,
                },
            )
        except RegionAlreadyConfigured:
            pass
        # Let subliminal be more verbose if our logger is set to DEBUG
        if log.isEnabledFor(logging.DEBUG):
            logging.getLogger("subliminal").setLevel(logging.INFO)
        else:
            logging.getLogger("subliminal").setLevel(logging.CRITICAL)
        logging.getLogger("dogpile").setLevel(logging.CRITICAL)
        logging.getLogger("enzyme").setLevel(logging.WARNING)
        try:
            languages = set([Language.fromietf(s) for s in config.get('languages', [])])
            alternative_languages = set(
                [Language.fromietf(s) for s in config.get('alternatives', [])]
            )
        except ValueError as e:
            raise plugin.PluginError(e)
        # keep all downloaded subtitles and save to disk when done (no need to write every time)
        downloaded_subtitles = collections.defaultdict(list)
        providers_list = config.get('providers', None)
        provider_configs = config.get('authentication', None)
        # test if only one language was provided, if so we will download in single mode
        # (aka no language code added to subtitle filename)
        # unless we are forced not to by configuration
        # if we pass 'yes' for single in configuration but choose more than one language
        # we ignore the configuration and add the language code to the
        # potentially downloaded files
        single_mode = config.get('single', '') and len(languages | alternative_languages) <= 1
        hearing_impaired = config.get('hearing_impaired', False)
        with subliminal.core.ProviderPool(
            providers=providers_list, provider_configs=provider_configs
        ) as provider_pool:
            for entry in task.accepted:
                if 'location' not in entry:
                    log.warning('Cannot act on entries that do not represent a local file.')
                    continue
                if not os.path.exists(entry['location']):
                    entry.fail('file not found: %s' % entry['location'])
                    continue
                if '$RECYCLE.BIN' in entry['location']:  # ignore deleted files in Windows shares
                    continue
                try:
                    entry_languages = set(entry.get('subtitle_languages', [])) or languages
                    if entry['location'].endswith(VIDEO_EXTENSIONS):
                        video = scan_video(entry['location'])
                    elif entry['location'].endswith(ARCHIVE_EXTENSIONS):
                        video = scan_archive(entry['location'])
                    else:
                        entry.reject(
                            'File extension is not a supported video or archive extension'
                        )
                        continue
                    # use metadata refiner to get mkv metadata
                    refiner = ('metadata',)
                    refine(video, episode_refiners=refiner, movie_refiners=refiner)
                    existing_subtitles = set(search_external_subtitles(entry['location']).values())
                    video.subtitle_languages |= existing_subtitles
                    if isinstance(video, subliminal.Episode):
                        title = video.series
                        hash_scores = episode_scores['hash']
                    else:
                        title = video.title
                        hash_scores = movie_scores['hash']
                    log.info('Name computed for %s was %s', entry['location'], title)
                    # exact_match: only accept hash-level matches
                    msc = hash_scores if config['exact_match'] else 0
                    if entry_languages.issubset(video.subtitle_languages):
                        log.debug('All preferred languages already exist for "%s"', entry['title'])
                        entry['subtitles_missing'] = set()
                        continue  # subs for preferred lang(s) already exists
                    else:
                        # Gather the subtitles for the alternative languages too, to avoid needing to search the sites
                        # again. They'll just be ignored if the main languages are found.
                        all_subtitles = provider_pool.list_subtitles(
                            video, entry_languages | alternative_languages
                        )
                        try:
                            subtitles = provider_pool.download_best_subtitles(
                                all_subtitles,
                                video,
                                entry_languages,
                                min_score=msc,
                                hearing_impaired=hearing_impaired,
                            )
                        except TypeError as e:
                            # Bug fix: the two string literals were concatenated
                            # without a separating space ("...seehttps://...").
                            log.error(
                                'Downloading subtitles failed due to a bug in subliminal. Please see '
                                'https://github.com/Diaoul/subliminal/issues/921. Error: %s',
                                e,
                            )
                            subtitles = []
                        if subtitles:
                            downloaded_subtitles[video].extend(subtitles)
                            log.info('Subtitles found for %s', entry['location'])
                        else:
                            # only try to download for alternatives that aren't already downloaded
                            subtitles = provider_pool.download_best_subtitles(
                                all_subtitles,
                                video,
                                alternative_languages,
                                min_score=msc,
                                hearing_impaired=hearing_impaired,
                            )
                            if subtitles:
                                downloaded_subtitles[video].extend(subtitles)
                                entry.reject('subtitles found for a second-choice language.')
                            else:
                                entry.reject('cannot find any subtitles for now.')
                        downloaded_languages = set(
                            [Language.fromietf(str(l.language)) for l in subtitles]
                        )
                        if entry_languages:
                            entry['subtitles_missing'] = entry_languages - downloaded_languages
                            if len(entry['subtitles_missing']) > 0:
                                entry.reject('Subtitles for all primary languages not found')
                except ValueError as e:
                    log.error('subliminal error: %s', e)
                    entry.fail()
        if downloaded_subtitles:
            if task.options.test:
                log.verbose('Test mode. Found subtitles:')
            # save subtitles to disk
            for video, subtitle in downloaded_subtitles.items():
                if subtitle:
                    _directory = config.get('directory')
                    if _directory:
                        _directory = os.path.expanduser(_directory)
                    if task.options.test:
                        log.verbose(
                            ' FOUND LANGUAGES %s for %s',
                            [str(l.language) for l in subtitle],
                            video.name,
                        )
                        continue
                    save_subtitles(video, subtitle, single=single_mode, directory=_directory)
@event('plugin.register')
def register_plugin():
    """Register PluginSubliminal with FlexGet under the 'subliminal' keyword."""
    plugin.register(PluginSubliminal, 'subliminal', api_ver=2)
| |
"""
Functions to compute some tensor-related quantities usual in continuum mechanics.
"""
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import assert_, Struct
from sfepy.linalg \
import apply_to_sequence, dot_sequences, make_axis_rotation_matrix
def dim2sym(dim):
    """
    Return the symmetric tensor storage size for the space dimension `dim`.
    """
    return dim * (dim + 1) // 2
def sym2dim(sym):
    """
    Given the symmetric storage size, return the space dimension.

    Notes
    -----
    This function works for any space dimension.
    """
    # Invert sym = dim * (dim + 1) / 2 via the quadratic formula.
    dim = int(-0.5 + nm.sqrt(2 * sym + 0.25))
    assert_(dim2sym(dim) == sym)
    return dim
def get_full_indices(dim):
    """
    The indices for converting the symmetric storage to the full storage.
    """
    full_indices = {
        2 : [[0, 2], [2, 1]],
        3 : [[0, 3, 4], [3, 1, 5], [4, 5, 2]],
    }
    return full_indices[dim]
def get_sym_indices(dim):
    """
    The indices for converting the full storage to the symmetric storage.
    """
    sym_indices = {
        2 : [0, 3, 1],
        3 : [0, 4, 8, 1, 2, 5],
    }
    return sym_indices[dim]
def get_non_diagonal_indices(dim):
    """
    The non_diagonal indices for the full vector storage.
    """
    non_diagonal = {
        2 : ([1], [2]),
        3 : ([1, 2, 5], [3, 6, 7]),
    }
    return non_diagonal[dim]
def get_trace(tensor, sym_storage=True):
    """
    The trace of a tensor.

    With symmetric storage the first `dim` components are the diagonal;
    with full storage the usual matrix trace is taken per item.
    """
    if not sym_storage:
        return nm.trace(tensor, axis1=1, axis2=2)
    dim = sym2dim(tensor.shape[1])
    return nm.sum(tensor[:, :dim], axis=1)
def get_volumetric_tensor(tensor, sym_storage=True):
    """
    The volumetric part of a tensor: (trace / dim) times identity.
    """
    dim = tensor.shape[1]
    if sym_storage:
        dim = sym2dim(dim)
    mean = get_trace(tensor, sym_storage=sym_storage) / float(dim)
    if sym_storage:
        # diagonal components only; off-diagonal entries stay zero
        vol = nm.zeros_like(tensor)
        vol[:, :dim] = mean[:, None]
    else:
        vol = mean[:, None, None] * nm.eye(dim, dtype=nm.float64)
    return vol
def get_deviator(tensor, sym_storage=True):
    """
    The deviatoric part (deviator) of a tensor: tensor minus its
    volumetric part.
    """
    return tensor - get_volumetric_tensor(tensor, sym_storage=sym_storage)
def get_von_mises_stress(stress, sym_storage=True):
    r"""
    Given a symmetric stress tensor, compute the von Mises stress (also known
    as Equivalent tensile stress).

    Notes
    -----
    .. math::
        \sigma_V = \sqrt{\frac{(\sigma_{11} - \sigma_{22})^2 +
        (\sigma_{22} - \sigma_{33})^2 + (\sigma_{11} - \sigma_{33})^2 + 6
        (\sigma_{12}^2 + \sigma_{13}^2 + \sigma_{23}^2)}{2}}
    """
    dim = stress.shape[1]
    if sym_storage:
        dim = sym2dim(dim)
    if dim == 2:
        # plane stress
        if sym_storage:
            s11, s22, s12 = stress[:, 0], stress[:, 1], stress[:, 2]
        else:
            s11, s22, s12 = stress[:, 0, 0], stress[:, 1, 1], stress[:, 0, 1]
        vms = nm.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)[:, None]
    else:
        if sym_storage:
            s11, s22, s33 = stress[:, 0], stress[:, 1], stress[:, 2]
            s12, s13, s23 = stress[:, 3], stress[:, 4], stress[:, 5]
        else:
            s11, s22, s33 = stress[:, 0, 0], stress[:, 1, 1], stress[:, 2, 2]
            s12, s13, s23 = stress[:, 0, 1], stress[:, 0, 2], stress[:, 1, 2]
        vms = nm.sqrt(0.5 * ((s11 - s22)**2 + (s22 - s33)**2
                             + (s11 - s33)**2
                             + 6.0 * (s12**2 + s13**2 + s23**2)))[:, None]
    return vms
def get_t4_from_t2s(t2s):
    """
    Get the full 4D tensor with major/minor symmetries from its 2D matrix
    representation.

    Parameters
    ----------
    t2s : array
        The symmetrically-stored tensor of shape (S, S), where S it the
        symmetric storage size.

    Returns
    -------
    t4 : array
        The full 4D tensor of shape (D, D, D, D), where D is the space
        dimension.
    """
    space_dim = sym2dim(t2s.shape[0])
    full = get_full_indices(space_dim)
    # expand both axes via the symmetric-to-full index map
    return t2s[:, full][full, ...]
def prepare_cylindrical_transform(coors, origin, mode='axes'):
    """
    Prepare matrices for transforming tensors into cylindrical coordinates with
    the axis 'z' in a given origin.

    Parameters
    ----------
    coors : array
        The Cartesian coordinates.
    origin : array of length 3
        The origin.
    mode : 'axes' or 'data'
        In 'axes' (default) mode the matrix transforms data to different
        coordinate system, while in 'data' mode the matrix transforms
        the data in the same coordinate system and is transpose of the
        matrix in the 'axes' mode.

    Returns
    -------
    mtx : array
        The array of transformation matrices for each coordinate in `coors`.
    """
    assert_(mode in ['axes', 'data'])
    dx = coors[:, 0] - origin[0]
    dy = coors[:, 1] - origin[1]
    angles = nm.arctan2(dy, dx)
    if mode == 'data':
        # 'data' mode uses the transposed (inverse) rotation
        angles = -angles
    mtx = nm.zeros((coors.shape[0], 3, 3), dtype=nm.float64)
    for ic, angle in enumerate(angles):
        mtx[ic] = make_axis_rotation_matrix([0.0, 0.0, 1.0], angle)
    return mtx
def transform_data(data, coors=None, mode='cylindrical', mtx=None):
    r"""
    Transform vector or tensor data components between orthogonal
    coordinate systems in 3D using transformation matrix :math:`M`, that
    should express rotation of the original coordinate system to the new
    system denoted by :math:`\bullet'` below.

    For vectors:

    .. math::
        \ul{v}' = M \cdot \ul{v}

    For second order tensors:

    .. math::
        \ull{t}' = M \cdot \ull{t} \cdot M^T

        \mbox{or}

        t_{ij}' = M_{ip} M_{jq} t_{pq}

    For fourth order tensors:

    .. math::
        t_{ijkl}' = M_{ip} M_{jq} M_{kr} M_{ls} t_{pqrs}

    Parameters
    ----------
    data : array, shape (num, n_r) or (num, n_r, n_c)
        The vectors (`n_r` is 3) or tensors (symmetric storage, `n_r` is 6,
        `n_c`, if available, is 1 or 6) to be transformed.
    coors : array
        The Cartesian coordinates of the data. Not needed when `mtx` argument
        is given.
    mode : one of ['cylindrical']
        The requested coordinate system. Not needed when `mtx` argument
        is given.
    mtx : array
        The array of transformation matrices :math:`M` for each data row.

    Returns
    -------
    new_data : array
        The transformed data.
    """
    if (coors is None) and (mtx is None):
        raise ValueError('one of (coors, mtx) arguments must be set!')
    if mtx is None:
        if mode == 'cylindrical':
            mtx = prepare_cylindrical_transform(coors, [0.0, 0.0, 0.0])
        else:
            raise ValueError('transformation mode %s is not supported!' % mode)
    shape = data.shape
    if shape[0] != mtx.shape[0]:
        raise ValueError('incompatible numbers of points! (data: %d, mtx: %d)'
                         % (shape[0], mtx.shape[0]))
    if shape[1] == 3: # Vectors.
        new_data = dot_sequences(mtx, data)
    elif shape[1] == 6: # Symmetric tensors.
        # index maps between symmetric (6) and full (3x3) storage
        iif = get_full_indices(3)
        iis = get_sym_indices(3)
        if ((data.ndim == 2)
            or ((data.ndim == 3) and (shape[2] == 1))): # Second order.
            if data.ndim == 3:
                aux = data[:, iif, 0]
            else:
                aux = data[:, iif]
            # t' = M . t . M^T, batched over the first axis
            aux2 = dot_sequences(dot_sequences(mtx, aux, 'AB'), mtx, 'ABT')
            # NOTE(review): runtime sanity check executed on every call and
            # stripped under `python -O`; consider removing or guarding.
            assert nm.allclose(aux2[0],
                               nm.dot(nm.dot(mtx[0], aux[0]), mtx[0].T))
            aux3 = aux2.reshape((aux2.shape[0], 9))
            # back to symmetric storage
            new_data = aux3[:, iis]
            if data.ndim == 3:
                new_data = new_data[..., None]
        elif (data.ndim == 3) and (shape[2] == 6): # Fourth order.
            # Note: nm.einsum() is much slower than dot_sequences().
            df = data[:, iif][..., iif]
            # t'_{ijkl} = M_{ip} M_{jq} M_{kr} M_{ls} t_{pqrs}
            tdf = nm.einsum('apqrs,aip,ajq,akr,als->aijkl',
                            df, mtx, mtx, mtx, mtx)
            tdf2 = tdf.reshape(tdf.shape[0], 9, 9)
            new_data = tdf2[:, :, iis][:, iis]
        else:
            raise ValueError('unsupported data shape! (%s)' % str(shape))
    else:
        raise ValueError('unsupported data shape! (%s)' % str(shape))
    assert_(new_data.shape == shape)
    return new_data
class StressTransform(Struct):
    """
    Encapsulates functions to convert various stress tensors in the symmetric
    storage given the deformation state.
    """

    def __init__(self, def_grad, jacobian=None):
        r"""
        Set :math:`\ull{F} = \pdiff{\ul{x}}{\ul{X}}` and optionally also
        :math:`J = \det(\ull{F})`.
        """
        self.def_grad = nm.asarray(def_grad, dtype=nm.float64)
        # Leading axes: number of elements, number of quadrature points,
        # space dimension -- assumes def_grad is (n_el, n_qp, dim, dim).
        self.n_el, self.n_qp, self.dim = self.def_grad.shape[:3]

        # Index maps between the symmetric (reduced) storage and the full
        # matrix storage for `dim` space dimensions.
        self.s2f = get_full_indices(self.dim)
        self.f2s = get_sym_indices(self.dim)

        if jacobian is None:
            # J = det(F), evaluated per (element, quadrature point) pair.
            self.jacobian = apply_to_sequence(self.def_grad, nla.det,
                                              2, (1, 1))
        else:
            self.jacobian = nm.asarray(jacobian, dtype=nm.float64)

    def _assert_symmetry(self, stress):
        # Check that the full-storage stress is symmetric by comparing the
        # paired off-diagonal entries.
        i1, i2 = get_non_diagonal_indices(self.dim)
        assert_(nm.allclose(stress[:,:,i1], stress[:,:,i2]))

    def get_cauchy_from_2pk(self, stress_in):
        r"""
        Get the Cauchy stress given the second Piola-Kirchhoff stress.

        .. math::
            \sigma_{ij} = J^{-1} F_{ik} S_{kl} F_{jl}
        """
        stress_in = nm.asarray(stress_in, dtype=nm.float64)

        # Expand the symmetric storage (..., sym, 1) into full (dim, dim)
        # matrices so that matrix products can be applied.
        stress_in_full = stress_in[:,:,self.s2f,0]

        # F S F^T, then scale by 1/J.
        val_il = dot_sequences(self.def_grad, stress_in_full)
        val_ij = dot_sequences(val_il, self.def_grad, mode='ABT')

        stress_out_full = val_ij / self.jacobian

        # Flatten the trailing (dim, dim) block in place...
        sh = stress_out_full.shape
        stress_out_full.shape = (sh[0], sh[1], sh[2] * sh[3])

        self._assert_symmetry(stress_out_full)

        # ...and return to the symmetric storage, same shape as the input.
        stress_out = nm.empty_like(stress_in)
        stress_out[...,0] = stress_out_full[:,:,self.f2s]

        return stress_out
| |
###############################################################################
# Name: perspectives.py #
# Purpose: Editra's view management service #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
Provides a perspective management class for saving and loading custom
perspectives in the MainWindow.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__cvsid__ = "$Id: perspective.py 70229 2012-01-01 01:27:10Z CJP $"
__revision__ = "$Revision: 70229 $"
#--------------------------------------------------------------------------#
# Dependencies
import os
import wx
import wx.aui as aui
# Editra Imports
import util
import ed_menu
import ed_fmgr
from profiler import Profile_Get, Profile_Set
#--------------------------------------------------------------------------#
# Globals
AUTO_PERSPECTIVE = u'Automatic'  # Reserved view name managed automatically
DATA_FILE = u'perspectives'      # File name of the on-disk perspective cache
LAST_KEY = u'**LASTVIEW**'       # Sentinel key storing the last active view

# ID's
ID_SAVE_PERSPECTIVE = wx.NewId()
ID_DELETE_PERSPECTIVE = wx.NewId()
ID_AUTO_PERSPECTIVE = wx.NewId()

# Aliases
_ = wx.GetTranslation
#--------------------------------------------------------------------------#
class PerspectiveManager(object):
    """Creates a perspective manager for the given aui managed window.
    It supports saving and loading of on disk perspectives as created by
    calling SavePerspective from the AuiManager. Mixin class for a wx.Frame.
    """
    def __init__(self, base):
        """Initializes the perspective manager.
        @param base: base path to where perspectives should be loaded from
                     and saved to (configuration cache directory)
        """
        super(PerspectiveManager, self).__init__()

        hint = aui.AUI_MGR_TRANSPARENT_HINT
        if wx.Platform == '__WXGTK__':
            # Use venetian blinds style as transparent can cause crashes
            # on linux when desktop compositing is used.
            hint = aui.AUI_MGR_VENETIAN_BLINDS_HINT
        self._mgr = ed_fmgr.EdFrameManager(flags=aui.AUI_MGR_DEFAULT |
                                                 aui.AUI_MGR_TRANSPARENT_DRAG |
                                                 hint |
                                                 aui.AUI_MGR_ALLOW_ACTIVE_PANE)
        self._mgr.SetManagedWindow(self)

        # Attributes
        self._ids = list()  # List of menu ids
        self._base = os.path.join(base, DATA_FILE)  # Path to config
        self._viewset = dict()  # Set of Views
        # NOTE: LoadPerspectives may also set self._currview (via LAST_KEY)
        # before the Profile default below is read.
        self.LoadPerspectives()
        self._menu = ed_menu.EdMenu()  # Control menu
        self._currview = Profile_Get('DEFAULT_VIEW')  # Currently used view

        # Setup Menu
        self._menu.Append(ID_SAVE_PERSPECTIVE, _("Save Current View"),
                          _("Save the current window layout"))
        self._menu.Append(ID_DELETE_PERSPECTIVE, _("Delete Saved View"))
        self._menu.AppendSeparator()
        self._menu.Append(ID_AUTO_PERSPECTIVE, _("Automatic"),
                          _("Automatically save/use window state from last session"),
                          wx.ITEM_CHECK)
        self._menu.AppendSeparator()
        for name in self._viewset:
            self.AddPerspectiveMenuEntry(name)

        # Restore the managed windows previous position preference if available.
        pos = Profile_Get('WPOS', "size_tuple", False)
        if Profile_Get('SET_WPOS') and pos:
            # Ensure window is on screen
            if not self.IsPositionOnScreen(pos):
                pos = self.GetPrimaryDisplayOrigin()
            self.SetPosition(pos)

        # Event Handlers
        self.Bind(wx.EVT_MENU, self.OnPerspectiveMenu)

    #---- Properties ----#

    PanelMgr = property(lambda self: self._mgr)

    def AddPerspective(self, name, p_data=None):
        """Add a perspective to the view set. If the p_data parameter
        is not set then the current view will be added with the given name.
        @param name: name for new perspective
        @keyword p_data: perspective data from auimgr
        @return: bool (True == Added, False == Not Added)
        """
        # Don't allow empty keys or ones that override the automatic
        # settings to be added
        name = name.strip()
        if not len(name) or name == AUTO_PERSPECTIVE:
            return False

        # Only add a menu entry for names not already managed.
        domenu = not self.HasPerspective(name)
        if p_data is None:
            self._viewset[name] = self._mgr.SavePerspective()
        else:
            self._viewset[name] = p_data
        self._currview = name

        if name != AUTO_PERSPECTIVE and domenu:
            self.AddPerspectiveMenuEntry(name)
        self.SavePerspectives()
        return True

    def AddPerspectiveMenuEntry(self, name):
        """Adds an entry to list of perspectives in the menu for this manager.
        @param name: name of perspective to add to menu
        @return: bool (added or not)
        """
        name = name.strip()
        if not len(name) or name == AUTO_PERSPECTIVE:
            return False

        per_id = wx.NewId()
        self._ids.append(per_id)
        # Keep the perspective entries alphabetized after the Automatic item.
        self._menu.InsertAlpha(per_id, name, _("Change view to \"%s\"") % name,
                               kind=wx.ITEM_CHECK, after=ID_AUTO_PERSPECTIVE)
        return True

    def GetFrameManager(self):
        """Returns the manager for this frame
        @return: Reference to the AuiMgr of this window
        """
        return self._mgr

    def GetPerspectiveControls(self):
        """Returns the control menu for the manager
        @return: menu of this manager
        """
        return self._menu

    def GetPerspective(self):
        """Returns the name of the current perspective used
        @return: name of currently active perspective
        """
        return self._currview

    def GetPerspectiveData(self, name):
        """Returns the given named perspectives data string
        @param name: name of perspective to fetch data from
        @return: perspective data string or None if not found
        """
        return self._viewset.get(name, None)

    # NOTE(review): method name misspells 'Perspective'; kept as-is since it
    # is part of the public API used by callers elsewhere.
    def GetPersectiveHandlers(self):
        """Gets a list of ID to UIHandlers for the perspective Menu
        @return: list of [(ID, HandlerFunction)]
        """
        handlers = [(m_id, self.OnUpdatePerspectiveMenu) for m_id in self._ids]
        return handlers + [(ID_AUTO_PERSPECTIVE, self.OnUpdatePerspectiveMenu)]

    def GetPerspectiveList(self):
        """Returns a list of all the loaded perspectives. The
        returned list only provides the names of the perspectives
        and not the actual data.
        @return: list of all managed perspectives
        """
        return sorted(self._viewset.keys())

    def GetPrimaryDisplayOrigin(self):
        """Get the origin on the primary display to use as a default
        window placement position.
        @return: position tuple
        """
        # NOTE: don't default to 0,0 otherwise on osx the frame will be
        # stuck behind the menubar.
        for idx in range(wx.Display.GetCount()):
            disp = wx.Display(idx)
            if disp.IsPrimary():
                drect = disp.GetClientArea()
                return drect.GetPosition() + (5, 5)
        else:
            # for/else: no primary display reported, fall back to a small
            # fixed offset.
            return (5, 5)

    def HasPerspective(self, name):
        """Returns True if there is a perspective by the given name
        being managed by this manager, or False otherwise.
        @param name: name of perspective to look for
        @return: whether perspective is managed by this manager or not
        """
        return name in self._viewset

    def InitWindowAlpha(self):
        """Initialize the windows alpha setting"""
        # Clamp to a floor of 100 so the window can never be made
        # (nearly) invisible by a bad profile value.
        level = max(100, Profile_Get('ALPHA', default=255))
        # Only set the transparency if it is not opaque
        if level != 255:
            self.SetTransparent(level)

    def IsPositionOnScreen(self, pos):
        """Check if the given position is on any of the connected displays
        @param pos: Position Tuple
        @return: bool
        """
        bOnScreen = False
        if len(pos) == 2:
            for idx in range(wx.Display.GetCount()):
                disp = wx.Display(idx)
                drect = disp.GetClientArea()
                bOnScreen = drect.Contains(pos)
                if bOnScreen:
                    break
        return bOnScreen

    def LoadPerspectives(self):
        """Loads the perspectives data into the manager. Returns
        the number of perspectives that were successfully loaded.
        @return: number of perspectives loaded
        """
        reader = util.GetFileReader(self._base)
        if reader == -1:
            util.Log("[perspective][err] Failed to get " +
                     "file reader for %s" % self._base)
            return 0

        try:
            # Cache file format: one "name=perspective-data" entry per line.
            for line in reader.readlines():
                label, val = line.split(u"=", 1)
                label = label.strip()
                if not len(label):
                    continue
                self._viewset[label] = val.strip()
            reader.close()
        finally:
            # The LAST_KEY sentinel records which view was active last
            # session; promote it to the current view and keep it out of
            # the visible view set.
            if LAST_KEY in self._viewset:
                self._currview = self._viewset[LAST_KEY]
                del self._viewset[LAST_KEY]
        return len(self._viewset)

    def OnPerspectiveMenu(self, evt):
        """Handles menu events generated by the managers control menu.
        @param evt: event that called this handler
        """
        e_id = evt.GetId()
        if e_id == ID_SAVE_PERSPECTIVE:
            name = wx.GetTextFromUser(_("Perspective Name"), \
                                      _("Save Perspective"))
            if name:
                self.AddPerspective(name, p_data=None)
                self.SavePerspectives()
                Profile_Set('DEFAULT_VIEW', name)

                # It may make sense to update all windows to use this
                # perspective at this point but it may be an unexpected
                # event to happen when there is many windows open. Will
                # leave this to future consideration.
                for mainw in wx.GetApp().GetMainWindows():
                    mainw.AddPerspective(name, self._viewset[name])
        elif e_id == ID_DELETE_PERSPECTIVE:
            # Offer every view except the protected Automatic one.
            views = [ view for view in self._viewset.keys()
                      if view != AUTO_PERSPECTIVE ]
            name = wx.GetSingleChoice(_("Perspective to Delete"),
                                      _("Delete Perspective"), views)
            if name:
                self.RemovePerspective(name)
                self.SavePerspectives()
                for mainw in wx.GetApp().GetMainWindows():
                    mainw.RemovePerspective(name)
            else:
                # User cancelled the dialog; nothing to do.
                pass

            # Update all windows data sets
            for mainw in wx.GetApp().GetMainWindows():
                mainw.LoadPerspectives()
        elif e_id in self._ids + [ID_AUTO_PERSPECTIVE]:
            if e_id == ID_AUTO_PERSPECTIVE:
                Profile_Set('DEFAULT_VIEW', AUTO_PERSPECTIVE)
                self.SetAutoPerspective()
            else:
                self.SetPerspectiveById(e_id)
        else:
            evt.Skip()

    def OnUpdatePerspectiveMenu(self, evt):
        """Update the perspective menu's check mark states
        @param evt: UpdateUI event that called this handler
        """
        e_id = evt.GetId()
        if e_id in self._ids + [ID_AUTO_PERSPECTIVE]:
            # Check the item whose label matches the active view name.
            evt.Check(self._menu.GetLabel(e_id) == self._currview)
        else:
            evt.Skip()

    def RemovePerspective(self, name):
        """Removes a named perspective from the managed set
        @param name: name of perspective to remove/delete
        """
        if name in self._viewset:
            del self._viewset[name]
            rem_id = self._menu.RemoveItemByName(name)
            if rem_id:
                self._ids.remove(rem_id)

    def SetAutoPerspective(self):
        """Set the current perspective management into automatic mode
        @postcondition: window is set into automatic perspective mode
        """
        self._currview = AUTO_PERSPECTIVE
        self.UpdateAutoPerspective()

    def SavePerspectives(self):
        """Writes the perspectives out to disk. Returns True if all data was
        written and False if there was an error.
        @return: whether save was successful
        """
        writer = util.GetFileWriter(self._base)
        if writer == -1:
            util.Log("[perspective][err] Failed to save %s" % self._base)
            return False

        try:
            # Temporarily insert the LAST_KEY sentinel so the active view
            # is persisted along with the named perspectives.
            self._viewset[LAST_KEY] = self._currview
            for perspect in self._viewset:
                writer.write(u"%s=%s\n" % (perspect, self._viewset[perspect]))
            del self._viewset[LAST_KEY]
        except (IOError, OSError):
            util.Log("[perspective][err] Write error: %s" % self._base)
            return False
        else:
            return True

    def SetPerspective(self, name):
        """Sets the perspective of the managed window, returns
        True on success and False on failure.
        @param name: name of perspective to set
        @return: whether perspective was set or not
        """
        if name in self._viewset:
            self._mgr.LoadPerspective(self._viewset[name])
            self._mgr.Update()
            self._currview = name
            self.SavePerspectives()
            return True
        else:
            # Fall back to automatic mode
            self.SetAutoPerspective()
            return False

    def SetPerspectiveById(self, per_id):
        """Sets the perspective using the given control id
        @param per_id: id of requested perspective
        @return: whether perspective was set or not
        """
        # Map the menu item id back to its label, which is the view name.
        name = None
        for pos in range(self._menu.GetMenuItemCount()):
            item = self._menu.FindItemByPosition(pos)
            if per_id == item.GetId():
                name = item.GetLabel()
                break

        if name is not None:
            return self.SetPerspective(name)
        else:
            return False

    def UpdateAutoPerspective(self):
        """Update the value of the auto-perspectives current saved state
        @postcondition: The perspective data for the Automatic setting is
                        updated to have data for the current state of the
                        window.
        """
        self._viewset[AUTO_PERSPECTIVE] = self._mgr.SavePerspective()
        self.SavePerspectives()
| |
"""Raster warping and reprojection."""
from __future__ import absolute_import
from __future__ import division
from math import ceil
from affine import Affine
import numpy as np
import rasterio
from rasterio._base import _transform
from rasterio._warp import (
_transform_geom, _reproject, _calculate_default_transform)
from rasterio.enums import Resampling
from rasterio.env import ensure_env
from rasterio.errors import GDALBehaviorChangeException
from rasterio.transform import guard_transform
@ensure_env
def transform(src_crs, dst_crs, xs, ys, zs=None):
    """Reproject vectors of x, y and optionally z coordinates.

    Each input vector is transformed from the source coordinate
    reference system into the target one.

    Parameters
    ------------
    src_crs: CRS or dict
        Source coordinate reference system, as a rasterio CRS object.
        Example: CRS({'init': 'EPSG:4326'})
    dst_crs: CRS or dict
        Target coordinate reference system.
    xs: array_like
        Contains x values. Will be cast to double floating point values.
    ys: array_like
        Contains y values.
    zs: array_like, optional
        Contains z values. Assumed to be all 0 if absent.

    Returns
    ---------
    out: tuple of array_like, (xs, ys, [zs])
        Tuple of x, y, and optionally z vectors, transformed into the
        target coordinate reference system.
    """
    # The actual reprojection is delegated to the C-level helper.
    return _transform(src_crs, dst_crs, xs, ys, zs)
@ensure_env
def transform_geom(
        src_crs,
        dst_crs,
        geom,
        antimeridian_cutting=True,
        antimeridian_offset=10.0,
        precision=-1):
    """Reproject a GeoJSON-like geometry into a target CRS.

    Parameters
    ------------
    src_crs: CRS or dict
        Source coordinate reference system, in rasterio dict format.
        Example: CRS({'init': 'EPSG:4326'})
    dst_crs: CRS or dict
        Target coordinate reference system.
    geom: GeoJSON like dict object
    antimeridian_cutting: bool, optional
        If True, cut geometries at the antimeridian, otherwise geometries
        will not be cut (default). If False and GDAL is 2.2.0 or newer
        an exception is raised. Antimeridian cutting is always on as of
        GDAL 2.2.0 but this could produce an unexpected geometry.
    antimeridian_offset: float
        Offset from the antimeridian in degrees (default: 10) within which
        any geometries will be split.
    precision: float
        If >= 0, geometry coordinates will be rounded to this number of
        decimal places after the transform operation, otherwise original
        coordinate values will be preserved (default).

    Returns
    ---------
    out: GeoJSON like dict object
        Transformed geometry in GeoJSON dict format
    """
    # Parse the numeric components of the GDAL version string, e.g.
    # "2.2.1" -> (2, 2, 1), skipping any non-numeric suffix parts.
    digit_parts = [part for part in rasterio.__gdal_version__.split('.')
                   if part.isdigit()]
    gdal_version = tuple(int(part) for part in digit_parts)

    # GDAL >= 2.2 always cuts at the antimeridian; refusing the option is
    # clearer than silently ignoring it.
    if gdal_version[:2] >= (2, 2) and not antimeridian_cutting:
        raise GDALBehaviorChangeException(
            "Antimeridian cutting is always enabled on GDAL 2.2.0 or "
            "newer, which could produce a different geometry than expected.")

    return _transform_geom(
        src_crs,
        dst_crs,
        geom,
        antimeridian_cutting,
        antimeridian_offset,
        precision)
def transform_bounds(
        src_crs,
        dst_crs,
        left,
        bottom,
        right,
        top,
        densify_pts=21):
    """Transform a bounding box from src_crs to dst_crs.

    The edges may be densified (to account for nonlinear transformations
    along them) before the outermost bounds are extracted.

    Note: this does not account for the antimeridian.

    Parameters
    ----------
    src_crs: CRS or dict
        Source coordinate reference system, in rasterio dict format.
        Example: CRS({'init': 'EPSG:4326'})
    dst_crs: CRS or dict
        Target coordinate reference system.
    left, bottom, right, top: float
        Bounding coordinates in src_crs, from the bounds property of a raster.
    densify_pts: uint, optional
        Number of points to add to each edge to account for nonlinear
        edges produced by the transform process. Large numbers will produce
        worse performance. Default: 21 (gdal default).

    Returns
    -------
    left, bottom, right, top: float
        Outermost coordinates in target coordinate reference system.
    """
    if densify_pts < 0:
        raise ValueError('densify parameter must be >= 0')

    if densify_pts > 0:
        densify_factor = 1.0 / float(densify_pts + 1)
        src_xs = []
        src_ys = []
        # Sample the two vertical edges from bottom to top.
        for edge_x in (left, right):
            src_xs.extend([edge_x] * (densify_pts + 2))
            src_ys.extend(
                bottom + np.arange(0, densify_pts + 2, dtype=np.float32) *
                ((top - bottom) * densify_factor)
            )
        # Sample the two horizontal edges strictly between the corners.
        for edge_y in (bottom, top):
            src_xs.extend(
                left + np.arange(1, densify_pts + 1, dtype=np.float32) *
                ((right - left) * densify_factor)
            )
            src_ys.extend([edge_y] * densify_pts)
    else:
        # No densification requested: use the four corners only.
        src_xs = [left, left, right, right]
        src_ys = [bottom, top, bottom, top]

    out_xs, out_ys = transform(src_crs, dst_crs, src_xs, src_ys)
    return (min(out_xs), min(out_ys), max(out_xs), max(out_ys))
@ensure_env
def reproject(source, destination, src_transform=None, gcps=None,
              src_crs=None, src_nodata=None, dst_transform=None, dst_crs=None,
              dst_nodata=None, resampling=Resampling.nearest,
              init_dest_nodata=True, **kwargs):
    """Reproject a source raster to a destination raster.

    If the source and destination are ndarrays, coordinate reference
    system definitions and affine transformation parameters or ground
    control points (gcps) are required for reprojection.

    If the source and destination are rasterio Bands, shorthand for
    bands of datasets on disk, the coordinate reference systems and
    transforms or GCPs will be read from the appropriate datasets.

    Parameters
    ------------
    source, destination: ndarray or Band
        The source and destination are 2 or 3-D ndarrays, or a single
        or multiple Rasterio Band object. The dimensionality of source
        and destination must match, i.e., for multiband reprojection
        the lengths of the first axes of the source and destination
        must be the same.
    src_transform: affine.Affine(), optional
        Source affine transformation. Required if source and
        destination are ndarrays. Will be derived from source if it is
        a rasterio Band. An error will be raised if this parameter is
        defined together with gcps.
    gcps: sequence of GroundControlPoint, optional
        Ground control points for the source. An error will be raised
        if this parameter is defined together with src_transform.
    src_crs: CRS or dict, optional
        Source coordinate reference system, in rasterio dict format.
        Required if source and destination are ndarrays.
        Will be derived from source if it is a rasterio Band.
        Example: CRS({'init': 'EPSG:4326'})
    src_nodata: int or float, optional
        The source nodata value. Pixels with this value will not be
        used for interpolation. If not set, it will be default to the
        nodata value of the source image if a masked ndarray or
        rasterio band, if available. Must be provided if dst_nodata is
        not None.
    dst_transform: affine.Affine(), optional
        Target affine transformation. Required if source and
        destination are ndarrays. Will be derived from target if it is
        a rasterio Band.
    dst_crs: CRS or dict, optional
        Target coordinate reference system. Required if source and
        destination are ndarrays. Will be derived from target if it
        is a rasterio Band.
    dst_nodata: int or float, optional
        The nodata value used to initialize the destination; it will
        remain in all areas not covered by the reprojected source.
        Defaults to the nodata value of the destination image (if set),
        the value of src_nodata, or 0 (GDAL default).
    resampling: int
        Resampling method to use. One of the following:
            Resampling.nearest,
            Resampling.bilinear,
            Resampling.cubic,
            Resampling.cubic_spline,
            Resampling.lanczos,
            Resampling.average,
            Resampling.mode
    init_dest_nodata: bool
        Flag to specify initialization of nodata in destination;
        prevents overwrite of previous warps. Defaults to True.
    kwargs: dict, optional
        Additional arguments passed to transformation function.

    Returns
    ---------
    out: None
        Output is written to destination.
    """
    # src_transform and gcps are mutually exclusive ways of georeferencing
    # the source.
    # BUG FIX: the implicitly-concatenated message previously read
    # "may notbe used together." -- a space was missing.
    if src_transform and gcps:
        raise ValueError("src_transform and gcps parameters may not "
                         "be used together.")

    # Resampling guard: value 7 is Resampling.gauss, which the enum accepts
    # but warping does not support, hence the explicit rejection.
    try:
        Resampling(resampling)
        if resampling == 7:
            raise ValueError
    except ValueError:
        raise ValueError(
            "resampling must be one of: {0}".format(", ".join(
                ['Resampling.{0}'.format(k) for k in
                 Resampling.__members__.keys() if k != 'gauss'])))

    # If working with identity transform, assume it is crs-less data
    # and that translating the matrix very slightly will avoid #674
    eps = 1e-100
    if src_transform and guard_transform(src_transform).is_identity:
        src_transform = src_transform.translation(eps, eps)
    if dst_transform and guard_transform(dst_transform).is_identity:
        dst_transform = dst_transform.translation(eps, eps)

    if src_transform:
        src_transform = guard_transform(src_transform).to_gdal()
    if dst_transform:
        dst_transform = guard_transform(dst_transform).to_gdal()

    # Passing None can cause segfault, use empty dict
    if src_crs is None:
        src_crs = {}
    if dst_crs is None:
        dst_crs = {}

    _reproject(source, destination, src_transform, gcps, src_crs, src_nodata,
               dst_transform, dst_crs, dst_nodata, resampling,
               init_dest_nodata, **kwargs)
@ensure_env
def calculate_default_transform(src_crs, dst_crs, width, height,
                                left=None, bottom=None, right=None, top=None,
                                gcps=None, resolution=None):
    """Output dimensions and transform for a reprojection.

    Source and destination coordinate reference systems and output
    width and height are the first four, required, parameters. Source
    georeferencing can be specified using either ground control points
    (gcps) or spatial bounds (left, bottom, right, top). These two
    forms of georeferencing are mutually exclusive.

    The destination transform is anchored at the left, top coordinate.

    Destination width and height (and resolution if not provided), are
    calculated using GDAL's method for suggest warp output.

    Parameters
    ----------
    src_crs: CRS or dict
        Source coordinate reference system, in rasterio dict format.
        Example: CRS({'init': 'EPSG:4326'})
    dst_crs: CRS or dict
        Target coordinate reference system.
    width, height: int
        Source raster width and height.
    left, bottom, right, top: float, optional
        Bounding coordinates in src_crs, from the bounds property of a
        raster. Required unless using gcps.
    gcps: sequence of GroundControlPoint, optional
        Instead of a bounding box for the source, a sequence of ground
        control points may be provided.
    resolution: tuple (x resolution, y resolution) or float, optional
        Target resolution, in units of target coordinate reference
        system.

    Returns
    -------
    transform: Affine
        Output affine transformation matrix
    width, height: int
        Output dimensions

    Notes
    -----
    Some behavior of this function is determined by the
    CHECK_WITH_INVERT_PROJ environment variable:

        YES: constrain output raster to extents that can be inverted
             avoids visual artifacts and coordinate discontinuties.
        NO:  reproject coordinates beyond valid bound limits
    """
    # Bounds and GCPs are mutually exclusive; exactly one form must be given.
    # BUG FIX: both messages below were implicitly concatenated without a
    # separating space ("may notbe used", "pointsmust be specified").
    if any(x is not None for x in (left, bottom, right, top)) and gcps:
        raise ValueError("Bounding values and ground control points may not "
                         "be used together.")
    if any(x is None for x in (left, bottom, right, top)) and not gcps:
        raise ValueError("Either four bounding values or ground control "
                         "points must be specified")

    dst_affine, dst_width, dst_height = _calculate_default_transform(
        src_crs, dst_crs, width, height, left, bottom, right, top, gcps)

    # If resolution is specified, keep the upper-left corner anchored,
    # swap in the requested resolutions and scale the width/height by the
    # ratio of estimated:specified resolution (ceil'd).
    if resolution:
        # Normalize the resolution argument into an (xres, yres) tuple.
        try:
            res = (float(resolution), float(resolution))
        except TypeError:
            res = (resolution[0], resolution[0]) \
                if len(resolution) == 1 else resolution[0:2]

        # Assume yres is provided as positive;
        # it needs to be negative for a north-up affine.
        xres = res[0]
        yres = -res[1]

        xratio = dst_affine.a / xres
        yratio = dst_affine.e / yres

        dst_affine = Affine(xres, dst_affine.b, dst_affine.c,
                            dst_affine.d, yres, dst_affine.f)
        dst_width = ceil(dst_width * xratio)
        dst_height = ceil(dst_height * yratio)

    return dst_affine, dst_width, dst_height
| |
"""
django_wurfl_tools.templatetags.wurfl
Provides template tags for easy querying of the requesting device.
All of these template tags assume that there is a variable named
`device` in the template context, which is an instance of pywurfl.Device.
This can be achieved by using the provided template context processor.
"""
from django import template
register = template.Library()
def get_device_from_context(context):
    """
    Look up the device object in the template context.

    @param context: The Django template context
    @return: An instance of pywurfl.Device, or None when no 'device'
             entry is present in the context
    """
    try:
        return context['device']
    except KeyError:
        return None
def parse_with_else(parser, token, end_tag):
    """
    Parse template nodes up to an end tag, honouring an optional
    intervening {% else %} tag.

    @param parser: The parser object passed to the templatetag compile function
    @param token: The token object passed to the templatetag compile function
    @param end_tag: The name of the end tag to parse to
    @type end_tag: string
    @return: a (nodelist_true, nodelist_false) pair of nodelists to render
             for the true and false branches respectively
    """
    true_nodes = parser.parse(('else', end_tag))
    if parser.next_token().contents == 'else':
        # An else branch exists: parse it and consume the end tag.
        false_nodes = parser.parse((end_tag,))
        parser.delete_first_token()
    else:
        # No else branch: the false nodelist renders nothing.
        false_nodes = template.NodeList()
    return true_nodes, false_nodes
# Comparison-operator symbols mapped to two-argument predicate functions.
ops = {
    '==': lambda lhs, rhs: lhs == rhs,
    '!=': lambda lhs, rhs: lhs != rhs,
    '<': lambda lhs, rhs: lhs < rhs,
    '>': lambda lhs, rhs: lhs > rhs,
    '<=': lambda lhs, rhs: lhs <= rhs,
    '>=': lambda lhs, rhs: lhs >= rhs,
}
@register.tag
def device_has(parser, token):
    """Django templatetag compilation function to compare a device property
    in a boolean context or with an inequality

    Usage:
    {% device_has "device_attr" %} Something {% else %} Something else {% end_device_has %} - Compares in a boolean context.
    {% device_has "device_attr" >= 9.0 %} Something {% else %} Something else {% end_device_has %} - Compares against an inequality
    {% device_has "device_attr" != "hi" %} Something {% end_device_has %} - Else is optional
    {% device_has device_attr == attr_val %} Something {% end_device_has %} - Attribute name and value can both reference context variables

    Valid Inequalities are:
    ==, !=, <, >, <=, >=, or any callable in the context. The callable should take two arguments, the property value and the
    value to compare against, and should return a boolean.
    """
    try:
        values = token.split_contents()
        tag_name = values[0]
        prop_name = template.Variable(values[1])
        if len(values) >= 3:
            if len(values) != 4:
                msg = "`%s` tag malformed.\n" % tag_name +\
                "If you have more than one argument, you must have both.\n"+\
                "an operator [==,!=,<,>,<=,>=,any callable in the context], and a value to compare to.\n"
                raise template.TemplateSyntaxError(msg)
            op = values[2]
            try:
                operator = ops[op]
            except KeyError:
                # Not a built-in operator symbol: treat it as the name of
                # a callable to be resolved from the template context.
                operator = template.Variable(op)
            val = template.Variable(values[3])
        else:
            # One-argument form: compare the property in a boolean context.
            # BUG FIX: the ops table has no '=' key, so the previous lookup
            # ops['='] raised KeyError for every one-argument usage.
            operator = ops['==']
            val = True
    except IndexError:
        msg = "`%s` tag requires at least one argument, the property name. \n" % tag_name +\
        "An optional second argument gives a value to compare against (default: boolean True)."
        raise template.TemplateSyntaxError(msg)
    nodelist_true, nodelist_false = parse_with_else(parser, token, 'end_device_has')
    return DeviceHasNode(prop_name, val, operator, nodelist_true, nodelist_false)
class DeviceHasNode(template.Node):
    """Renderer for device_has template_tag.
    See django_wurfl_tools.templatetags.device_has for more details."""

    def __init__(self, prop_name, val, operator, nodelist_true, nodelist_false):
        """Initialiser for DeviceHasNode
        @param prop_name: template.Variable naming the device attribute
        @param val: template.Variable to compare against, or the plain
                    boolean True for the one-argument tag form
        @param operator: comparison callable, or template.Variable naming one
        @param nodelist_true: nodes rendered when the comparison passes
        @param nodelist_false: nodes rendered otherwise
        """
        self.prop_name = prop_name
        self.val = val
        self.operator = operator
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false

    def render(self, context):
        """Renders the true nodelist if the comparison evaluates to true,
        otherwise renders the false nodelist"""
        device = get_device_from_context(context)
        passed = True
        # BUG FIX: resolve into locals instead of overwriting the stored
        # template.Variable attributes. Node instances are reused across
        # renders, so the old in-place assignment made every render after
        # the first call .resolve() on an already-resolved plain value and
        # crash with AttributeError.
        prop_name = self.prop_name.resolve(context)
        # BUG FIX: in the one-argument tag form `val` is the plain boolean
        # True, not a Variable, and has no .resolve() method.
        if isinstance(self.val, template.Variable):
            val = self.val.resolve(context)
        else:
            val = self.val
        operator = self.operator
        if not callable(operator):
            try:
                operator = operator.resolve(context)
            except Exception:
                # The named operator is not available in the context;
                # the comparison cannot pass.
                passed = False
        #If no inequality and value were passed, we use a boolean context
        if isinstance(val, bool) and val is True:
            compare = lambda x, y: bool(x) and unicode(x) != u"none"
        else:
            compare = lambda x, y: operator(x, y)
        passed = passed and bool(device) and compare(getattr(device, prop_name, None), val)
        if passed:
            return self.nodelist_true.render(context)
        else:
            return self.nodelist_false.render(context)
@register.tag
def device_prop(parser, token):
    """Django templatetag compilation function to return a string representation
    of a device property.

    Usage:
    {% device_prop "prop_name" %} - Outputs value of device.prop_name
    {% device_prop prop_name %} - Property name can be a context variable
    """
    try:
        bits = token.split_contents()
        tag_name = bits[0]
        prop_var = template.Variable(bits[1])
    except IndexError:
        # The tag was given with no property-name argument.
        msg = "`%s` tag requires at least one argument, the property name. \n" % tag_name
        raise template.TemplateSyntaxError(msg)
    return DevicePropNode(prop_var)
class DevicePropNode(template.Node):
    """Renderer for device_prop templatetag.
    See django_wurfl_tools.templatetags.device_prop for more details."""

    def __init__(self, prop_name):
        """Initialiser for DevicePropNode
        @param prop_name: template.Variable naming the device attribute
        """
        self.prop_name = prop_name

    def render(self, context):
        """Renders the DeviceProp node by getting the value on the device
        or None if the property doesn't exist"""
        device = get_device_from_context(context)
        # BUG FIX: resolve into a local instead of overwriting the stored
        # template.Variable. Node instances are reused across renders, so
        # the old in-place assignment made the second render call
        # .resolve() on the resolved string and crash.
        prop_name = self.prop_name.resolve(context)
        if not device:
            return "None"
        else:
            val = getattr(device, prop_name, None)
            return unicode(val)
@register.tag
def device_debug(parser, token):
    """Django templatetag compilation function for printing some device debug."""
    # Takes no arguments; all work happens in DeviceDebugNode.render().
    return DeviceDebugNode()
class DeviceDebugNode(template.Node):
    """Renderer for device_debug templatetag.
    See django_wurfl_tools.templatetags.device_debug for more details."""

    def __init__(self):
        """Initialiser for DeviceDebugNode; nothing to set up."""
        pass

    def render(self, context):
        """Render an HTML dump of every capability known for the device,
        grouped by WURFL capability group."""
        header = "<div class=\"wurfl-device-debug\"><h1>Device Debug</h1>"
        device = get_device_from_context(context)
        if not device:
            return header + "<h2>No `device` attribute found in context, or device was not found.</h2></div>"
        chunks = [header]
        for group_name, capabilities in device.groups.iteritems():
            chunks.append("<h2>%s</h2><pre>" % group_name)
            for capability in capabilities:
                chunks.append("\n%s = %s" % (capability, getattr(device, capability)))
            chunks.append("</pre>")
        chunks.append("</div>")
        return "".join(chunks)
@register.tag
def device_found(parser, token):
    """Django templatetag compilation function to check if the device was found in WURFL"""
    true_nodes, false_nodes = parse_with_else(parser, token, 'end_device_found')
    return DeviceFoundNode(true_nodes, false_nodes)
class DeviceFoundNode(template.Node):
    """Renderer for device_found templatetag"""

    def __init__(self, nodelist_true, nodelist_false):
        """Initialiser for DeviceFoundNode"""
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false

    def render(self, context):
        """Renders the true nodelist if the devices was found in WURFL,
        otherwise renders the false nodelist."""
        if get_device_from_context(context):
            return self.nodelist_true.render(context)
        return self.nodelist_false.render(context)
| |
# Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
    """
    choice(a, size=None, replace=True, p=None)
    Generates a random sample from a given 1-D array
    .. versionadded:: 1.7.0
    Parameters
    -----------
    a : 1-D array-like or int
        If an ndarray, a random sample is generated from its elements.
        If an int, the random sample is generated as if a was np.arange(n)
    size : int or tuple of ints, optional
        Output shape. Default is None, in which case a single value is
        returned.
    replace : boolean, optional
        Whether the sample is with or without replacement.
    p : 1-D array-like, optional
        The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribtion over all
        entries in a.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    --------
    samples : 1-D ndarray, shape (size,)
        The generated random samples
    Raises
    -------
    ValueError
        If a is an int and less than zero, if a or p are not 1-dimensional,
        if a is an array-like of size 0, if p is not a vector of
        probabilities, if a and p have different lengths, or if
        replace=False and the sample size is greater than the population
        size
    See Also
    ---------
    randint, shuffle, permutation
    Examples
    ---------
    Generate a uniform random sample from np.arange(5) of size 3:
    >>> np.random.choice(5, 3)  # doctest: +SKIP
    array([0, 3, 4])
    >>> #This is equivalent to np.random.randint(0,5,3)
    Generate a non-uniform random sample from np.arange(5) of size 3:
    >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])  # doctest: +SKIP
    array([3, 3, 0])
    Generate a uniform random sample from np.arange(5) of size 3 without
    replacement:
    >>> np.random.choice(5, 3, replace=False)  # doctest: +SKIP
    array([3,1,0])
    >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
    Generate a non-uniform random sample from np.arange(5) of size
    3 without replacement:
    >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
    ... # doctest: +SKIP
    array([2, 3, 0])
    Any of the above can be repeated with an arbitrary array-like
    instead of just integers. For instance:
    >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
    >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
    ... # doctest: +SKIP
    array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
          dtype='|S11')
    """
    random_state = check_random_state(random_state)

    # Format and Verify input
    a = np.array(a, copy=False)
    if a.ndim == 0:
        try:
            # __index__ must return an integer by python rules.
            pop_size = operator.index(a.item())
        except TypeError:
            raise ValueError("a must be 1-dimensional or an integer")
        if pop_size <= 0:
            raise ValueError("a must be greater than 0")
    elif a.ndim != 1:
        raise ValueError("a must be 1-dimensional")
    else:
        pop_size = a.shape[0]
        # FIX: was ``pop_size is 0`` -- identity comparison with an int is
        # implementation-defined (relies on CPython's small-int caching).
        if pop_size == 0:
            raise ValueError("a must be non-empty")

    # FIX: was ``None != p`` -- compare against None with ``is``/``is not``.
    if p is not None:
        p = np.array(p, dtype=np.double, ndmin=1, copy=False)
        if p.ndim != 1:
            raise ValueError("p must be 1-dimensional")
        if p.size != pop_size:
            raise ValueError("a and p must have same size")
        if np.any(p < 0):
            raise ValueError("probabilities are not non-negative")
        if not np.allclose(p.sum(), 1):
            raise ValueError("probabilities do not sum to 1")

    shape = size
    if shape is not None:
        size = np.prod(shape, dtype=np.intp)
    else:
        size = 1

    # Actual sampling
    if replace:
        if p is not None:
            cdf = p.cumsum()
            cdf /= cdf[-1]
            uniform_samples = random_state.random_sample(shape)
            idx = cdf.searchsorted(uniform_samples, side='right')
            # searchsorted returns a scalar
            idx = np.array(idx, copy=False)
        else:
            idx = random_state.randint(0, pop_size, size=shape)
    else:
        if size > pop_size:
            raise ValueError("Cannot take a larger sample than "
                             "population when 'replace=False'")

        if p is not None:
            if np.sum(p > 0) < size:
                raise ValueError("Fewer non-zero entries in p than size")
            n_uniq = 0
            p = p.copy()
            # FIX: ``np.int`` was an alias of the builtin ``int`` and has
            # been removed from NumPy (>= 1.24); use ``int`` directly.
            found = np.zeros(shape, dtype=int)
            flat_found = found.ravel()
            # Rejection loop: zero out already-drawn entries and redraw
            # until ``size`` unique indices have been collected.
            while n_uniq < size:
                x = random_state.rand(size - n_uniq)
                if n_uniq > 0:
                    p[flat_found[0:n_uniq]] = 0
                cdf = np.cumsum(p)
                cdf /= cdf[-1]
                new = cdf.searchsorted(x, side='right')
                _, unique_indices = np.unique(new, return_index=True)
                unique_indices.sort()
                new = new.take(unique_indices)
                flat_found[n_uniq:n_uniq + new.size] = new
                n_uniq += new.size
            idx = found
        else:
            idx = random_state.permutation(pop_size)[:size]
            if shape is not None:
                idx.shape = shape

    if shape is None and isinstance(idx, np.ndarray):
        # In most cases a scalar will have been made an array
        idx = idx.item(0)

    # Use samples as indices for a if a is array-like
    if a.ndim == 0:
        return idx

    if shape is not None and idx.ndim == 0:
        # If size == () then the user requested a 0-d array as opposed to
        # a scalar object when size is None. However a[idx] is always a
        # scalar and not an array. So this makes sure the result is an
        # array, taking into account that np.array(item) may not work
        # for object arrays.
        res = np.empty((), dtype=a.dtype)
        res[()] = a[idx]
        return res

    return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
                      random_state=None):
    """Generate a sparse random matrix given column class distributions

    Parameters
    ----------
    n_samples : int,
        Number of samples to draw in each column.
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    class_probability : list of size n_outputs of arrays of size (n_classes,)
        Optional (default=None). Class distribution of each column. If None the
        uniform distribution is assumed.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)

    Raises
    ------
    ValueError
        If a class array has a non-integer dtype, if a probability array
        does not sum to one, or if its length differs from its class array.
    """
    data = array.array('i')
    indices = array.array('i')
    indptr = array.array('i', [0])

    # FIX: draw everything from a single RNG. Previously the nonzero-class
    # draw below used the global ``np.random.rand`` and therefore ignored
    # ``random_state``, making results irreproducible even with a fixed seed.
    rng = check_random_state(random_state)

    for j in range(len(classes)):
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != 'i':
            raise ValueError("class dtype %s is not supported" %
                             classes[j].dtype)
        classes[j] = classes[j].astype(int)

        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])

        # FIX: tolerant comparison -- exact ``!= 1.0`` rejected valid
        # distributions whose sum differs from 1 only by rounding error.
        if not np.isclose(np.sum(class_prob_j), 1.0):
            raise ValueError("Probability array at index {0} does not sum to "
                             "one".format(j))

        if class_prob_j.shape[0] != classes[j].shape[0]:
            raise ValueError("classes[{0}] (length {1}) and "
                             "class_probability[{0}] (length {2}) have "
                             "different length.".format(j,
                                                       classes[j].shape[0],
                                                       class_prob_j.shape[0]))

        # If 0 is not present in the classes insert it with a probability 0.0
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)

        # If there are nonzero classes choose randomly using class_probability
        if classes[j].shape[0] > 1:
            p_nonzero = 1 - class_prob_j[classes[j] == 0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(n_population=n_samples,
                                                    n_samples=nnz,
                                                    random_state=rng)
            indices.extend(ind_sample)

            # Normalize probabilites for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = (class_probability_nz /
                                         np.sum(class_probability_nz))
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          rng.rand(nnz))
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))

    return sp.csc_matrix((data, indices, indptr),
                         (n_samples, len(classes)),
                         dtype=int)
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Faster commutators for two-body operators with diagonal Coulomb terms."""
import warnings
from openfermion import FermionOperator, normal_ordered
def commutator_ordered_diagonal_coulomb_with_two_body_operator(
        operator_a, operator_b, prior_terms=None):
    """Compute the commutator of two-body operators provided that both are
    normal-ordered and that the first only has diagonal Coulomb interactions.

    Args:
        operator_a: The first FermionOperator argument of the commutator.
            All terms must be normal-ordered, and furthermore either hopping
            operators (i^ j) or diagonal Coulomb operators (i^ i or i^ j^ i j).
        operator_b: The second FermionOperator argument of the commutator.
            operator_b can be any arbitrary two-body operator.
        prior_terms (optional): The initial FermionOperator to add to.

    Returns:
        The commutator, or the commutator added to prior_terms if provided.

    Notes:
        The function could be readily extended to the case of arbitrary
        two-body operator_a given that operator_b has the desired form;
        however, the extra check slows it down without desirable added utility.
    """
    if prior_terms is None:
        prior_terms = FermionOperator.zero()

    for term_a, coeff_a in operator_a.terms.items():
        for term_b, coeff_b in operator_b.terms.items():
            coefficient = coeff_a * coeff_b

            # Identical terms and empty (identity) terms commute: skip.
            if term_a == term_b or not term_a or not term_b:
                continue

            n_a, n_b = len(term_a), len(term_b)
            # Case 1: both operators are two-body, operator_a is i^ j^ i j.
            if (n_a == n_b == 4 and
                    term_a[0][0] == term_a[2][0] and
                    term_a[1][0] == term_a[3][0]):
                _commutator_two_body_diagonal_with_two_body(
                    term_a, term_b, coefficient, prior_terms)
            # Case 2: commutator of a 1-body and a 2-body operator.
            elif (n_a, n_b) in ((2, 4), (4, 2)):
                _commutator_one_body_with_two_body(
                    term_a, term_b, coefficient, prior_terms)
            # Case 3: both terms are one-body operators (both length 2).
            elif n_a == 2 and n_b == 2:
                _commutator_one_body_with_one_body(
                    term_a, term_b, coefficient, prior_terms)
            # Final case (case 4): violation of the input promise. Still
            # compute the commutator, but warn the user.
            else:
                warnings.warn('Defaulted to standard commutator evaluation '
                              'due to an out-of-spec operator.')
                extra = FermionOperator.zero()
                extra.terms[term_a + term_b] = coefficient
                extra.terms[term_b + term_a] = -coefficient
                prior_terms += normal_ordered(extra)

    return prior_terms
def _commutator_one_body_with_one_body(one_body_action_a, one_body_action_b,
coefficient, prior_terms):
"""Compute the commutator of two one-body operators specified by actions.
Args:
one_body_action_a, one_body_action_b (tuple): single terms of one-body
FermionOperators (i^ j or i^ i).
coefficient (complex float): coefficient of the commutator.
prior_terms (FermionOperator): prior terms to add the commutator to.
"""
# In the case that both the creation and annihilation operators of the
# two actions pair, two new terms must be added.
if (one_body_action_a[0][0] == one_body_action_b[1][0] and
one_body_action_b[0][0] == one_body_action_a[1][0]):
new_one_body_action_a = ((one_body_action_a[0][0], 1),
(one_body_action_a[0][0], 0))
new_one_body_action_b = ((one_body_action_b[0][0], 1),
(one_body_action_b[0][0], 0))
prior_terms.terms[new_one_body_action_a] = (
prior_terms.terms.get(new_one_body_action_a, 0.0) + coefficient)
prior_terms.terms[new_one_body_action_b] = (
prior_terms.terms.get(new_one_body_action_b, 0.0) - coefficient)
# A single pairing causes the mixed action a[0]^ b[1] to be added
elif one_body_action_a[1][0] == one_body_action_b[0][0]:
action_ab = ((one_body_action_a[0][0], 1),
(one_body_action_b[1][0], 0))
prior_terms.terms[action_ab] = (
prior_terms.terms.get(action_ab, 0.0) + coefficient)
# The other single pairing adds the mixed action b[0]^ a[1]
elif one_body_action_a[0][0] == one_body_action_b[1][0]:
action_ba = ((one_body_action_b[0][0], 1),
(one_body_action_a[1][0], 0))
prior_terms.terms[action_ba] = (
prior_terms.terms.get(action_ba, 0.0) - coefficient)
def _commutator_one_body_with_two_body(action_a, action_b,
                                       coefficient, prior_terms):
    """Compute commutator of action-specified one- and two-body operators.

    Mutates ``prior_terms`` in place, adding every term contributed by
    ``coefficient * [action_a, action_b]``.

    Args:
        action_a, action_b (tuple): single terms, one one-body and the other
            two-body, from normal-ordered FermionOperators. It does not matter
            which is one- or two-body so long as only one of each appears.
        coefficient (complex float): coefficient of the commutator.
        prior_terms (FermionOperator): prior terms to add the commutator to.
    """
    # Determine which action is 1-body and which is 2-body.
    # Label the creation and annihilation parts of the two terms.
    if len(action_a) == 4 and len(action_b) == 2:
        one_body_create = action_b[0][0]
        one_body_annihilate = action_b[1][0]
        two_body_create = (action_a[0][0], action_a[1][0])
        two_body_annihilate = (action_a[2][0], action_a[3][0])
        new_action = list(action_a)
        # Flip coefficient because we reversed the commutator's arguments.
        coefficient *= -1
    else:
        one_body_create = action_a[0][0]
        one_body_annihilate = action_a[1][0]
        two_body_create = (action_b[0][0], action_b[1][0])
        two_body_annihilate = (action_b[2][0], action_b[3][0])
        new_action = list(action_b)
    # If both terms are composed of number operators, they commute.
    if one_body_create == one_body_annihilate and (
            two_body_create == two_body_annihilate):
        return
    # If the one-body annihilation is in the two-body creation parts.
    # NOTE: this and the branch below are independent ``if``s, not
    # ``if``/``elif`` -- both can contribute for the same pair of actions.
    if one_body_annihilate in two_body_create:
        new_coeff = coefficient
        # Work on a copy so the second branch below still sees the
        # unmodified two-body action in ``new_action``.
        new_inner_action = list(new_action)
        # Determine which creation part(s) of the one-body action to use
        if one_body_annihilate == two_body_create[0]:
            new_inner_action[0] = (one_body_create, 1)
        elif one_body_annihilate == two_body_create[1]:
            new_inner_action[1] = (one_body_create, 1)
        # Normal order if necessary
        if new_inner_action[0][0] < new_inner_action[1][0]:
            new_inner_action[0], new_inner_action[1] = (
                new_inner_action[1], new_inner_action[0])
            new_coeff *= -1
        # Add the resulting term. The strict ``>`` drops the equal-index
        # case: a repeated creation operator (i^ i^) makes the term vanish.
        if new_inner_action[0][0] > new_inner_action[1][0]:
            prior_terms.terms[tuple(new_inner_action)] = (
                prior_terms.terms.get(tuple(new_inner_action), 0.0) +
                new_coeff)
    # If the one-body creation is in the two-body annihilation parts
    if one_body_create in two_body_annihilate:
        # Opposite sign: this contribution enters the commutator with the
        # reversed operator ordering.
        new_coeff = -coefficient
        # Determine which annihilation part(s) of the one-body action to sub in
        if one_body_create == two_body_annihilate[0]:
            new_action[2] = (one_body_annihilate, 0)
        elif one_body_create == two_body_annihilate[1]:
            new_action[3] = (one_body_annihilate, 0)
        # Normal order if necessary
        if new_action[2][0] < new_action[3][0]:
            new_action[2], new_action[3] = new_action[3], new_action[2]
            new_coeff *= -1
        # Add the resulting term. Strict ``>`` again drops repeated
        # annihilation operators (i i), which make the term vanish.
        if new_action[2][0] > new_action[3][0]:
            prior_terms.terms[tuple(new_action)] = (
                prior_terms.terms.get(tuple(new_action), 0.0) + new_coeff)
def _commutator_two_body_diagonal_with_two_body(
diagonal_coulomb_action, arbitrary_two_body_action,
coefficient, prior_terms):
"""Compute the commutator of two two-body operators specified by actions.
Args:
diagonal_coulomb_action (tuple): single term of a diagonal Coulomb
FermionOperator (i^ j^ i j). Must be in normal-ordered form,
i.e. i > j.
arbitrary_two_body_action (tuple): arbitrary single term of a two-body
FermionOperator, in normal-ordered form, i.e. i^ j^ k l with
i > j, k > l.
coefficient (complex float): coefficient of the commutator.
prior_terms (FermionOperator): prior terms to add the commutator to.
Notes:
The function could be readily extended to the case of reversed input
arguments (where diagonal_coulomb_action is the arbitrary one, and
arbitrary_two_body_action is from a diagonal Coulomb FermionOperator);
however, the extra check slows it down without significantly increased
utility.
"""
# Identify creation and annihilation parts of arbitrary_two_body_action.
arb_2bdy_create = (arbitrary_two_body_action[0][0],
arbitrary_two_body_action[1][0])
arb_2bdy_annihilate = (arbitrary_two_body_action[2][0],
arbitrary_two_body_action[3][0])
# The first two sub-cases cover when the creations and annihilations of
# diagonal_coulomb_action and arbitrary_two_body_action totally pair up.
if (diagonal_coulomb_action[2][0] == arbitrary_two_body_action[0][0] and
diagonal_coulomb_action[3][0] == arbitrary_two_body_action[1][0]):
prior_terms.terms[arbitrary_two_body_action] = (
prior_terms.terms.get(arbitrary_two_body_action, 0.0) -
coefficient)
elif (diagonal_coulomb_action[0][0] == arbitrary_two_body_action[2][0] and
diagonal_coulomb_action[1][0] == arbitrary_two_body_action[3][0]):
prior_terms.terms[arbitrary_two_body_action] = (
prior_terms.terms.get(arbitrary_two_body_action, 0.0) +
coefficient)
# Exactly one of diagonal_coulomb_action's creations matches one of
# arbitrary_two_body_action's annihilations.
elif diagonal_coulomb_action[0][0] in arb_2bdy_annihilate:
# Nothing gets added if there's an unbalanced double creation.
if diagonal_coulomb_action[1][0] in arb_2bdy_create or (
diagonal_coulomb_action[0][0] in arb_2bdy_create):
return
_add_three_body_term(
arbitrary_two_body_action, coefficient,
diagonal_coulomb_action[1][0], prior_terms)
elif diagonal_coulomb_action[1][0] in arb_2bdy_annihilate:
# Nothing gets added if there's an unbalanced double creation.
if diagonal_coulomb_action[0][0] in arb_2bdy_create or (
diagonal_coulomb_action[1][0] in arb_2bdy_create):
return
_add_three_body_term(arbitrary_two_body_action, coefficient,
diagonal_coulomb_action[0][0], prior_terms)
elif diagonal_coulomb_action[0][0] in arb_2bdy_create:
_add_three_body_term(arbitrary_two_body_action, -coefficient,
diagonal_coulomb_action[1][0], prior_terms)
elif diagonal_coulomb_action[1][0] in arb_2bdy_create:
_add_three_body_term(arbitrary_two_body_action, -coefficient,
diagonal_coulomb_action[0][0], prior_terms)
def _add_three_body_term(two_body_action, coefficient, mode, prior_terms):
new_action = list(two_body_action)
# Insert creation and annihilation operators into the two-body action.
new_action.insert(0, (mode, 1))
new_action.insert(3, (mode, 0))
# Normal order the creation operators of the new action.
# Each exchange in the action flips the sign of the coefficient.
if new_action[0][0] < new_action[1][0]:
new_action[0], new_action[1] = new_action[1], new_action[0]
coefficient *= -1
if new_action[1][0] < new_action[2][0]:
new_action[1], new_action[2] = new_action[2], new_action[1]
coefficient *= -1
# Normal order the annihilation operators of the new action.
# Each exchange in the action flips the sign of the coefficient.
if new_action[3][0] < new_action[4][0]:
new_action[3], new_action[4] = new_action[4], new_action[3]
coefficient *= -1
if new_action[4][0] < new_action[5][0]:
new_action[4], new_action[5] = new_action[5], new_action[4]
coefficient *= -1
# Add the new normal-ordered term to the prior terms.
prior_terms.terms[tuple(new_action)] = (
prior_terms.terms.get(tuple(new_action), 0.0) + coefficient)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.