gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
# Global switches: `debug` adds shape printouts during model construction,
# `predict` enables the test-set prediction pass at the end of the script.
debug = True
predict = False
# Kaggle Galaxy Zoo regression targets (one 37-dim probability vector per image).
y_train = np.load("data/solutions_train.npy")
ra.y_train=y_train
# split training data into training + a small validation set
ra.num_train = y_train.shape[0]
ra.num_valid = ra.num_train // 10 # integer division
ra.num_train -= ra.num_valid
# The held-out validation targets are the LAST tenth of the solutions file.
ra.y_valid = ra.y_train[ra.num_train:]
ra.y_train = ra.y_train[:ra.num_train]
# NOTE(review): load_data.num_train is set to the FULL training count, while
# ra.num_train above excludes the validation split -- presumably intentional,
# but verify against how load_data uses it.
load_data.num_train=y_train.shape[0]
load_data.train_ids = np.load("data/train_ids.npy")
ra.load_data.num_train = load_data.num_train
ra.load_data.train_ids = load_data.train_ids
# Mirror the id split so the realtime-augmentation module sees the same
# train/validation partition as this script.
ra.valid_ids = load_data.train_ids[ra.num_train:]
ra.train_ids = load_data.train_ids[:ra.num_train]
# --- Training hyperparameters ---
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
# Maps chunk index -> learning rate; applied when the chunk counter reaches the key.
LEARNING_RATE_SCHEDULE = {
#0: 0.04,
#1800: 0.004,
#2300: 0.0004,
0: 0.08,
2000: 0.008,
3200: 0.0008,
4600: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 1600#10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 40 #2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 10 #20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
print("The training is running for %s chunks, each with %s images. That are about %s epochs." % (NUM_CHUNKS,CHUNK_SIZE,CHUNK_SIZE*NUM_CHUNKS / (ra.num_train-CHUNK_SIZE) ))
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# Append a run header to the metrics log; the training loop appends one CSV-ish
# row per validation pass in the same format.
# NOTE(review): indentation was lost in this copy -- the f.write lines below
# belong inside the `with` body.
with open("trainingNmbrs.txt", 'a')as f:
f.write("#noFlip, default \n")
f.write("#The training is running for %s chunks, each with %s images. That are about %s epochs. \n" % (NUM_CHUNKS,CHUNK_SIZE,CHUNK_SIZE*NUM_CHUNKS / (ra.num_train-CHUNK_SIZE) ))
f.write("#round ,time, mean_train_loss , mean_valid_loss \n")
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
# Output locations for test-set predictions and validation analysis data.
TARGET_PATH = "predictions/final/try_convnet_simpler2.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_simpler2.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
# Two input views of each galaxy: a plain downsampled crop and the same crop
# rotated by 45 degrees (feeds the second input branch of the network).
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
# Random augmentation ranges applied on the fly to every training chunk.
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
# Random brightness jitter on top of the geometric augmentation, then a
# multiprocessing buffer so chunk generation overlaps GPU training.
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
# (same 90/10 split as performed on the ra module above)
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
# Index arrays used by the fixed (non-augmented) generators below.
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
    """
    Yield the training set in its fixed order (no random augmentation).
    Intended for post-processing passes only -- never for actual training.
    """
    fixed_gen = ra.realtime_fixed_augmented_data_gen(
        train_indices, 'train',
        ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE,
        target_sizes=input_sizes)
    return load_data.buffered_gen_mp(fixed_gen, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
    """Yield the validation split (deterministic transforms only), buffered
    in a background process."""
    fixed_gen = ra.realtime_fixed_augmented_data_gen(
        valid_indices, 'train',
        ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE,
        target_sizes=input_sizes)
    return load_data.buffered_gen_mp(fixed_gen, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
    """Yield the test set (deterministic transforms only), buffered in a
    background process."""
    fixed_gen = ra.realtime_fixed_augmented_data_gen(
        test_indices, 'test',
        ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE,
        target_sizes=input_sizes)
    return load_data.buffered_gen_mp(fixed_gen, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
# One accumulator list per input representation (plain view + 45-degree view).
xs_valid = [[] for _ in xrange(num_input_representations)]
# NOTE(review): indentation was lost in this copy -- the two lines following
# the `for` belong inside the loop over validation chunks.
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
# Stack the chunks, then move the colour channel to axis 1 (bc01 layout).
# NOTE(review): the comprehension variable `x_valid` leaks out of these
# comprehensions (Python 2 semantics) and is referenced again later in the
# script -- do not rename it without checking those uses.
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
if debug : print("input size: %s x %s x %s x %s" % (input_sizes[0][0],input_sizes[0][1],NUM_INPUT_FEATURES,BATCH_SIZE))
# Two input layers, one per representation (plain crop and 45-degree crop).
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
#l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
# Slice each input into rotated views that share the convolutional weights.
# Flips are disabled in this run (see "#noFlip" tag written to the log file).
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=False)
# cuda-convnet kernels want c01b ordering; shuffle from bc01 and back below.
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
# Convolution / pooling stack: 32 -> 64 -> 128 -> 128 filters.
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
#l12 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=4)
#l3a = cc_layers.CudaConvnetConv2DLayer(l12, n_filters=64, filter_size=7, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
#l3 = cc_layers.CudaConvnetPooling2DLayer(l3a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
#l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
# Merge the rotated views back together before the dense layers.
j3 = layers.MultiRotMergeLayer(l3s, num_views=2) #4) # 2) # merge convolutional parts
# Dense layers use maxout (dense + FeatureMaxPooling over pairs of features).
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
#l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
#l4bc = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
## l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
# 37 raw outputs, one per Galaxy Zoo answer; constraints applied by l6 below.
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
#l5 = layers.DenseLayer(l4bc, n_outputs=37, weights_std=0.01, init_bias_value=0.1, nonlinearity=layers.identity)
## l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
if debug : print("output shapes: l0 %s , l0r %s , l1 %s , l3 %s , j3 %s , l4 %s , l5 %s " % ( l0.get_output_shape(), l0r.get_output_shape(), l1.get_output_shape(), l3.get_output_shape(), j3.get_output_shape(), l4.get_output_shape(), l5.get_output_shape()))
#train_loss_nonorm = l6.error(normalisation=False)
#train_loss = l6.error() # but compute and print this!
#valid_loss = l6.error(dropout_active=False)
# Three loss expressions: un-normalised (used for the first warm-up chunks),
# normalised training loss, and validation loss with dropout disabled.
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
# Shared variables hold one whole chunk on the GPU; minibatches are sliced
# out of them by index in the `givens` mapping below.
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
# Nesterov momentum SGD; bias parameters are excluded from weight decay.
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
# Compiled entry points: two training variants plus loss / prediction /
# feature extraction functions that run without dropout.
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
#compute_features = theano.function([idx], l4bc.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
# Main training loop: one iteration per augmented chunk.
# NOTE(review): indentation was lost in this copy of the file -- everything
# from the first `print` down to (at least) the ETA prints belongs inside
# this `for` body, and the validation section belongs inside the
# `if ((e + 1) % VALIDATE_EVERY) == 0:` guard. The trailing `del` appears to
# belong AFTER the loop, since it deletes xs_valid which each validation
# pass still needs. Restore the structure before running.
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
# Apply the learning-rate schedule when this chunk index has an entry.
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
#if b % 1000 == 0:
#print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
# The loss expression is a mean squared error, so sqrt gives RMSE.
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
# Periodic validation pass on the preprocessed validation set.
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
# Timing / ETA report for this chunk.
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
# Append a metrics row to the run log after each validation pass.
if ((e + 1) % VALIDATE_EVERY) == 0:
with open("trainingNmbrs.txt", 'a')as f:
f.write(" %s , %s , %s , %s \n" % (e+1,time_since_start, mean_train_loss,mean_valid_loss) )
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# Stop here unless test-set prediction was requested at the top of the file.
# NOTE(review): `exit()` is the interactive helper injected by the site
# module; `sys.exit()` would be the robust choice (requires importing sys).
if not predict:
exit()
print "Computing predictions on test data"
# NOTE(review): indentation was lost in this copy -- the chunk-processing
# statements below belong inside the enumerate loop, and the inner batch
# loop inside that. Restore the structure before running.
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# make predictions for testset, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# The last chunk is zero-padded to a full batch, so drop the padding rows.
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# Disabled feature-extraction pass: would reload the (unshuffled) data and
# dump the l4 activations for every galaxy via compute_features.
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for set, don't forget to cute off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| |
"""Functions to convert NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph is through the
graph constuctor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
# Module metadata: original authors, and the public API exported by
# `from ... import *`.
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist']
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
If create_using.clear() works, assume it returns a graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
return nx.Graph()
try:
create_using.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return create_using
# NOTE(review): indentation was lost in this copy of the file -- the bodies
# of the try/except blocks below belong at their usual nesting; restore
# before running. The dispatch ORDER below is significant: graph-like inputs
# are tested before dicts, dicts before edge lists, and the optional
# pandas/numpy/scipy branches only run when those libraries import.
def to_networkx_graph(data,create_using=None,multigraph_input=False):
"""Make a NetworkX graph from a known data structure.
The preferred way to call this is automatically
from the class constructor
>>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
instead of the equivalent
>>> G=nx.from_dict_of_dicts(d)
Parameters
----------
data : object to be converted
Current known types are:
any NetworkX graph
dict-of-dicts
dist-of-lists
list of edges
numpy matrix
numpy ndarray
scipy sparse matrix
pygraphviz agraph
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
If True and data is a dict_of_dicts,
try to create a multigraph assuming dict_of_dict_of_lists.
If data and create_using are both multigraphs then create
a multigraph from a multigraph.
"""
# NX graph
if hasattr(data,"adj"):
try:
result= from_dict_of_dicts(data.adj,\
create_using=create_using,\
multigraph_input=data.is_multigraph())
if hasattr(data,'graph'): # data.graph should be dict-like
result.graph.update(data.graph)
if hasattr(data,'node'): # data.node should be dict-like
result.node.update( (n,dd.copy()) for n,dd in data.node.items() )
return result
# NOTE(review): the bare excepts in this function mask the real failure
# cause (including programming errors); they predate py3-era cleanups.
except:
raise nx.NetworkXError("Input is not a correct NetworkX graph.")
# pygraphviz agraph
if hasattr(data,"is_strict"):
try:
return nx.nx_agraph.from_agraph(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
# dict of dicts/lists
if isinstance(data,dict):
try:
return from_dict_of_dicts(data,create_using=create_using,\
multigraph_input=multigraph_input)
except:
try:
return from_dict_of_lists(data,create_using=create_using)
except:
raise TypeError("Input is not known type.")
# list or generator of edges
if (isinstance(data,list)
or isinstance(data,tuple)
or hasattr(data,'next')
or hasattr(data, '__next__')):
try:
return from_edgelist(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a valid edge list")
# Pandas DataFrame
try:
import pandas as pd
if isinstance(data, pd.DataFrame):
try:
return nx.from_pandas_dataframe(data, create_using=create_using)
except:
msg = "Input is not a correct Pandas DataFrame."
raise nx.NetworkXError(msg)
except ImportError:
msg = 'pandas not found, skipping conversion test.'
warnings.warn(msg, ImportWarning)
# numpy matrix or ndarray
try:
import numpy
if isinstance(data,numpy.matrix) or \
isinstance(data,numpy.ndarray):
try:
return nx.from_numpy_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct numpy matrix or array.")
except ImportError:
warnings.warn('numpy not found, skipping conversion test.',
ImportWarning)
# scipy sparse matrix - any format
try:
import scipy
if hasattr(data,"format"):
try:
return nx.from_scipy_sparse_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct scipy sparse matrix type.")
except ImportError:
warnings.warn('scipy not found, skipping conversion test.',
ImportWarning)
raise nx.NetworkXError(\
"Input is not a known data type for conversion.")
# NOTE(review): this `return` is unreachable (the raise above always fires).
return
def to_dict_of_lists(G, nodelist=None):
    """Return adjacency representation of graph as a dictionary of lists.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list
       Use only nodes specified in nodelist

    Notes
    -----
    Completely ignores edge data for MultiGraph and MultiDiGraph.
    """
    if nodelist is None:
        nodelist = G
    # Restrict both the keys and each neighbour list to the chosen nodes.
    return {node: [nbr for nbr in G.neighbors(node) if nbr in nodelist]
            for node in nodelist}
def from_dict_of_lists(d, create_using=None):
    """Return a graph from a dictionary of lists.

    Parameters
    ----------
    d : dictionary of lists
      A dictionary of lists adjacency representation.

    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    Examples
    --------
    >>> dol= {0:[1]} # single edge (0,1)
    >>> G=nx.from_dict_of_lists(dol)

    or
    >>> G=nx.Graph(dol) # use Graph constructor
    """
    G = _prep_create_using(create_using)
    G.add_nodes_from(d)
    if G.is_multigraph() and not G.is_directed():
        # A dict-of-lists cannot represent multiedges, and in the undirected
        # case every edge appears twice (u->v and v->u). Track finished
        # source nodes so each edge is inserted exactly once.
        seen = {}
        for node, nbrlist in d.items():
            for nbr in nbrlist:
                if nbr not in seen:
                    G.add_edge(node, nbr)
            seen[node] = 1  # don't allow reverse edge to show up
    else:
        G.add_edges_from((node, nbr)
                         for node, nbrlist in d.items()
                         for nbr in nbrlist)
    return G
def to_dict_of_dicts(G, nodelist=None, edge_data=None):
    """Return adjacency representation of graph as a dictionary of dictionaries.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list
       Use only nodes specified in nodelist

    edge_data : list, optional
       If provided,  the value of the dictionary will be
       set to edge_data for all edges.  This is useful to make
       an adjacency matrix type representation with 1 as the edge data.
       If edgedata is None, the edgedata in G is used to fill the values.
       If G is a multigraph, the edgedata is a dict for each pair (u,v).
    """
    dod = {}
    if nodelist is None:
        if edge_data is None:
            for node, nbrdict in G.adjacency():
                dod[node] = nbrdict.copy()
        else:
            # Replace every edge value with the constant edge_data.
            for node, nbrdict in G.adjacency():
                dod[node] = dict.fromkeys(nbrdict, edge_data)
    else:
        if edge_data is None:
            for node in nodelist:
                dod[node] = {v: data for v, data in G[node].items()
                             if v in nodelist}
        else:
            for node in nodelist:
                dod[node] = {v: edge_data for v in G[node]
                             if v in nodelist}
    return dod
# NOTE(review): indentation was lost in this copy -- the branch bodies below
# belong at their usual nesting; restore before running.
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
"""Return a graph from a dictionary of dictionaries.
Parameters
----------
d : dictionary of dictionaries
A dictionary of dictionaries adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
When True, the values of the inner dict are assumed
to be containers of edge data for multiple edges.
Otherwise this routine assumes the edge data are singletons.
Examples
--------
>>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
>>> G=nx.from_dict_of_dicts(dod)
or
>>> G=nx.Graph(dod) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
# is dict a MultiGraph or MultiDiGraph?
# Four cases below: (directed x multigraph) when the input encodes
# multiedges, then two simpler cases when it does not.
if multigraph_input:
# make a copy of the list of edge data (but not the edge data)
if G.is_directed():
if G.is_multigraph():
G.add_edges_from( (u,v,key,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else:
G.add_edges_from( (u,v,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else: # Undirected
if G.is_multigraph():
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,key,data)
for key,data in datadict.items()
)
seen.add((v,u))
else:
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,data)
for key,data in datadict.items() )
seen.add((v,u))
else: # not a multigraph to multigraph transfer
if G.is_multigraph() and not G.is_directed():
# d can have both representations u-v, v-u in dict. Only add one.
# We don't need this check for digraphs since we add both directions,
# or for Graph() since it is done implicitly (parallel edges not allowed)
seen=set()
for u,nbrs in d.items():
for v,data in nbrs.items():
if (u,v) not in seen:
# key=0 pins the single parallel edge slot, then the edge
# data dict is updated in place.
G.add_edge(u,v,key=0)
G[u][v][0].update(data)
seen.add((v,u))
else:
G.add_edges_from( ( (u,v,data)
for u,nbrs in d.items()
for v,data in nbrs.items()) )
return G
def to_edgelist(G, nodelist=None):
    """Return a list of edges in the graph.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list
       Use only nodes specified in nodelist
    """
    # Delegate to the graph's edge view; restrict to nodelist when given.
    if nodelist is None:
        return G.edges(data=True)
    return G.edges(nodelist, data=True)
def from_edgelist(edgelist, create_using=None):
    """Return a graph from a list of edges.

    Parameters
    ----------
    edgelist : list or iterator
      Edge tuples

    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    Examples
    --------
    >>> edgelist= [(0,1)] # single edge (0,1)
    >>> G=nx.from_edgelist(edgelist)

    or
    >>> G=nx.Graph(edgelist) # use Graph constructor
    """
    # Validate/prepare the target graph, then bulk-insert the edges.
    graph = _prep_create_using(create_using)
    graph.add_edges_from(edgelist)
    return graph
| |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import os
import time
# Absolute, symlink-resolved path of this launcher script; used below to
# locate the surrounding TEMA installation tree. argv0 is kept for usage text.
launcher_path = os.path.realpath(sys.argv[0])
argv0 = sys.argv[0]
def executable_in_path(program):
    """Search the directories listed in $PATH for `program`.

    Returns the full path of the first existing, executable match, or
    None when no directory on the PATH contains one.
    """
    def is_executable(filepath):
        return os.path.exists(filepath) and os.access(filepath, os.X_OK)

    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if is_executable(candidate):
            return candidate
    return None
def strip_path(full_path, remove_this="tema"):
    """Walk upward from `full_path` until a path component equal to
    `remove_this` is found, and return the portion of the path above it.

    If no such component exists, the walk terminates at the filesystem
    root (os.path.split eventually yields an empty final component).
    """
    head, tail = os.path.split(full_path)
    while tail and tail != remove_this:
        head, tail = os.path.split(head)
    return head
# Derive the key directories of the TEMA installation from the launcher's
# own location (the directory containing the "tema" component).
tema_path = strip_path(launcher_path)
modelutils_path = os.path.join(strip_path(tema_path, "TemaLib"), "ModelUtils")
validation_path = os.path.join(strip_path(tema_path, "TemaLib"), "Validation")
mocksut_path = os.path.join(tema_path,"MockSUT")
man_path = os.path.join(os.path.join(strip_path(tema_path, "TemaLib"), "Docs"),"man")
# Prepend tema_path to sys.path (reverse/append/reverse == insert at front).
# NOTE(review): indentation was lost in this copy -- the three sys.path
# lines belong inside the `if` body.
if tema_path not in sys.path:
sys.path.reverse()
sys.path.append(tema_path)
sys.path.reverse()
# Command tables: command_set maps a subcommand to a python module to run;
# exec_commands are dispatched as external scripts; the remaining sets are
# listed in usage output only.
command_set = dict()
command_set['testengine'] = "testengine.testengine"
command_set["mdm2svg"] = "eini.mdm2svg"
logtools = ["plotter","logreader","log2srt","sequencer"]
exec_commands = set([ "xsimulate", "simulate", "validate","analysator","runmodelpackage","help", "mocksut", "model2dot", "actionlist" ])
exec_commands.update(logtools)
modelutils_commands = set(["generatetaskswitcher","gt","rextendedrules","renamerules","composemodel","specialiser","generatetestconf"])
other_commands = set(["modelutils","engine_home","packagereader","ats4appmodel2lsts","variablemodelcreator","filterexpand","model2lsts","do_python","do_make"])
other_commands.update(modelutils_commands)
# Optional remapping of "tema help X" topics to other man page names.
help_commands_exceptions = dict()
def print_usage(path,exec_commands,other_commands,command_set):
    """Print launcher usage plus the sorted union of all known commands.

    Uses Python 2 print-chevron syntax; this file predates Python 3.
    """
    print >> sys.stdout, "Usage:", os.path.basename(path), "<command>"
    print >> sys.stdout, ""
    print >> sys.stdout, "Available commands:"
    # Sort all commands and print them
    commands = []
    commands.extend(exec_commands)
    commands.extend(other_commands)
    commands.extend(command_set.keys())
    commands.sort()
    [ sys.stdout.write(" %s\n" % command ) for command in commands]
    print >> sys.stdout, ""
    print >> sys.stdout, "See 'tema help COMMAND' for more information on a specific command."
    print >> sys.stdout, "Note that all commands don't have help pages."
# With no sub-command given, show usage and exit with failure.
if len(sys.argv) < 2 :
    print_usage(argv0,exec_commands,other_commands,command_set)
    raise SystemExit(1)
# Shift argv left so the sub-command sees itself as argv[0].
sys.argv[0:1]=[]
## print >> sys.stderr, "__".join(sys.argv)
# Dispatch: module-style commands are resolved through command_set and
# imported at the very end; every other command is either exec'ed as an
# external script (exec_commands) or handled inline (other_commands).
try:
    module= command_set[sys.argv[0]]
    if sys.argv[0] == "model2lsts":
        sys.argv[0] = "2lsts"
except KeyError:
    # Not a module command.
    if sys.argv[0] in exec_commands :
        environment = os.environ
        base_command = sys.argv[0]
        exec_path = modelutils_path
        # Pick the directory and script name for the chosen command.
        if sys.argv[0] == "simulate" or sys.argv[0] == "xsimulate":
            exec_path = os.path.join(validation_path, "simulation")
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] == "model2dot":
            exec_path = os.path.join(validation_path, "viewer")
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] in ["runmodelpackage","actionlist"]:
            exec_path = modelutils_path
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] == "mocksut":
            exec_path = mocksut_path
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] == "help" :
            # "help" shells out to man(1) with MANPATH pointing at the
            # bundled pages; topics are namespaced under "tema." unless the
            # caller already supplied that prefix.
            environment['MANPATH'] = man_path
            if len(sys.argv) == 1:
                print_usage(argv0,exec_commands,other_commands,command_set)
                raise SystemExit(1)
            if not sys.argv[1].startswith("tema."):
                command = "tema.%s" % (sys.argv[1])
            else:
                command = sys.argv[1]
            if command in help_commands_exceptions:
                sys.argv[1] = help_commands_exceptions[command]
            else:
                sys.argv[1] = command
            base_command = "man"
            # Locate the man binary by scanning PATH ourselves.
            for dir in os.environ.get('PATH', '').split(os.pathsep):
                candidate = os.path.join(dir,base_command)
                if os.path.isfile(candidate) and not os.path.isdir(candidate):
                    exec_path=dir
                    break
        if sys.argv[0] in logtools:
            exec_path = os.path.join(validation_path, "loghandling")
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] == "validate" or sys.argv[0] == "analysator" :
            exec_path = os.path.join(validation_path, "analysis")
            base_command = sys.argv[0] + ".py"
        exec_path = os.path.join(exec_path, base_command)
        # Propagate the adjusted module search path to the child process.
        environment['PYTHONPATH'] = ":".join(sys.path)
        # print >> sys.stderr, exec_path
        try:
            # execve replaces this process on success; only returns on error.
            os.execve( exec_path, sys.argv, environment )
        except Exception, e:
            print e
            print >> sys.stderr, exec_path
            raise SystemExit(1)
    elif sys.argv[0] in other_commands :
        # Inline-handled commands: print a path, or exec a specific tool
        # script with PYTHONPATH set up.
        if sys.argv[0] == "modelutils" :
            print modelutils_path
        elif sys.argv[0] == "engine_home" :
            print tema_path
        elif sys.argv[0] == "filterexpand" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/filter/filterexpand.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] == "model2lsts" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/model/model2lsts.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] in ["do_python"]:
            # Re-exec the current interpreter with TEMA's PYTHONPATH.
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = sys.executable
            args = [path]
            os.execve(path,args,environment)
        elif sys.argv[0] in ["do_make"]:
            # Prefer gmake, fall back to make.
            path = executable_in_path("gmake")
            if not path:
                path = executable_in_path("make")
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            environment['TEMA_MODEL_TOOLS'] = modelutils_path
            args = sys.argv
            args[0] = path
            os.execve(path,args,environment)
        elif sys.argv[0] in modelutils_commands:
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/modelutils/%s.py" % sys.argv[0]
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] == "packagereader" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/packagereader/packagereader.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] == "variablemodelcreator" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/variablemodels/VariableModelCreator.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] == "ats4appmodel2lsts" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/ats4appmodel/ats4appmodel2lsts.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
    else:
        print >> sys.stderr, "Command", sys.argv[0], "not found"
        raise SystemExit(1)
    raise SystemExit(0)
# Reached only when the command was found in command_set above.
__import__( module, globals(), locals(), [''])
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import pytest
import pyarrow as pa
@pytest.mark.gandiva
def test_tree_exp_builder():
    import pyarrow.gandiva as gandiva

    # Expression under test: res = (a > b) ? a : b over two int32 columns.
    tree = gandiva.TreeExprBuilder()
    field_a = pa.field('a', pa.int32())
    field_b = pa.field('b', pa.int32())
    schema = pa.schema([field_a, field_b])
    field_result = pa.field('res', pa.int32())
    lhs = tree.make_field(field_a)
    rhs = tree.make_field(field_b)
    assert lhs.return_type() == field_a.type
    predicate = tree.make_function("greater_than", [lhs, rhs],
                                   pa.bool_())
    branch = tree.make_if(predicate, lhs, rhs, pa.int32())
    expr = tree.make_expression(branch, field_result)
    assert expr.result().type == pa.int32()
    projector = gandiva.make_projector(
        schema, [expr], pa.default_memory_pool())
    # Gandiva generates compute kernel function named `@expr_X`
    assert projector.llvm_ir.find("@expr_") != -1
    col_a = pa.array([10, 12, -20, 5], type=pa.int32())
    col_b = pa.array([5, 15, 15, 17], type=pa.int32())
    expected = pa.array([10, 15, 15, 17], type=pa.int32())
    batch = pa.RecordBatch.from_arrays([col_a, col_b], names=['a', 'b'])
    out, = projector.evaluate(batch)
    assert out.equals(expected)
@pytest.mark.gandiva
def test_table():
    """Project c = a + b from a Table, evaluated batch-by-batch."""
    import pyarrow.gandiva as gandiva
    table = pa.Table.from_arrays([pa.array([1.0, 2.0]), pa.array([3.0, 4.0])],
                                 ['a', 'b'])
    builder = gandiva.TreeExprBuilder()
    node_a = builder.make_field(table.schema.field("a"))
    node_b = builder.make_field(table.schema.field("b"))
    # `sum_node` rather than `sum`: don't shadow the builtin.
    sum_node = builder.make_function("add", [node_a, node_b], pa.float64())
    field_result = pa.field("c", pa.float64())
    expr = builder.make_expression(sum_node, field_result)
    projector = gandiva.make_projector(
        table.schema, [expr], pa.default_memory_pool())
    # TODO: Add .evaluate function which can take Tables instead of
    # RecordBatches
    r, = projector.evaluate(table.to_batches()[0])
    e = pa.array([4.0, 6.0])
    assert r.equals(e)
@pytest.mark.gandiva
def test_filter():
    """Keep rows with a < 1000; selection indices come back as uint32."""
    import pyarrow.gandiva as gandiva
    table = pa.Table.from_arrays([pa.array([1.0 * i for i in range(10000)])],
                                 ['a'])
    builder = gandiva.TreeExprBuilder()
    node_a = builder.make_field(table.schema.field("a"))
    thousand = builder.make_literal(1000.0, pa.float64())
    cond = builder.make_function("less_than", [node_a, thousand], pa.bool_())
    condition = builder.make_condition(cond)
    assert condition.result().type == pa.bool_()
    # `row_filter` rather than `filter`: don't shadow the builtin.
    row_filter = gandiva.make_filter(table.schema, condition)
    # Gandiva generates compute kernel function named `@expr_X`
    assert row_filter.llvm_ir.find("@expr_") != -1
    result = row_filter.evaluate(table.to_batches()[0],
                                 pa.default_memory_pool())
    assert result.to_array().equals(pa.array(range(1000), type=pa.uint32()))
@pytest.mark.gandiva
def test_in_expr():
    """IN-membership filters over string, int32 and int64 columns."""
    import pyarrow.gandiva as gandiva
    builder = gandiva.TreeExprBuilder()
    # string
    arr = pa.array(["ga", "an", "nd", "di", "iv", "va"])
    table = pa.Table.from_arrays([arr], ["a"])
    node_a = builder.make_field(table.schema.field("a"))
    cond = builder.make_in_expression(node_a, ["an", "nd"], pa.string())
    condition = builder.make_condition(cond)
    # `row_filter` rather than `filter`: don't shadow the builtin.
    row_filter = gandiva.make_filter(table.schema, condition)
    result = row_filter.evaluate(table.to_batches()[0],
                                 pa.default_memory_pool())
    assert result.to_array().equals(pa.array([1, 2], type=pa.uint32()))
    # int32
    arr = pa.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 4])
    table = pa.Table.from_arrays([arr.cast(pa.int32())], ["a"])
    node_a = builder.make_field(table.schema.field("a"))
    cond = builder.make_in_expression(node_a, [1, 5], pa.int32())
    condition = builder.make_condition(cond)
    row_filter = gandiva.make_filter(table.schema, condition)
    result = row_filter.evaluate(table.to_batches()[0],
                                 pa.default_memory_pool())
    assert result.to_array().equals(pa.array([1, 3, 4, 8], type=pa.uint32()))
    # int64
    arr = pa.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 4])
    table = pa.Table.from_arrays([arr], ["a"])
    node_a = builder.make_field(table.schema.field("a"))
    cond = builder.make_in_expression(node_a, [1, 5], pa.int64())
    condition = builder.make_condition(cond)
    row_filter = gandiva.make_filter(table.schema, condition)
    result = row_filter.evaluate(table.to_batches()[0],
                                 pa.default_memory_pool())
    assert result.to_array().equals(pa.array([1, 3, 4, 8], type=pa.uint32()))
@pytest.mark.skip(reason="Gandiva C++ did not have *real* binary, "
                         "time and date support.")
def test_in_expr_todo():
    import pyarrow.gandiva as gandiva
    # TODO: Implement reasonable support for timestamp, time & date.
    # Current exceptions:
    # pyarrow.lib.ArrowException: ExpressionValidationError:
    # Evaluation expression for IN clause returns XXXX values are of typeXXXX

    def in_filter_indices(values, members, dtype):
        # Build a one-column table, filter it with `a IN members`, and
        # return the selected row indices as a pyarrow array.
        table = pa.Table.from_arrays([pa.array(values)], ["a"])
        builder = gandiva.TreeExprBuilder()
        node_a = builder.make_field(table.schema.field("a"))
        cond = builder.make_in_expression(node_a, members, dtype)
        condition = builder.make_condition(cond)
        row_filter = gandiva.make_filter(table.schema, condition)
        return row_filter.evaluate(table.to_batches()[0],
                                   pa.default_memory_pool()).to_array()

    # binary
    result = in_filter_indices([b"ga", b"an", b"nd", b"di", b"iv", b"va"],
                               [b'an', b'nd'], pa.binary())
    assert result.equals(pa.array([1, 2], type=pa.uint32()))

    # timestamp
    datetime_1 = datetime.datetime.utcfromtimestamp(1542238951.621877)
    datetime_2 = datetime.datetime.utcfromtimestamp(1542238911.621877)
    datetime_3 = datetime.datetime.utcfromtimestamp(1542238051.621877)
    result = in_filter_indices([datetime_1, datetime_2, datetime_3],
                               [datetime_2], pa.timestamp('ms'))
    assert list(result) == [1]

    # time
    result = in_filter_indices(
        [datetime_1.time(), datetime_2.time(), datetime_3.time()],
        [datetime_2.time()], pa.time64('ms'))
    assert list(result) == [1]

    # date
    result = in_filter_indices(
        [datetime_1.date(), datetime_2.date(), datetime_3.date()],
        [datetime_2.date()], pa.date32())
    assert list(result) == [1]
@pytest.mark.gandiva
def test_boolean():
    import pyarrow.gandiva as gandiva

    # Keep rows where (a < 50 and a > b) or (b < 11).
    table = pa.Table.from_arrays([
        pa.array([1., 31., 46., 3., 57., 44., 22.]),
        pa.array([5., 45., 36., 73., 83., 23., 76.])],
        ['a', 'b'])
    tree = gandiva.TreeExprBuilder()
    col_a = tree.make_field(table.schema.field("a"))
    col_b = tree.make_field(table.schema.field("b"))
    fifty = tree.make_literal(50.0, pa.float64())
    eleven = tree.make_literal(11.0, pa.float64())
    a_below_fifty = tree.make_function("less_than", [col_a, fifty],
                                       pa.bool_())
    a_above_b = tree.make_function("greater_than", [col_a, col_b],
                                   pa.bool_())
    b_below_eleven = tree.make_function("less_than", [col_b, eleven],
                                        pa.bool_())
    predicate = tree.make_or(
        [tree.make_and([a_below_fifty, a_above_b]), b_below_eleven])
    condition = tree.make_condition(predicate)
    row_filter = gandiva.make_filter(table.schema, condition)
    selected = row_filter.evaluate(table.to_batches()[0],
                                   pa.default_memory_pool())
    assert selected.to_array().equals(pa.array([0, 2, 5], type=pa.uint32()))
@pytest.mark.gandiva
def test_literals():
    import pyarrow.gandiva as gandiva

    builder = gandiva.TreeExprBuilder()

    # Literals accept both pyarrow DataType objects and their string names.
    samples = [
        (True, pa.bool_(), "bool"),
        (0, pa.uint8(), "uint8"),
        (1, pa.uint16(), "uint16"),
        (2, pa.uint32(), "uint32"),
        (3, pa.uint64(), "uint64"),
        (4, pa.int8(), "int8"),
        (5, pa.int16(), "int16"),
        (6, pa.int32(), "int32"),
        (7, pa.int64(), "int64"),
        (8.0, pa.float32(), "float32"),
        (9.0, pa.float64(), "float64"),
        ("hello", pa.string(), "string"),
        (b"world", pa.binary(), "binary"),
    ]
    for value, arrow_type, _ in samples:
        builder.make_literal(value, arrow_type)
    for value, _, type_name in samples:
        builder.make_literal(value, type_name)

    # Mismatched value/type pairs and a missing type are rejected.
    with pytest.raises(TypeError):
        builder.make_literal("hello", pa.int64())
    with pytest.raises(TypeError):
        builder.make_literal(True, None)
@pytest.mark.gandiva
def test_regex():
    import pyarrow.gandiva as gandiva

    # SQL LIKE with a %spark% pattern: every element but "park" matches.
    words = ["park", "sparkle", "bright spark and fire", "spark"]
    column = pa.array(words, type=pa.string())
    table = pa.Table.from_arrays([column], names=['a'])
    tree = gandiva.TreeExprBuilder()
    col_node = tree.make_field(table.schema.field("a"))
    pattern = tree.make_literal("%spark%", pa.string())
    like_node = tree.make_function("like", [col_node, pattern], pa.bool_())
    field_result = pa.field("b", pa.bool_())
    expr = tree.make_expression(like_node, field_result)
    projector = gandiva.make_projector(
        table.schema, [expr], pa.default_memory_pool())
    matched, = projector.evaluate(table.to_batches()[0])
    expected = pa.array([False, True, True, True], type=pa.bool_())
    assert matched.equals(expected)
@pytest.mark.gandiva
def test_get_registered_function_signatures():
    import pyarrow.gandiva as gandiva

    # The registry must expose at least one signature with typed metadata.
    signatures = gandiva.get_registered_function_signatures()
    first = signatures[0]
    assert type(first.return_type()) is pa.DataType
    assert type(first.param_types()) is list
    assert hasattr(first, "name")
@pytest.mark.gandiva
def test_filter_project():
    import pyarrow.gandiva as gandiva

    # Rows pass the filter when a > b; the projected value is
    # `b if b < c else c` (nulls propagate).
    mpool = pa.default_memory_pool()
    array0 = pa.array([10, 12, -20, 5, 21, 29], pa.int32())
    array1 = pa.array([5, 15, 15, 17, 12, 3], pa.int32())
    array2 = pa.array([1, 25, 11, 30, -21, None], pa.int32())
    table = pa.Table.from_arrays([array0, array1, array2], ['a', 'b', 'c'])
    field_result = pa.field("res", pa.int32())

    tree = gandiva.TreeExprBuilder()
    col_a = tree.make_field(table.schema.field("a"))
    col_b = tree.make_field(table.schema.field("b"))
    col_c = tree.make_field(table.schema.field("c"))

    a_gt_b = tree.make_function("greater_than", [col_a, col_b], pa.bool_())
    filter_condition = tree.make_condition(a_gt_b)

    b_lt_c = tree.make_function("less_than", [col_b, col_c], pa.bool_())
    pick_node = tree.make_if(b_lt_c, col_b, col_c, pa.int32())
    expr = tree.make_expression(pick_node, field_result)

    # Build a filter for the expressions.
    row_filter = gandiva.make_filter(table.schema, filter_condition)
    # Build a projector whose selection-vector mode matches the filter.
    projector = gandiva.make_projector(table.schema, [expr], mpool, "UINT32")

    selection_vector = row_filter.evaluate(table.to_batches()[0], mpool)
    projected, = projector.evaluate(table.to_batches()[0], selection_vector)
    assert projected.equals(pa.array([1, -21, None], pa.int32()))
@pytest.mark.gandiva
def test_to_string():
    import pyarrow.gandiva as gandiva

    tree = gandiva.TreeExprBuilder()

    # Literal reprs: doubles carry a raw() suffix, int64s do not.
    double_literal = tree.make_literal(2.0, pa.float64())
    assert str(double_literal).startswith('(const double) 2 raw(')
    assert str(tree.make_literal(2, pa.int64())) == '(const int64) 2'

    # Field reprs show the storage type and the field name.
    assert str(tree.make_field(pa.field('x', pa.float64()))) == '(double) x'
    assert str(tree.make_field(pa.field('y', pa.string()))) == '(string) y'

    # Function and boolean-combination nodes nest their operands.
    field_z = tree.make_field(pa.field('z', pa.bool_()))
    func_node = tree.make_function('not', [field_z], pa.bool_())
    assert str(func_node) == 'bool not((bool) z)'
    field_y = tree.make_field(pa.field('y', pa.bool_()))
    and_node = tree.make_and([func_node, field_y])
    assert str(and_node) == 'bool not((bool) z) && (bool) y'
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from barbican.common import config
from barbican.common import utils
from barbican.model import models
from barbican.model import repositories
from barbican.plugin.crypto import crypto
from barbican.plugin.crypto import manager
from barbican.plugin.interface import secret_store as sstore
# Module-level configuration: build the config object and parse arguments
# once at import time via barbican.common.config.
CONF = config.new_config()
config.parse_args(CONF)
class StoreCryptoContext(object):
    """Context for crypto-adapter secret store plugins.

    Bundles the core Barbican datastore models (project, secret variants)
    and the content type that a crypto-style secret store plugin needs
    while storing or retrieving secrets.
    """
    def __init__(
            self,
            project_model,
            secret_model=None,
            private_secret_model=None,
            public_secret_model=None,
            passphrase_secret_model=None,
            content_type=None):
        # The project is required; the per-secret models and content type
        # are optional and default to None.
        self.project_model = project_model
        self.secret_model = secret_model
        self.private_secret_model = private_secret_model
        self.public_secret_model = public_secret_model
        self.passphrase_secret_model = passphrase_secret_model
        self.content_type = content_type
class StoreCryptoAdapterPlugin(object):
    """Secret store plugin adapting to 'crypto' devices as backend.
    HSM-style 'crypto' devices perform encryption/decryption processing but
    do not actually store the encrypted information, unlike other 'secret
    store' plugins that do provide storage. Hence, this adapter bridges
    between these two plugin styles, providing Barbican persistence services
    as needed to store information.
    Note that this class does not inherit from SecretStoreBase, as it also
    requires access to lower-level datastore entities such as KEKDatum. This
    additional information is passed in via the 'context' parameter.
    """
    def __init__(self):
        super(StoreCryptoAdapterPlugin, self).__init__()
    def store_secret(self, secret_dto, context):
        """Store a secret.
        :param secret_dto: SecretDTO for secret
        :param context: StoreCryptoContext for secret
        :returns: an optional dictionary containing metadata about the secret
        """
        # Find HSM-style 'crypto' plugin.
        encrypting_plugin = manager.get_manager().get_plugin_store_generate(
            crypto.PluginSupportTypes.ENCRYPT_DECRYPT
        )
        # Find or create a key encryption key metadata.
        kek_datum_model, kek_meta_dto = _find_or_create_kek_objects(
            encrypting_plugin, context.project_model)
        # Secrets are base64 encoded before being passed to the secret stores.
        secret_bytes = base64.b64decode(secret_dto.secret)
        encrypt_dto = crypto.EncryptDTO(secret_bytes)
        # Enhance the context with content_type, This is needed to build
        # datum_model to store
        if not context.content_type:
            context.content_type = secret_dto.content_type
        # Create an encrypted datum instance and add the encrypted cyphertext.
        response_dto = encrypting_plugin.encrypt(
            encrypt_dto, kek_meta_dto, context.project_model.external_id
        )
        # Convert binary data into a text-based format.
        _store_secret_and_datum(
            context, context.secret_model, kek_datum_model, response_dto)
        # No extra plugin metadata to return; everything is persisted above.
        return None
    def get_secret(self, secret_type, metadata, context):
        """Retrieve a secret.
        :param secret_type: secret type
        :param metadata: secret metadata
        :param context: StoreCryptoContext for secret
        :returns: SecretDTO that contains secret
        """
        # A secret with no encrypted datum rows cannot be decrypted.
        if (not context.secret_model or
                not context.secret_model.encrypted_data):
            raise sstore.SecretNotFoundException()
        # TODO(john-wood-w) Need to revisit 1 to many datum relationship.
        datum_model = context.secret_model.encrypted_data[0]
        # Find HSM-style 'crypto' plugin.
        decrypting_plugin = manager.get_manager().get_plugin_retrieve(
            datum_model.kek_meta_project.plugin_name)
        # wrap the KEKDatum instance in our DTO
        kek_meta_dto = crypto.KEKMetaDTO(datum_model.kek_meta_project)
        # Convert from text-based storage format to binary.
        encrypted = base64.b64decode(datum_model.cypher_text)
        decrypt_dto = crypto.DecryptDTO(encrypted)
        # Decrypt the secret.
        secret = decrypting_plugin.decrypt(decrypt_dto,
                                           kek_meta_dto,
                                           datum_model.kek_meta_extended,
                                           context.project_model.external_id)
        # Re-encode: secrets cross the secret-store boundary base64-encoded,
        # mirroring the b64decode in store_secret().
        secret = base64.b64encode(secret)
        key_spec = sstore.KeySpec(alg=context.secret_model.algorithm,
                                  bit_length=context.secret_model.bit_length,
                                  mode=context.secret_model.mode)
        return sstore.SecretDTO(secret_type,
                                secret, key_spec,
                                datum_model.content_type)
    def delete_secret(self, secret_metadata):
        """Delete a secret."""
        # Intentionally a no-op: the crypto device holds no stored data and
        # datastore cleanup happens elsewhere.
        pass
    def generate_symmetric_key(self, key_spec, context):
        """Generate a symmetric key.
        :param key_spec: KeySpec that contains details on the type of key to
        generate
        :param context: StoreCryptoContext for secret
        :returns: a dictionary that contains metadata about the key
        """
        # Find HSM-style 'crypto' plugin.
        plugin_type = _determine_generation_type(key_spec.alg)
        if crypto.PluginSupportTypes.SYMMETRIC_KEY_GENERATION != plugin_type:
            raise sstore.SecretAlgorithmNotSupportedException(key_spec.alg)
        generating_plugin = manager.get_manager().get_plugin_store_generate(
            plugin_type,
            key_spec.alg,
            key_spec.bit_length,
            key_spec.mode)
        # Find or create a key encryption key metadata.
        kek_datum_model, kek_meta_dto = _find_or_create_kek_objects(
            generating_plugin, context.project_model)
        # Create an encrypted datum instance and add the created cypher text.
        generate_dto = crypto.GenerateDTO(key_spec.alg,
                                          key_spec.bit_length,
                                          key_spec.mode, None)
        # Create the encrypted meta.
        response_dto = generating_plugin.generate_symmetric(
            generate_dto, kek_meta_dto, context.project_model.external_id)
        # Convert binary data into a text-based format.
        _store_secret_and_datum(
            context, context.secret_model, kek_datum_model, response_dto)
        # No extra plugin metadata to return; everything is persisted above.
        return None
    def generate_asymmetric_key(self, key_spec, context):
        """Generates an asymmetric key.
        Returns a AsymmetricKeyMetadataDTO object containing
        metadata(s) for asymmetric key components. The metadata
        can be used to retrieve individual components of
        asymmetric key pair.
        """
        plugin_type = _determine_generation_type(key_spec.alg)
        if crypto.PluginSupportTypes.ASYMMETRIC_KEY_GENERATION != plugin_type:
            raise sstore.SecretAlgorithmNotSupportedException(key_spec.alg)
        generating_plugin = manager.get_manager().get_plugin_store_generate(
            plugin_type, key_spec.alg, key_spec.bit_length, None)
        # Find or create a key encryption key metadata.
        kek_datum_model, kek_meta_dto = _find_or_create_kek_objects(
            generating_plugin, context.project_model)
        generate_dto = crypto.GenerateDTO(key_spec.alg,
                                          key_spec.bit_length,
                                          None, key_spec.passphrase)
        # Create the encrypted meta.
        private_key_dto, public_key_dto, passwd_dto = (
            generating_plugin.generate_asymmetric(
                generate_dto, kek_meta_dto, context.project_model.external_id
            )
        )
        # Persist each key-pair component against its own secret model.
        _store_secret_and_datum(
            context,
            context.private_secret_model,
            kek_datum_model,
            private_key_dto)
        _store_secret_and_datum(
            context,
            context.public_secret_model,
            kek_datum_model,
            public_key_dto)
        # The passphrase datum is only stored when one was requested AND the
        # plugin actually produced one.
        if key_spec.passphrase and passwd_dto:
            _store_secret_and_datum(
                context,
                context.passphrase_secret_model,
                kek_datum_model,
                passwd_dto)
        return sstore.AsymmetricKeyMetadataDTO()
    def generate_supports(self, key_spec):
        """Key generation supported?
        Specifies whether the plugin supports key generation with the
        given key_spec.
        """
        # NOTE(review): assumes key_spec.alg is a string; a None alg would
        # raise AttributeError here — confirm callers validate first.
        return (key_spec and
                (key_spec.alg.lower() in
                 sstore.KeyAlgorithm.ASYMMETRIC_ALGORITHMS
                 or key_spec.alg.lower() in
                 sstore.KeyAlgorithm.SYMMETRIC_ALGORITHMS))
    def store_secret_supports(self, key_spec):
        """Key storage supported?
        Specifies whether the plugin supports storage of the secret given
        the attributes included in the KeySpec
        """
        return True
def _determine_generation_type(algorithm):
    """Map *algorithm* to the plugin generation type it requires.

    :raises sstore.SecretAlgorithmNotSupportedException: for an empty or
        unrecognized algorithm name.
    """
    if not algorithm:
        raise sstore.SecretAlgorithmNotSupportedException(algorithm)
    normalized = algorithm.lower()
    if normalized in crypto.PluginSupportTypes.SYMMETRIC_ALGORITHMS:
        return crypto.PluginSupportTypes.SYMMETRIC_KEY_GENERATION
    if normalized in crypto.PluginSupportTypes.ASYMMETRIC_ALGORITHMS:
        return crypto.PluginSupportTypes.ASYMMETRIC_KEY_GENERATION
    raise sstore.SecretAlgorithmNotSupportedException(algorithm)
def _find_or_create_kek_objects(plugin_inst, project_model):
    """Return the (KEKDatum model, KEKMetaDTO) pair for *plugin_inst*.

    Looks up (or lazily creates) the project's key-encryption-key record
    for the given plugin; on first use, asks the plugin to bind its key
    management metadata and persists the completed record.
    """
    kek_repo = repositories.get_kek_datum_repository()
    full_plugin_name = utils.generate_fullname_for(plugin_inst)
    kek_datum_model = kek_repo.find_or_create_kek_datum(project_model,
                                                        full_plugin_name)
    kek_meta_dto = crypto.KEKMetaDTO(kek_datum_model)
    # TODO(jwood): Does this need to be in a critical section? Should the
    # bind operation just be declared idempotent in the plugin contract?
    if not kek_datum_model.bind_completed:
        bound_dto = plugin_inst.bind_kek_metadata(kek_meta_dto)
        # By contract, plugins must return a (typically modified) DTO.
        if bound_dto is None:
            raise crypto.CryptoKEKBindingException(full_plugin_name)
        _indicate_bind_completed(bound_dto, kek_datum_model)
        kek_repo.save(kek_datum_model)
        kek_meta_dto = bound_dto
    return kek_datum_model, kek_meta_dto
def _store_secret_and_datum(
        context, secret_model, kek_datum_model, generated_dto):
    """Persist *secret_model* (when new) plus its encrypted datum row."""
    # A secret without an id has not been created yet: attach it to the
    # project and insert it before hanging datum records off of it.
    if not secret_model.id:
        secret_model.project_id = context.project_model.id
        repositories.get_secret_repository().create_from(secret_model)
    # Cyphertext is kept base64-encoded so binary data survives text storage.
    datum_model = models.EncryptedDatum(secret_model, kek_datum_model)
    datum_model.content_type = context.content_type
    datum_model.cypher_text = base64.b64encode(generated_dto.cypher_text)
    datum_model.kek_meta_extended = generated_dto.kek_meta_extended
    datum_model.secret_id = secret_model.id
    repositories.get_encrypted_datum_repository().create_from(datum_model)
def _indicate_bind_completed(kek_meta_dto, kek_datum):
"""Updates the supplied kek_datum instance
Updates the the kek_datum per the contents of the supplied
kek_meta_dto instance. This function is typically used once plugins
have had a chance to bind kek_meta_dto to their crypto systems.
:param kek_meta_dto:
:param kek_datum:
:return: None
"""
kek_datum.bind_completed = True
kek_datum.algorithm = kek_meta_dto.algorithm
kek_datum.bit_length = kek_meta_dto.bit_length
kek_datum.mode = kek_meta_dto.mode
kek_datum.plugin_meta = kek_meta_dto.plugin_meta
| |
"""
Support for the Xiaomi vacuum cleaner robot.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/vacuum.xiaomi_miio/
"""
import asyncio
from functools import partial
import logging
import voluptuous as vol
from homeassistant.components.vacuum import (
ATTR_CLEANED_AREA, DOMAIN, PLATFORM_SCHEMA, SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT, SUPPORT_FAN_SPEED, SUPPORT_LOCATE, SUPPORT_PAUSE,
SUPPORT_RETURN_HOME, SUPPORT_SEND_COMMAND, SUPPORT_STOP,
SUPPORT_STATE, SUPPORT_START, VACUUM_SERVICE_SCHEMA, StateVacuumDevice,
STATE_CLEANING, STATE_DOCKED, STATE_PAUSED, STATE_IDLE, STATE_RETURNING,
STATE_ERROR)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_HOST, CONF_NAME, CONF_TOKEN, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['python-miio==0.4.1', 'construct==2.9.41']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Xiaomi Vacuum cleaner'
DATA_KEY = 'vacuum.xiaomi_miio'
# Host and a 32-character device token are mandatory; unknown config keys
# pass through (ALLOW_EXTRA).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}, extra=vol.ALLOW_EXTRA)
# Names of the custom remote-control services registered by this platform.
SERVICE_MOVE_REMOTE_CONTROL = 'xiaomi_remote_control_move'
SERVICE_MOVE_REMOTE_CONTROL_STEP = 'xiaomi_remote_control_move_step'
SERVICE_START_REMOTE_CONTROL = 'xiaomi_remote_control_start'
SERVICE_STOP_REMOTE_CONTROL = 'xiaomi_remote_control_stop'
# Fan speed presets and their device values.
# NOTE(review): presumably miio fan power levels — confirm vs python-miio.
FAN_SPEEDS = {
    'Quiet': 38,
    'Balanced': 60,
    'Turbo': 77,
    'Max': 90}
# Extra state attribute names exposed by the entity.
ATTR_CLEANING_TIME = 'cleaning_time'
ATTR_DO_NOT_DISTURB = 'do_not_disturb'
ATTR_DO_NOT_DISTURB_START = 'do_not_disturb_start'
ATTR_DO_NOT_DISTURB_END = 'do_not_disturb_end'
ATTR_MAIN_BRUSH_LEFT = 'main_brush_left'
ATTR_SIDE_BRUSH_LEFT = 'side_brush_left'
ATTR_FILTER_LEFT = 'filter_left'
ATTR_SENSOR_DIRTY_LEFT = 'sensor_dirty_left'
ATTR_CLEANING_COUNT = 'cleaning_count'
ATTR_CLEANED_TOTAL_AREA = 'total_cleaned_area'
ATTR_CLEANING_TOTAL_TIME = 'total_cleaning_time'
ATTR_ERROR = 'error'
ATTR_RC_DURATION = 'duration'
ATTR_RC_ROTATION = 'rotation'
ATTR_RC_VELOCITY = 'velocity'
ATTR_STATUS = 'status'
# Remote-control moves take bounded velocity/rotation plus a duration.
SERVICE_SCHEMA_REMOTE_CONTROL = VACUUM_SERVICE_SCHEMA.extend({
    vol.Optional(ATTR_RC_VELOCITY):
        vol.All(vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)),
    vol.Optional(ATTR_RC_ROTATION):
        vol.All(vol.Coerce(int), vol.Clamp(min=-179, max=179)),
    vol.Optional(ATTR_RC_DURATION): cv.positive_int,
})
# Maps each custom service to the MiroboVacuum coroutine that handles it.
SERVICE_TO_METHOD = {
    SERVICE_START_REMOTE_CONTROL: {'method': 'async_remote_control_start'},
    SERVICE_STOP_REMOTE_CONTROL: {'method': 'async_remote_control_stop'},
    SERVICE_MOVE_REMOTE_CONTROL: {
        'method': 'async_remote_control_move',
        'schema': SERVICE_SCHEMA_REMOTE_CONTROL},
    SERVICE_MOVE_REMOTE_CONTROL_STEP: {
        'method': 'async_remote_control_move_step',
        'schema': SERVICE_SCHEMA_REMOTE_CONTROL},
}
# Feature bitmask advertised to Home Assistant.
SUPPORT_XIAOMI = SUPPORT_STATE | SUPPORT_PAUSE | \
                 SUPPORT_STOP | SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | \
                 SUPPORT_SEND_COMMAND | SUPPORT_LOCATE | \
                 SUPPORT_BATTERY | SUPPORT_CLEAN_SPOT | SUPPORT_START
# Device state codes mapped to Home Assistant vacuum states.
# NOTE(review): code meanings come from python-miio — confirm there.
STATE_CODE_TO_STATE = {
    2: STATE_IDLE,
    3: STATE_IDLE,
    5: STATE_CLEANING,
    6: STATE_RETURNING,
    8: STATE_DOCKED,
    9: STATE_ERROR,
    10: STATE_PAUSED,
    11: STATE_CLEANING,
    12: STATE_ERROR,
    15: STATE_RETURNING,
    16: STATE_CLEANING,
    17: STATE_CLEANING,
}
# NOTE(review): legacy @asyncio.coroutine/yield-from style, kept as-is for
# compatibility with the Home Assistant version this file targets.
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
                         discovery_info=None):
    """Set up the Xiaomi vacuum cleaner robot platform."""
    from miio import Vacuum
    if DATA_KEY not in hass.data:
        hass.data[DATA_KEY] = {}
    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)
    token = config.get(CONF_TOKEN)
    # Create handler
    # Only a token prefix is logged, keeping the full credential out of logs.
    _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
    vacuum = Vacuum(host, token)
    mirobo = MiroboVacuum(name, vacuum)
    hass.data[DATA_KEY][host] = mirobo
    async_add_entities([mirobo], update_before_add=True)
    @asyncio.coroutine
    def async_service_handler(service):
        """Map services to methods on MiroboVacuum."""
        method = SERVICE_TO_METHOD.get(service.service)
        # Everything except entity_id is forwarded as method kwargs.
        params = {key: value for key, value in service.data.items()
                  if key != ATTR_ENTITY_ID}
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        # Without explicit entity_ids the service targets every vacuum.
        if entity_ids:
            target_vacuums = [vac for vac in hass.data[DATA_KEY].values()
                              if vac.entity_id in entity_ids]
        else:
            target_vacuums = hass.data[DATA_KEY].values()
        update_tasks = []
        for vacuum in target_vacuums:
            yield from getattr(vacuum, method['method'])(**params)
        # Refresh HA state for every targeted vacuum after the calls finish.
        for vacuum in target_vacuums:
            update_coro = vacuum.async_update_ha_state(True)
            update_tasks.append(update_coro)
        if update_tasks:
            yield from asyncio.wait(update_tasks, loop=hass.loop)
    # Register every custom service with its (optional) specific schema.
    for vacuum_service in SERVICE_TO_METHOD:
        schema = SERVICE_TO_METHOD[vacuum_service].get(
            'schema', VACUUM_SERVICE_SCHEMA)
        hass.services.async_register(
            DOMAIN, vacuum_service, async_service_handler,
            schema=schema)
class MiroboVacuum(StateVacuumDevice):
    """Representation of a Xiaomi Vacuum cleaner robot.

    Wraps a miio.Vacuum client; all state is fetched synchronously in
    update() and cached on the instance for the property accessors.
    Note the class mixes legacy ``@asyncio.coroutine`` methods with
    ``async def`` — both work, but new methods should use ``async def``.
    """
    def __init__(self, name, vacuum):
        """Initialize the Xiaomi vacuum cleaner robot handler."""
        self._name = name
        self._vacuum = vacuum
        # miio status objects; all remain None until the first successful
        # update(), so every accessor must tolerate None.
        self.vacuum_state = None
        self._available = False
        self.consumable_state = None
        self.clean_history = None
        self.dnd_state = None
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def state(self):
        """Return the status of the vacuum cleaner."""
        if self.vacuum_state is not None:
            # Unknown state codes are logged and reported as None rather
            # than raising, so new firmware codes do not break the entity.
            try:
                return STATE_CODE_TO_STATE[int(self.vacuum_state.state_code)]
            except KeyError:
                _LOGGER.error("STATE not supported: %s, state_code: %s",
                              self.vacuum_state.state,
                              self.vacuum_state.state_code)
                return None
    @property
    def battery_level(self):
        """Return the battery level of the vacuum cleaner."""
        # Implicitly returns None before the first successful update().
        if self.vacuum_state is not None:
            return self.vacuum_state.battery
    @property
    def fan_speed(self):
        """Return the fan speed of the vacuum cleaner."""
        if self.vacuum_state is not None:
            speed = self.vacuum_state.fanspeed
            if speed in FAN_SPEEDS.values():
                # Translate the raw numeric speed back to its label.
                return [key for key, value in FAN_SPEEDS.items()
                        if value == speed][0]
            # Fall back to the raw value for speeds without a label.
            return speed
    @property
    def fan_speed_list(self):
        """Get the list of available fan speed steps of the vacuum cleaner."""
        return list(sorted(FAN_SPEEDS.keys(), key=lambda s: FAN_SPEEDS[s]))
    @property
    def device_state_attributes(self):
        """Return the specific state attributes of this vacuum cleaner."""
        attrs = {}
        # NOTE(review): assumes dnd_state/clean_history/consumable_state are
        # populated whenever vacuum_state is — update() sets them together,
        # but a partial failure there would raise AttributeError here.
        if self.vacuum_state is not None:
            attrs.update({
                ATTR_DO_NOT_DISTURB:
                    STATE_ON if self.dnd_state.enabled else STATE_OFF,
                ATTR_DO_NOT_DISTURB_START: str(self.dnd_state.start),
                ATTR_DO_NOT_DISTURB_END: str(self.dnd_state.end),
                # Not working --> 'Cleaning mode':
                #    STATE_ON if self.vacuum_state.in_cleaning else STATE_OFF,
                # Durations are reported in minutes (consumables in hours).
                ATTR_CLEANING_TIME: int(
                    self.vacuum_state.clean_time.total_seconds()
                    / 60),
                ATTR_CLEANED_AREA: int(self.vacuum_state.clean_area),
                ATTR_CLEANING_COUNT: int(self.clean_history.count),
                ATTR_CLEANED_TOTAL_AREA: int(self.clean_history.total_area),
                ATTR_CLEANING_TOTAL_TIME: int(
                    self.clean_history.total_duration.total_seconds()
                    / 60),
                ATTR_MAIN_BRUSH_LEFT: int(
                    self.consumable_state.main_brush_left.total_seconds()
                    / 3600),
                ATTR_SIDE_BRUSH_LEFT: int(
                    self.consumable_state.side_brush_left.total_seconds()
                    / 3600),
                ATTR_FILTER_LEFT: int(
                    self.consumable_state.filter_left.total_seconds()
                    / 3600),
                ATTR_SENSOR_DIRTY_LEFT: int(
                    self.consumable_state.sensor_dirty_left.total_seconds()
                    / 3600),
                ATTR_STATUS: str(self.vacuum_state.state)
                })
            if self.vacuum_state.got_error:
                attrs[ATTR_ERROR] = self.vacuum_state.error
        return attrs
    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._available
    @property
    def supported_features(self):
        """Flag vacuum cleaner robot features that are supported."""
        return SUPPORT_XIAOMI
    @asyncio.coroutine
    def _try_command(self, mask_error, func, *args, **kwargs):
        """Call a vacuum command handling error messages.

        Runs the blocking miio call in the executor; returns True on
        success, False (after logging mask_error) on DeviceException.
        """
        from miio import DeviceException
        try:
            yield from self.hass.async_add_job(partial(func, *args, **kwargs))
            return True
        except DeviceException as exc:
            _LOGGER.error(mask_error, exc)
            return False
    async def async_start(self):
        """Start or resume the cleaning task."""
        await self._try_command(
            "Unable to start the vacuum: %s", self._vacuum.start)
    async def async_pause(self):
        """Pause the cleaning task."""
        # Pausing is only meaningful (and only attempted) while cleaning.
        if self.state == STATE_CLEANING:
            await self._try_command(
                "Unable to set start/pause: %s", self._vacuum.pause)
    @asyncio.coroutine
    def async_stop(self, **kwargs):
        """Stop the vacuum cleaner."""
        yield from self._try_command(
            "Unable to stop: %s", self._vacuum.stop)
    @asyncio.coroutine
    def async_set_fan_speed(self, fan_speed, **kwargs):
        """Set fan speed.

        Accepts either a label from FAN_SPEEDS or a raw integer speed;
        anything else is logged and ignored.
        """
        if fan_speed.capitalize() in FAN_SPEEDS:
            fan_speed = FAN_SPEEDS[fan_speed.capitalize()]
        else:
            try:
                fan_speed = int(fan_speed)
            except ValueError as exc:
                _LOGGER.error("Fan speed step not recognized (%s). "
                              "Valid speeds are: %s", exc,
                              self.fan_speed_list)
                return
        yield from self._try_command(
            "Unable to set fan speed: %s",
            self._vacuum.set_fan_speed, fan_speed)
    @asyncio.coroutine
    def async_return_to_base(self, **kwargs):
        """Set the vacuum cleaner to return to the dock."""
        yield from self._try_command(
            "Unable to return home: %s", self._vacuum.home)
    @asyncio.coroutine
    def async_clean_spot(self, **kwargs):
        """Perform a spot clean-up."""
        yield from self._try_command(
            "Unable to start the vacuum for a spot clean-up: %s",
            self._vacuum.spot)
    @asyncio.coroutine
    def async_locate(self, **kwargs):
        """Locate the vacuum cleaner."""
        yield from self._try_command(
            "Unable to locate the botvac: %s", self._vacuum.find)
    @asyncio.coroutine
    def async_send_command(self, command, params=None, **kwargs):
        """Send raw command."""
        yield from self._try_command(
            "Unable to send command to the vacuum: %s",
            self._vacuum.raw_command, command, params)
    @asyncio.coroutine
    def async_remote_control_start(self):
        """Start remote control mode."""
        yield from self._try_command(
            "Unable to start remote control the vacuum: %s",
            self._vacuum.manual_start)
    @asyncio.coroutine
    def async_remote_control_stop(self):
        """Stop remote control mode."""
        yield from self._try_command(
            "Unable to stop remote control the vacuum: %s",
            self._vacuum.manual_stop)
    @asyncio.coroutine
    def async_remote_control_move(self,
                                  rotation: int = 0,
                                  velocity: float = 0.3,
                                  duration: int = 1500):
        """Move vacuum with remote control mode."""
        yield from self._try_command(
            "Unable to move with remote control the vacuum: %s",
            self._vacuum.manual_control,
            velocity=velocity, rotation=rotation, duration=duration)
    @asyncio.coroutine
    def async_remote_control_move_step(self,
                                       rotation: int = 0,
                                       velocity: float = 0.2,
                                       duration: int = 1500):
        """Move vacuum one step with remote control mode."""
        yield from self._try_command(
            "Unable to remote control the vacuum: %s",
            self._vacuum.manual_control_once,
            velocity=velocity, rotation=rotation, duration=duration)
    def update(self):
        """Fetch state from the device.

        Runs synchronously (HA calls it from the executor).  On success all
        cached status objects are refreshed together and the entity is
        marked available; errors leave the previous state in place.
        """
        from miio import DeviceException
        try:
            state = self._vacuum.status()
            self.vacuum_state = state
            self.consumable_state = self._vacuum.consumable_status()
            self.clean_history = self._vacuum.clean_history()
            self.dnd_state = self._vacuum.dnd_status()
            self._available = True
        except OSError as exc:
            _LOGGER.error("Got OSError while fetching the state: %s", exc)
        except DeviceException as exc:
            _LOGGER.warning("Got exception while fetching the state: %s", exc)
| |
from pypika import (
Parameter,
Tables,
VerticaQuery,
functions as fn,
terms,
)
from .base import Database
from .sql_types import (
BigInt,
Boolean,
Char,
Date,
DateTime,
Decimal,
DoublePrecision,
Float,
Integer,
Numeric,
Real,
SmallInt,
Text,
Time,
Timestamp,
VarChar,
)
from .type_engine import TypeEngine
class Trunc(terms.Function):
    """Vertica ``TRUNC(expr, format)`` function wrapper for date truncation."""

    def __init__(self, field, date_format, alias=None):
        # Zero-argument super() — this module targets Python 3.
        super().__init__('TRUNC', field, date_format, alias=alias)
class VerticaDatabase(Database):
    """
    Vertica client that uses the vertica_python driver.
    """
    # The pypika query class to use for constructing queries
    query_cls = VerticaQuery
    # fireant interval name -> Vertica TRUNC() date-format code.
    DATETIME_INTERVALS = {
        "hour": "HH",
        "day": "DD",
        "week": "IW",
        "month": "MM",
        "quarter": "Q",
        "year": "Y",
    }
    def __init__(
        self,
        host="localhost",
        port=5433,
        database="vertica",
        user="vertica",
        password=None,
        connection_timeout=None,
        log_level=None,
        **kwargs,
    ):
        """Store connection settings; no connection is opened here."""
        super(VerticaDatabase, self).__init__(host, port, database, **kwargs)
        self.user = user
        self.password = password
        self.connection_timeout = connection_timeout
        self.type_engine = VerticaTypeEngine()
        self.log_level = log_level
    def cancel(self, connection):
        """Cancel the query currently running on *connection*."""
        connection.cancel()
    def connect(self):
        """Open and return a new vertica_python connection."""
        import vertica_python
        connection_options = dict(
            host=self.host,
            port=self.port,
            database=self.database,
            user=self.user,
            password=self.password,
            connection_timeout=self.connection_timeout,
            # Replace undecodable bytes rather than failing the fetch.
            unicode_error="replace",
        )
        if self.log_level:
            connection_options['log_level'] = self.log_level
        return vertica_python.connect(**connection_options)
    def trunc_date(self, field, interval):
        """Truncate *field* to *interval*; unknown intervals fall back to day."""
        trunc_date_interval = self.DATETIME_INTERVALS.get(str(interval), "DD")
        return Trunc(field, trunc_date_interval)
    def date_add(self, field, date_part, interval):
        """Return *field* shifted by *interval* units of *date_part*."""
        return fn.TimestampAdd(str(date_part), interval, field)
    def get_column_definitions(self, schema, table, connection=None):
        """Fetch (column_name, data_type) rows for *schema*.*table*.

        Queries both v_catalog view_columns and columns so the table may be
        either a view or a base table; pypika's ``+`` combines the two
        queries (presumably as a UNION — confirm against pypika docs).
        """
        view_columns, table_columns = Tables('view_columns', 'columns')
        view_query = (
            VerticaQuery.from_(view_columns)
            .select(view_columns.column_name, view_columns.data_type)
            .where(
                (view_columns.table_schema == Parameter(':schema'))
                & (view_columns.field('table_name') == Parameter(':table'))
            )
            .distinct()
        )
        table_query = (
            VerticaQuery.from_(table_columns, immutable=False)
            .select(table_columns.column_name, table_columns.data_type)
            .where(
                (table_columns.table_schema == Parameter(':schema'))
                & (table_columns.field("table_name") == Parameter(':table'))
            )
            .distinct()
        )
        return self.fetch(
            str(view_query + table_query), parameters=dict(schema=schema, table=table), connection=connection
        )
    def import_csv(self, table, file_path, connection=None):
        """
        Imports a file into a database table.
        :param table: The name of a table to import data into.
        :param file_path: The path of the file to be imported.
        :param connection: (Optional) The connection to execute this query with.
        """
        import_query = VerticaQuery.from_file(file_path).copy_(table)
        self.execute(str(import_query), connection=connection)
    def create_temporary_table_from_columns(self, table, columns, connection=None):
        """
        Creates a temporary table from a list of columns.
        :param table: The name of the new temporary table.
        :param columns: The columns of the new temporary table.
        :param connection: (Optional) The connection to execute this query with.
        """
        create_query = VerticaQuery.create_table(table).temporary().local().preserve_rows().columns(*columns)
        self.execute(str(create_query), connection=connection)
    def create_temporary_table_from_select(self, table, select_query, connection=None):
        """
        Creates a temporary table from a SELECT query.
        :param table: The name of the new temporary table.
        :param select_query: The query to be used for selecting data of an existing table for the new temporary table.
        :param connection: (Optional) The connection to execute this query with.
        """
        create_query = VerticaQuery.create_table(table).temporary().local().preserve_rows().as_select(select_query)
        self.execute(str(create_query), connection=connection)
class VerticaTypeEngine(TypeEngine):
    """Type engine mapping Vertica column types to/from ANSI SQL types."""
    # Vertica type name (lowercased) -> ANSI type class.
    vertica_to_ansi_mapper = {
        "char": Char,
        "varchar": VarChar,
        "varchar2": VarChar,
        "longvarchar": Text,
        "boolean": Boolean,
        "int": Integer,
        "integer": Integer,
        "int8": Integer,
        "smallint": SmallInt,
        "tinyint": SmallInt,
        "bigint": BigInt,
        "decimal": Decimal,
        "numeric": Numeric,
        "number": Numeric,
        "float": Float,
        "float8": Float,
        "real": Real,
        "double": DoublePrecision,
        "date": Date,
        "time": Time,
        "timetz": Time,
        "datetime": DateTime,
        "smalldatetime": DateTime,
        "timestamp": Timestamp,
        "timestamptz": Timestamp,
    }
    # ANSI type name -> canonical Vertica type name (reverse direction).
    ansi_to_vertica_mapper = {
        "CHAR": "char",
        "VARCHAR": "varchar",
        "TEXT": "longvarchar",
        "BOOLEAN": "boolean",
        "INTEGER": "integer",
        "SMALLINT": "smallint",
        "BIGINT": "bigint",
        "DECIMAL": "decimal",
        "NUMERIC": "numeric",
        "FLOAT": "float",
        "REAL": "real",
        "DOUBLEPRECISION": "double",
        "DATE": "date",
        "TIME": "time",
        "DATETIME": "datetime",
        "TIMESTAMP": "timestamp",
    }
    def __init__(self):
        """Initialize the engine with both direction mappers."""
        super(VerticaTypeEngine, self).__init__(self.vertica_to_ansi_mapper, self.ansi_to_vertica_mapper)
| |
import pyemto
import numpy as np
import os
latpath = "../../../../" # Path to bmdl, kstr and shape directories
# Each system needs to have the same number of alloy elements.
#systems = [['Fe','Al'],['Fe','Cr']]
#systems = [['Fe'],['Al']]
systems = [['Al']]
#concentrations = [[0.5,0.5]]
concentrations = [[1.0]]
magn = "NM" # Possible NM (Non-magnetic), FM (ferromagnetic) and
            # DLM (Disordered local moments)
# Initial Wigner-Seitz radius guess; a single float is later expanded to
# one value per lattice (bcc, fcc, hcp).
initial_sws = 3.0
# Check that initial_sws has the correct format: a single float is expanded
# to one value per lattice (bcc, fcc, hcp); otherwise it must already be a
# list of exactly 3 floats.
if isinstance(initial_sws, float):
    initial_sws = [initial_sws for x in range(3)]
elif isinstance(initial_sws, list):
    pass
else:
    # Fixed wording/typos in the user-facing error messages.
    print("ERROR: initial_sws should be a float or a list of 3 floats!")
    exit()
if len(initial_sws) != 3:
    print("ERROR: initial_sws should be a float or a list of 3 floats!")
    exit()
# Sanity checks: all systems must be alloys of the same arity, and every
# concentration vector must have one entry per alloy element.
for s in systems:
    if len(s) != len(systems[0]):
        # Fixed grammar in the user-facing error message.
        print("Each system needs to have the same number of alloy elements!")
        exit()
for c in concentrations:
    if len(c) != len(systems[0]):
        # Fixed typos ("concetrations", "number number") in the message.
        print("Each given concentration must have the same number of entries as elements in the system!")
        exit()
# Next check the magnetic state of the system and initialize the exchange
# splittings (one value per atom; Fe gets a larger moment than the rest).
splits = []
if magn == "FM":
    afm = "F"
    for s in systems:
        splt = []
        for atom in s:
            if atom == "Fe":
                splt.append(2.0)
            else:
                splt.append(0.5)
        splits.append(splt)
elif magn == "DLM":
    afm = "F"
    # First duplicate each atom and concentration: DLM is modelled as a
    # 50/50 alloy of spin-up and spin-down copies of every element.
    newsystems = []
    newconcs = []
    for i in range(len(systems)):
        news = []
        # NOTE(review): this newc is never used; the concentration
        # doubling happens in the separate loop below.
        newc = []
        splt = []
        for j in range(len(systems[i])):
            news.append(systems[i][j])
            news.append(systems[i][j])
            if systems[i][j] == "Fe":
                splt.append( 2.0)
                splt.append(-2.0)
            else:
                splt.append( 0.5)
                splt.append(-0.5)
        splits.append(splt)
        newsystems.append(news)
    systems = newsystems
    for c in concentrations:
        newc = []
        for conc in c:
            newc.append(conc)
            newc.append(conc)
        newconcs.append(newc)
    concentrations = newconcs
elif magn == "NM":
    afm = "P"
    # Non-magnetic: zero splitting for every atom.
    for s in systems:
        splt = []
        for atom in s:
            splt.append(0.0)
        splits.append(splt)
else:
    print("Wrong magnetic state is given: " + magn)
    print("Should be one of NM, FM or DLM!")
    exit()
results = []
# We are ready to make inputs: for each system x concentration, generate
# EMTO inputs for bcc, fcc and hcp, using an equation-of-state fit over a
# small volume range to pick the equilibrium sws for the final runs.
for si in range(len(systems)):
    s = systems[si]
    split = splits[si]
    # Create main directory
    sname = ""
    if magn == "DLM":
        # DLM duplicated every atom; take every second entry for naming.
        nlist = [s[i] for i in range(0,len(s),2)]
    else:
        nlist = s
    for atom in nlist:
        sname = sname + atom
    #
    # Make directories
    if not os.path.lexists(sname):
        os.makedirs(sname)
    for c in concentrations:
        sc_res = []
        # Make subdirectory for concentration, e.g. "0500-0500" for 50/50.
        cname = ""
        count = 0
        if magn == "DLM":
            clist = [c[i] for i in range(0,len(c),2)]
        else:
            clist = c
        for conc in clist:
            count += 1
            cname = cname +str(int(conc*1000)).zfill(4)
            if not count == len(clist):
                cname = cname+"-"
        apath = os.path.join(sname,cname)
        if not os.path.lexists(apath):
            os.makedirs(apath)
        # Make subdirectory for magnetic state
        apath = os.path.join(apath,magn)
        if not os.path.lexists(apath):
            os.makedirs(apath)
        # Construct base jobname, e.g. "al1.00" / "fe0.50_cr0.50".
        jobname = ""
        for i in range(len(nlist)):
            if jobname == "":
                pass
            else:
                jobname = jobname + "_"
            jobname = jobname + nlist[i].lower() + "%4.2f" % (clist[i])
        finalname = jobname + "_final"
        # BCC first
        alloy = pyemto.System(folder=apath)
        initialsws = initial_sws[0] # We need some clever way to get this
        alloy.bulk(lat='bcc', jobname=jobname+"_bcc",atoms=s,concs=c,
                   latpath=latpath,sws=initialsws, xc='PBE')
        swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
        #alloy.lattice_constants_batch_generate(sws=swsrange)
        # Fit energy vs. volume to get equilibrium sws, bulk modulus, energy.
        sws0, B0, e0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
        sc_res.append([e0,B0,sws0])
        alloy.bulk(lat='bcc',
                   jobname=finalname+"_bcc",
                   latpath=latpath,
                   sws=sws0,
                   atoms = s,
                   concs = c,
                   splts = split,
                   afm = afm,
                   amix=0.02,
                   efmix=0.9,
                   expan='M',
                   sofc='Y',
                   xc='PBE',
                   nky=21)
        alloy.write_inputs()
        # FCC second
        alloy = pyemto.System(folder=apath)
        initialsws = initial_sws[1] # We need some clever way to get this
        alloy.bulk(lat='fcc', jobname=jobname+"_fcc",atoms=s,concs=c,
                   latpath=latpath,sws=initialsws, xc='PBE')
        swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
        sws0, B0, e0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
        sc_res.append([e0,B0,sws0])
        alloy.bulk(lat='fcc',
                   jobname=finalname+"_fcc",
                   latpath=latpath,
                   sws=sws0,
                   atoms = s,
                   concs = c,
                   splts = split,
                   afm = afm,
                   amix=0.02,
                   efmix=0.9,
                   expan='M',
                   sofc='Y',
                   xc='PBE',
                   nky=21)
        alloy.write_inputs()
        # HCP last (also optimizes the c/a ratio)
        alloy = pyemto.System(folder=apath)
        initialsws = initial_sws[2] # We need some clever way to get this
        alloy.bulk(lat='hcp',jobname=jobname,latpath=latpath,
                   sws=initialsws, atoms = s,concs = c, xc='PBE')
        swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
        #alloy.lattice_constants_batch_generate(sws=swsrange)
        sws0, c_over_a0, B0, e0, R0, cs0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
        alloy.sws = sws0
        ca = round(c_over_a0,3)
        sc_res.append([e0,B0,sws0,c_over_a0])
        # Check whether bmdl, kstr and shape exist with the correct c/a.
        hcpname ="hcp_"+str(ca) # Structure name
        strucpath = "../"
        # Check if input files are in place
        if os.path.exists(os.path.join(strucpath,hcpname+".bmdl")):
            pass
        else:
            print("Making structures")
            # make input files
            alloy.lattice.set_values(jobname=hcpname,latpath="",
                                     lat='hcp',kappaw=[0.0,-20.0],msgl=0,ca=ca,
                                     dmax=2.2)
            alloy.lattice.bmdl.write_input_file(folder=strucpath)
            alloy.lattice.kstr.write_input_file(folder=strucpath)
            alloy.lattice.shape.write_input_file(folder=strucpath)
            alloy.lattice.batch.write_input_file(folder=strucpath)
        # Make kfcd and kgrn input files
        alloy.bulk(lat='hcp',
                   jobname=finalname+"_hcp",
                   latpath=latpath,
                   latname=hcpname,
                   sws=sws0,
                   ca= ca,
                   atoms = s,
                   concs = c,
                   splts = split,
                   afm = afm,
                   amix=0.02,
                   efmix=0.9,
                   expan='M',
                   sofc='Y',
                   xc='PBE',
                   nky=21,
                   nkz=17)
        alloy.write_inputs()
        results.append([[s,c],sc_res])
# Print a summary table of the equilibrium results per system.
# Each sc_res entry is [energy, bulk modulus, sws] (+ c/a for hcp),
# so the header column order is E B sws (fixed: it previously read
# "E sws B", which did not match the printed value order), and the
# hcp row label is fixed ("hpc" -> "hcp").
print("Results obtained:")
for r in results:
    # Generate system name, e.g. "Al1.0" or "Fe0.5Cr0.5".
    sname = ""
    for i in range(len(r[0][0])):
        sname = sname + r[0][0][i] + str(r[0][1][i])
    output = "System: " + sname + "\n"
    output = output + " Magn: " + magn + "\n"
    bcc = r[1][0]
    output = output + "# Strc. E B sws (c/a)\n"
    output = output + " bcc: %f %f %f\n" % (bcc[0], bcc[1], bcc[2])
    fcc = r[1][1]
    output = output + " fcc: %f %f %f\n" % (fcc[0], fcc[1], fcc[2])
    hcp = r[1][2]
    output = output + " hcp: %f %f %f %f\n" % (hcp[0], hcp[1], hcp[2], hcp[3])
    print(output)
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import math
import time
import warnings
import django
from django import template
from django.core.cache import cache
from django.utils.safestring import mark_safe
from django.utils.encoding import smart_text
from django.utils.html import escape
from django.utils.translation import ugettext as _, ungettext
from django.utils import dateformat
from django.utils.timezone import timedelta
from django.utils.timezone import now as tznow
from pybb.compat import is_authenticated, is_anonymous
from pybb.models import TopicReadTracker, ForumReadTracker, PollAnswerUser, Topic, Post
from pybb.permissions import perms
from pybb import defaults, util, compat
# Template tag/filter registry for this module.
register = template.Library()
# Django 1.9+ removed Library.assignment_tag (simple_tag gained its
# behaviour); alias it so the @register.assignment_tag decorators below
# keep working on both old and new Django.
if django.VERSION >= (1, 9):
    register.assignment_tag = register.simple_tag
#noinspection PyUnusedLocal
@register.tag
def pybb_time(parser, token):
    """Template tag ``{% pybb_time <time> %}``: render a humanized time.

    Expects exactly one argument; otherwise raises TemplateSyntaxError.
    """
    try:
        tag, context_time = token.split_contents()
    except ValueError: # pragma: no cover
        raise template.TemplateSyntaxError('pybb_time requires single argument')
    else:
        return PybbTimeNode(context_time)
@register.assignment_tag(takes_context=True)
def pybb_get_time(context, context_time):
    """Assignment tag: humanized *context_time* for the current request user."""
    return pybb_user_time(context_time, context['user'])
class PybbTimeNode(template.Node):
    """Template node that renders a time humanized via pybb_user_time."""
    def __init__(self, time):
        #noinspection PyRedeclaration
        self.time = template.Variable(time)
    def render(self, context):
        """Resolve the time variable and humanize it for the request user."""
        context_time = self.time.resolve(context)
        return pybb_user_time(context_time, context['user'])
def pybb_user_time(context_time, user):
    """Humanize *context_time* relative to now for *user*.

    Returns "N seconds/minutes ago" for very recent times, "today, HH:MM" /
    "yesterday, HH:MM" for nearby days, and a formatted date otherwise.
    Authenticated users get the time shifted into their profile time zone.
    """
    # Compute "now" once so delta and the day boundaries agree.
    now = tznow()
    delta = now - context_time
    # Fix: microseconds must be zeroed too, otherwise "today" is up to a
    # second past midnight and the day-boundary comparisons below are off.
    today = now.replace(hour=0, minute=0, second=0, microsecond=0)
    yesterday = today - timedelta(days=1)
    tomorrow = today + timedelta(days=1)
    if delta.days == 0:
        if delta.seconds < 60:
            msg = ungettext('%d second ago', '%d seconds ago', delta.seconds)
            return msg % delta.seconds
        elif delta.seconds < 3600:
            minutes = int(delta.seconds / 60)
            msg = ungettext('%d minute ago', '%d minutes ago', minutes)
            return msg % minutes
    if is_authenticated(user):
        # Shift into the user's profile time zone (hours east of local tz).
        if time.daylight: # pragma: no cover
            tz1 = time.altzone
        else: # pragma: no cover
            tz1 = time.timezone
        tz = tz1 + util.get_pybb_profile(user).time_zone * 60 * 60
        context_time = context_time + timedelta(seconds=tz)
    if today < context_time < tomorrow:
        return _('today, %s') % context_time.strftime('%H:%M')
    elif yesterday < context_time < today:
        return _('yesterday, %s') % context_time.strftime('%H:%M')
    else:
        return dateformat.format(context_time, 'd M, Y H:i')
@register.simple_tag
def pybb_link(object, anchor=''):
    """Render an HTML anchor tag pointing at *object*.

    The href comes from the object's ``get_absolute_url`` (None when the
    method is missing or returns a falsy value); the link text defaults to
    the object's text representation and is always HTML-escaped.
    """
    url = None
    if hasattr(object, 'get_absolute_url'):
        url = object.get_absolute_url() or None
    if not anchor:
        anchor = smart_text(object)
    return mark_safe('<a href="%s">%s</a>' % (url, escape(anchor)))
@register.filter
def pybb_topic_moderated_by(topic, user): # pragma: no cover
    """
    Check if user is moderator of topic's forum.

    Deprecated: kept only so old templates keep working.
    """
    message = ("pybb_topic_moderated_by filter is deprecated and will be removed in later releases. "
               "Use pybb_may_moderate_topic(user, topic) filter instead")
    warnings.warn(message, DeprecationWarning)
    return perms.may_moderate_topic(user, topic)
@register.filter
def pybb_editable_by(post, user): # pragma: no cover
    """
    Check if the post could be edited by the user.

    Deprecated: kept only so old templates keep working.
    """
    message = ("pybb_editable_by filter is deprecated and will be removed in later releases. "
               "Use pybb_may_edit_post(user, post) filter instead")
    warnings.warn(message, DeprecationWarning)
    return perms.may_edit_post(user, post)
@register.filter
def pybb_posted_by(post, user):
    """Template filter: True when *post* was written by *user*."""
    return user == post.user
@register.filter
def pybb_is_topic_unread(topic, user):
    """Template filter: True if *user* has unread content in *topic*.

    Anonymous users never see topics as unread.  A topic counts as read
    when either a forum-level or a topic-level read mark is at least as
    recent as the topic's last change.
    """
    if not is_authenticated(user):
        return False
    last_change = topic.updated or topic.created
    forum_read = ForumReadTracker.objects.filter(
        forum=topic.forum,
        user=user.id,
        time_stamp__gte=last_change).exists()
    topic_read = TopicReadTracker.objects.filter(
        topic=topic,
        user=user.id,
        time_stamp__gte=last_change).exists()
    return not (forum_read or topic_read)
@register.filter
def pybb_topic_unread(topics, user):
    """
    Mark all topics in queryset/list with .unread for target user

    Topics start as unread and are cleared by either a forum-level read
    mark newer than the topic's last change, or a topic-level read mark.
    Anonymous users get no marks at all (attribute left unset).
    """
    topic_list = list(topics)
    if is_authenticated(user):
        for topic in topic_list:
            topic.unread = True
        # Forum-level read marks: clear topics not changed since the mark.
        forums_ids = [f.forum_id for f in topic_list]
        forum_marks = dict([(m.forum_id, m.time_stamp)
                            for m
                            in ForumReadTracker.objects.filter(user=user, forum__in=forums_ids)])
        if len(forum_marks):
            for topic in topic_list:
                topic_updated = topic.updated or topic.created
                if topic.forum.id in forum_marks and topic_updated <= forum_marks[topic.forum.id]:
                    topic.unread = False
        # Topic-level read marks override forum marks per topic.
        qs = TopicReadTracker.objects.filter(user=user, topic__in=topic_list).select_related('topic')
        topic_marks = list(qs)
        topic_dict = dict(((topic.id, topic) for topic in topic_list))
        for mark in topic_marks:
            # NOTE(review): unlike above, .updated is used without the
            # "or .created" fallback — a None updated would raise here;
            # presumably tracked topics always have updated set — confirm.
            if topic_dict[mark.topic.id].updated <= mark.time_stamp:
                topic_dict[mark.topic.id].unread = False
    return topic_list
@register.filter
def pybb_forum_unread(forums, user):
    """
    Check if forum has unread messages.

    Marks each forum with .unread; a forum is considered read when its
    read mark is current AND (recursively) none of its child forums are
    unread.  Anonymous users get forums marked only by topic count.
    """
    forum_list = list(forums)
    if is_authenticated(user):
        for forum in forum_list:
            # Empty forums can never be unread.
            forum.unread = forum.topic_count > 0
        forum_marks = ForumReadTracker.objects.filter(
            user=user,
            forum__in=forum_list
        ).select_related('forum')
        forum_dict = dict(((forum.id, forum) for forum in forum_list))
        for mark in forum_marks:
            curr_forum = forum_dict[mark.forum.id]
            if (curr_forum.updated is None) or (curr_forum.updated <= mark.time_stamp):
                # Recurse into children: the forum is only read if every
                # child forum is read too (one query per child level).
                if not any((f.unread for f in pybb_forum_unread(curr_forum.child_forums.all(), user))):
                    forum_dict[mark.forum.id].unread = False
    return forum_list
@register.filter
def pybb_topic_inline_pagination(topic):
    """Return page numbers for a topic's inline pagination.

    Topics of up to five pages list every page; longer topics are
    shortened to pages 1-4, an ellipsis, and the final page number.
    """
    pages = int(math.ceil(topic.post_count / float(defaults.PYBB_TOPIC_PAGE_SIZE)))
    if pages > 5:
        return list(range(1, 5)) + ['...', pages]
    return range(1, pages + 1)
@register.filter
def pybb_topic_poll_not_voted(topic, user):
    """Template filter: True unless *user* has voted in *topic*'s poll."""
    if is_anonymous(user):
        return True
    has_voted = PollAnswerUser.objects.filter(
        poll_answer__topic=topic, user=user).exists()
    return not has_voted
@register.filter
def endswith(value, suffix):
    """Template filter: True if *value* ends with *suffix*.

    Parameters renamed (was ``str``/``substr``): the old first parameter
    shadowed the builtin ``str``.  Filters are applied positionally, so
    template usage is unaffected.
    """
    return value.endswith(suffix)
@register.assignment_tag
def pybb_get_profile(*args, **kwargs):
    """Assignment tag: return the pybb profile for a user, or None.

    The user may be passed positionally or as ``user=``.  Any lookup
    failure (no user given, no profile) yields None.
    """
    try:
        return util.get_pybb_profile(kwargs.get('user') or args[0])
    except Exception:
        # Fix: was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt.  Best-effort None is intentional.
        return None
@register.assignment_tag(takes_context=True)
def pybb_get_latest_topics(context, cnt=5, user=None):
    """Return the *cnt* most recently updated topics visible to *user*.

    Falls back to the request user from *context* when no user is given.
    """
    user = user or context['user']
    topics = Topic.objects.all().order_by('-updated', '-created', '-id')
    return perms.filter_topics(user, topics)[:cnt]
@register.assignment_tag(takes_context=True)
def pybb_get_latest_posts(context, cnt=5, user=None):
    """Return the *cnt* most recent posts visible to *user*.

    Falls back to the request user from *context* when no user is given.
    """
    user = user or context['user']
    posts = Post.objects.all().order_by('-created', '-id')
    return perms.filter_posts(user, posts)[:cnt]
def load_perms_filters():
    """Register a ``pybb_``-prefixed template filter for every permission
    method on ``perms`` named ``may*``/``filter*`` taking (self, user) or
    (self, user, obj_or_qs).
    """
    def partial(func_name, perms_obj):
        # Two-argument filter: pybb_<name>(user, obj_or_qs).
        def newfunc(user, obj_or_qs):
            return getattr(perms_obj, func_name)(user, obj_or_qs)
        return newfunc
    def partial_no_param(func_name, perms_obj):
        # One-argument filter: pybb_<name>(user).
        def newfunc(user):
            return getattr(perms_obj, func_name)(user)
        return newfunc
    for method_name, method in inspect.getmembers(perms):
        if not inspect.ismethod(method):
            continue # pragma: no cover - only methods are used to dynamically build templatetags
        if not method_name.startswith('may') and not method_name.startswith('filter'):
            continue # pragma: no cover - only (may|filter)* methods are used to dynamically build templatetags
        method_args = inspect.getargspec(method).args
        args_count = len(method_args)
        if args_count not in (2, 3):
            continue # pragma: no cover - only methods with 2 or 3 params
        if method_args[0] != 'self' or method_args[1] != 'user':
            continue # pragma: no cover - only methods with self and user as first args
        # Reuse the already-computed args_count instead of re-calling
        # inspect.getargspec twice (the previous code re-inspected the
        # method for each branch).
        if args_count == 3:
            register.filter('%s%s' % ('pybb_', method_name), partial(method_name, perms))
        elif args_count == 2:
            register.filter('%s%s' % ('pybb_', method_name), partial_no_param(method_name, perms))
load_perms_filters()
@register.filter
def check_app_installed(app_name):
    """Template filter: True when the Django app *app_name* is installed."""
    return compat.is_installed(app_name)
@register.filter
def pybbm_calc_topic_views(topic):
    """Total views for *topic*: stored views plus cached anonymous views."""
    key = util.build_cache_key('anonymous_topic_views', topic_id=topic.id)
    anonymous_views = cache.get(key, 0)
    return topic.views + anonymous_views
| |
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# Copyright (c) 2013 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.util import orderdict
from slicc.symbols.Symbol import Symbol
from slicc.symbols.Var import Var
import slicc.generate.html as html
import re
# Maps SLICC/C++ parameter type names to the Python (SimObject) parameter
# classes emitted into the generated controller .py files.
python_class_map = {
    "int": "Int",
    "NodeID": "Int",
    "uint32_t" : "UInt32",
    "std::string": "String",
    "bool": "Bool",
    "CacheMemory": "RubyCache",
    "WireBuffer": "RubyWireBuffer",
    "Sequencer": "RubySequencer",
    "GPUCoalescer" : "RubyGPUCoalescer",
    "VIPERCoalescer" : "VIPERCoalescer",
    "DirectoryMemory": "RubyDirectoryMemory",
    "PerfectCacheMemory": "RubyPerfectCacheMemory",
    "MemoryControl": "MemoryControl",
    "MessageBuffer": "MessageBuffer",
    "DMASequencer": "DMASequencer",
    "Prefetcher":"Prefetcher",
    "Cycles":"Cycles",
   }
class StateMachine(Symbol):
    def __init__(self, symtab, ident, location, pairs, config_parameters):
        """Create a state machine symbol and register its config params.

        Each config parameter becomes a Var in the symbol table; pointer
        params are referenced via ``(*m_<name>_ptr)`` in generated C++.
        """
        super(StateMachine, self).__init__(symtab, ident, location, pairs)
        self.table = None
        # Data members in the State Machine that have been declared before
        # the opening brace '{'  of the machine.  Note that these along with
        # the members in self.objects form the entire set of data members.
        self.config_parameters = config_parameters
        self.prefetchers = []
        for param in config_parameters:
            if param.pointer:
                var = Var(symtab, param.ident, location, param.type_ast.type,
                          "(*m_%s_ptr)" % param.ident, {}, self)
            else:
                var = Var(symtab, param.ident, location, param.type_ast.type,
                          "m_%s" % param.ident, {}, self)
            self.symtab.registerSym(param.ident, var)
            if str(param.type_ast.type) == "Prefetcher":
                self.prefetchers.append(var)
        # Ordered registries filled in by the add* methods below.
        self.states = orderdict()
        self.events = orderdict()
        self.actions = orderdict()
        self.request_types = orderdict()
        self.transitions = []
        self.in_ports = []
        self.functions = []
        # Data members in the State Machine that have been declared inside
        # the {} machine.  Note that these along with the config params
        # form the entire set of data members of the machine.
        self.objects = []
        self.TBEType   = None
        self.EntryType = None
        # Debug flags the generated controller will compile against.
        self.debug_flags = set()
        self.debug_flags.add('RubyGenerated')
        self.debug_flags.add('RubySlicc')
    def __repr__(self):
        """Short debugging representation, e.g. ``[StateMachine: L1Cache]``."""
        return "[StateMachine: %s]" % self.ident
    def addState(self, state):
        """Register a state; must be called before buildTable()."""
        assert self.table is None
        self.states[state.ident] = state
    def addEvent(self, event):
        """Register an event; must be called before buildTable()."""
        assert self.table is None
        self.events[event.ident] = event
    def addAction(self, action):
        """Register an action, rejecting duplicate names or shorthands."""
        assert self.table is None

        # Check for duplicate action
        for other in self.actions.itervalues():
            if action.ident == other.ident:
                action.warning("Duplicate action definition: %s" % action.ident)
                action.error("Duplicate action definition: %s" % action.ident)
            if action.short == other.short:
                other.warning("Duplicate action shorthand: %s" % other.ident)
                other.warning("    shorthand = %s" % other.short)
                action.warning("Duplicate action shorthand: %s" % action.ident)
                action.error("    shorthand = %s" % action.short)
        self.actions[action.ident] = action
    def addDebugFlag(self, flag):
        """Add a debug flag for the generated controller code."""
        self.debug_flags.add(flag)
    def addRequestType(self, request_type):
        """Register a request type; must be called before buildTable()."""
        assert self.table is None
        self.request_types[request_type.ident] = request_type
    def addTransition(self, trans):
        """Queue a transition; duplicates are detected later in buildTable()."""
        assert self.table is None
        self.transitions.append(trans)
    def addInPort(self, var):
        """Record an in_port declaration (used later by getBufferMaps)."""
        self.in_ports.append(var)
    def addFunc(self, func):
        """Register a machine-level function in the symbol table."""
        # register func in the symbol table
        self.symtab.registerSym(str(func), func)
        self.functions.append(func)
    def addObject(self, obj):
        """Register a data member declared inside the machine's braces."""
        self.symtab.registerSym(str(obj), obj)
        self.objects.append(obj)
def addType(self, type):
    """Classify *type* if it is this machine's TBE or cache-entry type.

    <ident>_TBE becomes TBEType; a type whose "interface" pair is
    AbstractCacheEntry (and is not marked main=false) becomes EntryType.
    Any other type is ignored here.
    """
    type_ident = '%s' % type.c_ident
    if type_ident == "%s_TBE" % self.ident:
        # Transaction buffer entry: at most one per machine.
        if self.TBEType is not None:
            self.error("Multiple Transaction Buffer types in a "
                       "single machine.")
        self.TBEType = type
    elif "interface" in type and "AbstractCacheEntry" == type["interface"]:
        if "main" in type and "false" == type["main"].lower():
            # Explicitly marked main=false: not the primary entry type.
            pass
        else:
            if self.EntryType is not None:
                self.error("Multiple AbstractCacheEntry types in a "
                           "single machine.")
            self.EntryType = type
# Needs to be called before accessing the table
def buildTable(self):
    """Freeze the machine: build the (state, event) -> transition map.

    Marks every action referenced by some transition as used, reports
    duplicate (state, event) pairs, and warns about actions no
    transition ever triggers.
    """
    assert self.table is None
    table = {}
    for trans in self.transitions:
        # Flag the actions this transition touches so unused ones can
        # be reported below.  (Ideally the symbol table would track
        # usage for states/events too.)
        for action in trans.actions:
            action.used = True
        key = (trans.state, trans.event)
        if key in table:
            table[key].warning("Duplicate transition: %s" % table[key])
            trans.error("Duplicate transition: %s" % trans)
        table[key] = trans
    # Complain about actions never referenced by any transition.
    for action in self.actions.itervalues():
        if not action.used:
            error_msg = "Unused action: %s" % action.ident
            if "desc" in action:
                error_msg += ", " + action.desc
            action.warning(error_msg)
    self.table = table
# determine the port->msg buffer mappings
def getBufferMaps(self, ident):
    """Map in_ports to message-buffer slots.

    Returns (port_to_buf_map, in_msg_bufs, msg_bufs): msg_bufs holds one
    entry per port (a buffer shared by several ports appears once per
    port), port_to_buf_map gives each port the index of the *first*
    occurrence of its buffer name, and in_msg_bufs groups the ports by
    buffer member name.
    """
    msg_bufs = []
    port_to_buf_map = {}
    in_msg_bufs = {}
    for port in self.in_ports:
        name = "m_%s_ptr" % port.pairs["buffer_expr"].name
        msg_bufs.append(name)
        # .index() returns the first occurrence, so ports that share a
        # buffer map to the same slot.
        port_to_buf_map[port] = msg_bufs.index(name)
        in_msg_bufs.setdefault(name, []).append(port)
    return port_to_buf_map, in_msg_bufs, msg_bufs
def writeCodeFiles(self, path, includes):
    """Emit every generated source file for this controller.

    Order: Python SimObject wrapper, C++ header, C++ implementation,
    transition switch, wakeup loop.
    """
    self.printControllerPython(path)
    self.printControllerHH(path)
    self.printControllerCC(path, includes)
    self.printCSwitch(path)
    self.printCWakeup(path, includes)
def printControllerPython(self, path):
    """Emit the SimObject parameter class file <ident>_Controller.py.

    NOTE: the codeFormatter substitutes $name / ${{expr}} from this
    function's local variables, so local names here are load-bearing.
    """
    code = self.symtab.codeFormatter()
    ident = self.ident
    py_ident = "%s_Controller" % ident
    c_ident = "%s_Controller" % self.ident
    code('''
from m5.params import *
from m5.SimObject import SimObject
from Controller import RubyController
class $py_ident(RubyController):
    type = '$py_ident'
    cxx_header = 'mem/protocol/${c_ident}.hh'
''')
    code.indent()
    # One Param.* declaration per SLICC config parameter; the C++ type
    # must have a known Python equivalent in python_class_map.
    for param in self.config_parameters:
        dflt_str = ''
        if param.rvalue is not None:
            # Default value comes from the parameter's rvalue expression.
            dflt_str = str(param.rvalue.inline()) + ', '
        if python_class_map.has_key(param.type_ast.type.c_ident):
            python_type = python_class_map[param.type_ast.type.c_ident]
            code('${{param.ident}} = Param.${{python_type}}(${dflt_str}"")')
        else:
            self.error("Unknown c++ to python class conversion for c++ " \
                       "type: '%s'. Please update the python_class_map " \
                       "in StateMachine.py", param.type_ast.type.c_ident)
    code.dedent()
    code.write(path, '%s.py' % py_ident)
def printControllerHH(self, path):
    '''Output the method declarations for the class declaration'''
    # Emits <ident>_Controller.hh: the AbstractController subclass with
    # one member per config parameter / SLICC object and one method
    # prototype per action.  The codeFormatter substitutes $name and
    # ${{expr}} from this function's locals, so local names matter.
    code = self.symtab.codeFormatter()
    ident = self.ident
    c_ident = "%s_Controller" % self.ident
    code('''
/** \\file $c_ident.hh
 *
 * Auto generated C++ code started by $__file__:$__line__
 * Created by slicc definition of Module "${{self.short}}"
 */
#ifndef __${ident}_CONTROLLER_HH__
#define __${ident}_CONTROLLER_HH__
#include <iostream>
#include <sstream>
#include <string>
#include "mem/protocol/TransitionResult.hh"
#include "mem/protocol/Types.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "params/$c_ident.hh"
''')
    # Include each non-primitive object type exactly once.
    seen_types = set()
    for var in self.objects:
        if var.type.ident not in seen_types and not var.type.isPrimitive:
            code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
            seen_types.add(var.type.ident)
    # for adding information to the protocol debug trace
    code('''
extern std::stringstream ${ident}_transitionComment;
class $c_ident : public AbstractController
{
public:
typedef ${c_ident}Params Params;
$c_ident(const Params *p);
static int getNumControllers();
void init();
MessageBuffer *getMandatoryQueue() const;
MessageBuffer *getMemoryQueue() const;
void initNetQueues();
void print(std::ostream& out) const;
void wakeup();
void resetStats();
void regStats();
void collateStats();
void recordCacheTrace(int cntrl, CacheRecorder* tr);
Sequencer* getCPUSequencer() const;
GPUCoalescer* getGPUCoalescer() const;
int functionalWriteBuffers(PacketPtr&);
void countTransition(${ident}_State state, ${ident}_Event event);
void possibleTransition(${ident}_State state, ${ident}_Event event);
uint64_t getEventCount(${ident}_Event event);
bool isPossible(${ident}_State state, ${ident}_Event event);
uint64_t getTransitionCount(${ident}_State state, ${ident}_Event event);
private:
''')
    code.indent()
    # added by SS
    # One member per config parameter (pointer or by-value).
    for param in self.config_parameters:
        if param.pointer:
            code('${{param.type_ast.type}}* m_${{param.ident}}_ptr;')
        else:
            code('${{param.type_ast.type}} m_${{param.ident}};')
    # doTransition / doTransitionWorker signatures vary with whether the
    # machine declares cache-entry and/or TBE types.
    code('''
TransitionResult doTransition(${ident}_Event event,
''')
    if self.EntryType != None:
        code('''
${{self.EntryType.c_ident}}* m_cache_entry_ptr,
''')
    if self.TBEType != None:
        code('''
${{self.TBEType.c_ident}}* m_tbe_ptr,
''')
    code('''
Addr addr);
TransitionResult doTransitionWorker(${ident}_Event event,
${ident}_State state,
${ident}_State& next_state,
''')
    if self.TBEType != None:
        code('''
${{self.TBEType.c_ident}}*& m_tbe_ptr,
''')
    if self.EntryType != None:
        code('''
${{self.EntryType.c_ident}}*& m_cache_entry_ptr,
''')
    code('''
Addr addr);
int m_counters[${ident}_State_NUM][${ident}_Event_NUM];
int m_event_counters[${ident}_Event_NUM];
bool m_possible[${ident}_State_NUM][${ident}_Event_NUM];
static std::vector<Stats::Vector *> eventVec;
static std::vector<std::vector<Stats::Vector *> > transVec;
static int m_num_controllers;
// Internal functions
''')
    # Prototypes for SLICC-defined helper functions.
    for func in self.functions:
        proto = func.prototype
        if proto:
            code('$proto')
    if self.EntryType != None:
        code('''
// Set and Reset for cache_entry variable
void set_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, AbstractCacheEntry* m_new_cache_entry);
void unset_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr);
''')
    if self.TBEType != None:
        code('''
// Set and Reset for tbe variable
void set_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${ident}_TBE* m_new_tbe);
void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr);
''')
    # Prototype the actions that the controller can take
    code('''
// Actions
''')
    if self.TBEType != None and self.EntryType != None:
        for action in self.actions.itervalues():
            code('/** \\brief ${{action.desc}} */')
            code('void ${{action.ident}}(${{self.TBEType.c_ident}}*& '
                 'm_tbe_ptr, ${{self.EntryType.c_ident}}*& '
                 'm_cache_entry_ptr, Addr addr);')
    elif self.TBEType != None:
        for action in self.actions.itervalues():
            code('/** \\brief ${{action.desc}} */')
            code('void ${{action.ident}}(${{self.TBEType.c_ident}}*& '
                 'm_tbe_ptr, Addr addr);')
    elif self.EntryType != None:
        for action in self.actions.itervalues():
            code('/** \\brief ${{action.desc}} */')
            code('void ${{action.ident}}(${{self.EntryType.c_ident}}*& '
                 'm_cache_entry_ptr, Addr addr);')
    else:
        for action in self.actions.itervalues():
            code('/** \\brief ${{action.desc}} */')
            code('void ${{action.ident}}(Addr addr);')
    # the controller internal variables
    code('''
// Objects
''')
    for var in self.objects:
        th = var.get("template", "")
        code('${{var.type.c_ident}}$th* m_${{var.ident}}_ptr;')
    code.dedent()
    code('};')
    # Fixed: trailer comment now matches the include-guard macro
    # (__..._CONTROLLER_HH__, not __..._CONTROLLER_H__).
    code('#endif // __${ident}_CONTROLLER_HH__')
    code.write(path, '%s.hh' % c_ident)
def printControllerCC(self, path, includes):
    '''Output the actions for performing the actions'''
    # Emits <ident>_Controller.cc: constructor, queue/statistics
    # plumbing, and one C++ member function per SLICC action.  The
    # codeFormatter substitutes $name / ${{expr}} from this function's
    # locals, so local names here are load-bearing.
    code = self.symtab.codeFormatter()
    ident = self.ident
    c_ident = "%s_Controller" % self.ident
    code('''
/** \\file $c_ident.cc
 *
 * Auto generated C++ code started by $__file__:$__line__
 * Created by slicc definition of Module "${{self.short}}"
 */
#include <sys/types.h>
#include <unistd.h>
#include <cassert>
#include <sstream>
#include <string>
#include <typeinfo>
#include "base/compiler.hh"
#include "mem/ruby/common/BoolVec.hh"
#include "base/cprintf.hh"
''')
    # One debug header per flag registered via addDebugFlag().
    for f in self.debug_flags:
        code('#include "debug/${{f}}.hh"')
    code('''
#include "mem/protocol/${ident}_Controller.hh"
#include "mem/protocol/${ident}_Event.hh"
#include "mem/protocol/${ident}_State.hh"
#include "mem/protocol/Types.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/system/RubySystem.hh"
''')
    for include_path in includes:
        code('#include "${{include_path}}"')
    code('''
using namespace std;
''')
    # include object classes
    seen_types = set()
    for var in self.objects:
        if var.type.ident not in seen_types and not var.type.isPrimitive:
            code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
            seen_types.add(var.type.ident)
    num_in_ports = len(self.in_ports)
    code('''
$c_ident *
${c_ident}Params::create()
{
return new $c_ident(this);
}
int $c_ident::m_num_controllers = 0;
std::vector<Stats::Vector *> $c_ident::eventVec;
std::vector<std::vector<Stats::Vector *> > $c_ident::transVec;
// for adding information to the protocol debug trace
stringstream ${ident}_transitionComment;
#ifndef NDEBUG
#define APPEND_TRANSITION_COMMENT(str) (${ident}_transitionComment << str)
#else
#define APPEND_TRANSITION_COMMENT(str) do {} while (0)
#endif
/** \\brief constructor */
$c_ident::$c_ident(const Params *p)
: AbstractController(p)
{
m_machineID.type = MachineType_${ident};
m_machineID.num = m_version;
m_num_controllers++;
m_in_ports = $num_in_ports;
''')
    code.indent()
    #
    # After initializing the universal machine parameters, initialize the
    # this machines config parameters. Also if these configuration params
    # include a sequencer, connect the it to the controller.
    #
    for param in self.config_parameters:
        if param.pointer:
            code('m_${{param.ident}}_ptr = p->${{param.ident}};')
        else:
            code('m_${{param.ident}} = p->${{param.ident}};')
        # Sequencer/coalescer-like parameters get a back-pointer to this
        # controller.
        if re.compile("sequencer").search(param.ident) or \
           param.type_ast.type.c_ident == "GPUCoalescer" or \
           param.type_ast.type.c_ident == "VIPERCoalescer":
            code('''
if (m_${{param.ident}}_ptr != NULL) {
m_${{param.ident}}_ptr->setController(this);
}
''')
    code('''
for (int state = 0; state < ${ident}_State_NUM; state++) {
for (int event = 0; event < ${ident}_Event_NUM; event++) {
m_possible[state][event] = false;
m_counters[state][event] = 0;
}
}
for (int event = 0; event < ${ident}_Event_NUM; event++) {
m_event_counters[event] = 0;
}
''')
    code.dedent()
    code('''
}
void
$c_ident::initNetQueues()
{
MachineType machine_type = string_to_MachineType("${{self.ident}}");
int base M5_VAR_USED = MachineType_base_number(machine_type);
''')
    code.indent()
    # set for maintaining the vnet, direction pairs already seen for this
    # machine. This map helps in implementing the check for avoiding
    # multiple message buffers being mapped to the same vnet.
    vnet_dir_set = set()
    for var in self.config_parameters:
        vid = "m_%s_ptr" % var.ident
        if "network" in var:
            vtype = var.type_ast.type
            code('assert($vid != NULL);')
            # Network port object
            network = var["network"]
            if "virtual_network" in var:
                vnet = var["virtual_network"]
                vnet_type = var["vnet_type"]
                assert (vnet, network) not in vnet_dir_set
                vnet_dir_set.add((vnet,network))
                code('''
m_net_ptr->set${network}NetQueue(m_version + base, $vid->getOrdered(), $vnet,
"$vnet_type", $vid);
''')
            # Set Priority
            if "rank" in var:
                code('$vid->setPriority(${{var["rank"]}})')
    code.dedent()
    code('''
}
void
$c_ident::init()
{
// initialize objects
''')
    code.indent()
    for var in self.objects:
        vtype = var.type
        vid = "m_%s_ptr" % var.ident
        if "network" not in var:
            # Not a network port object
            if "primitive" in vtype:
                code('$vid = new ${{vtype.c_ident}};')
                if "default" in var:
                    code('(*$vid) = ${{var["default"]}};')
            else:
                # Normal Object
                th = var.get("template", "")
                expr = "%s = new %s%s" % (vid, vtype.c_ident, th)
                args = ""
                if "non_obj" not in vtype and not vtype.isEnumeration:
                    args = var.get("constructor", "")
                code('$expr($args);')
                code('assert($vid != NULL);')
                if "default" in var:
                    code('*$vid = ${{var["default"]}}; // Object default')
                elif "default" in vtype:
                    comment = "Type %s default" % vtype.ident
                    code('*$vid = ${{vtype["default"]}}; // $comment')
    # Set the prefetchers
    code()
    for prefetcher in self.prefetchers:
        code('${{prefetcher.code}}.setController(this);')
    code()
    for port in self.in_ports:
        # Set the queue consumers
        code('${{port.code}}.setConsumer(this);')
    # Initialize the transition profiling
    code()
    for trans in self.transitions:
        # Figure out if we stall
        stall = False
        for action in trans.actions:
            if action.ident == "z_stall":
                stall = True
        # Only possible if it is not a 'z' case
        if not stall:
            state = "%s_State_%s" % (self.ident, trans.state.ident)
            event = "%s_Event_%s" % (self.ident, trans.event.ident)
            code('possibleTransition($state, $event);')
    code.dedent()
    code('''
AbstractController::init();
resetStats();
}
''')
    # Identify (by member name) the mandatory queue, memory-response
    # queue, sequencer and coalescer, if this machine has them.
    mq_ident = "NULL"
    for port in self.in_ports:
        if port.code.find("mandatoryQueue_ptr") >= 0:
            mq_ident = "m_mandatoryQueue_ptr"
    memq_ident = "NULL"
    for port in self.in_ports:
        if port.code.find("responseFromMemory_ptr") >= 0:
            memq_ident = "m_responseFromMemory_ptr"
    seq_ident = "NULL"
    for param in self.config_parameters:
        if param.ident == "sequencer":
            assert(param.pointer)
            seq_ident = "m_%s_ptr" % param.ident
    coal_ident = "NULL"
    for param in self.config_parameters:
        if param.ident == "coalescer":
            assert(param.pointer)
            coal_ident = "m_%s_ptr" % param.ident
    if seq_ident != "NULL":
        code('''
Sequencer*
$c_ident::getCPUSequencer() const
{
if (NULL != $seq_ident && $seq_ident->isCPUSequencer()) {
return $seq_ident;
} else {
return NULL;
}
}
''')
    else:
        code('''
Sequencer*
$c_ident::getCPUSequencer() const
{
return NULL;
}
''')
    if coal_ident != "NULL":
        code('''
GPUCoalescer*
$c_ident::getGPUCoalescer() const
{
if (NULL != $coal_ident && !$coal_ident->isCPUSequencer()) {
return $coal_ident;
} else {
return NULL;
}
}
''')
    else:
        code('''
GPUCoalescer*
$c_ident::getGPUCoalescer() const
{
return NULL;
}
''')
    code('''
void
$c_ident::regStats()
{
AbstractController::regStats();
if (m_version == 0) {
for (${ident}_Event event = ${ident}_Event_FIRST;
event < ${ident}_Event_NUM; ++event) {
Stats::Vector *t = new Stats::Vector();
t->init(m_num_controllers);
t->name(params()->ruby_system->name() + ".${c_ident}." +
${ident}_Event_to_string(event));
t->flags(Stats::pdf | Stats::total | Stats::oneline |
Stats::nozero);
eventVec.push_back(t);
}
for (${ident}_State state = ${ident}_State_FIRST;
state < ${ident}_State_NUM; ++state) {
transVec.push_back(std::vector<Stats::Vector *>());
for (${ident}_Event event = ${ident}_Event_FIRST;
event < ${ident}_Event_NUM; ++event) {
Stats::Vector *t = new Stats::Vector();
t->init(m_num_controllers);
t->name(params()->ruby_system->name() + ".${c_ident}." +
${ident}_State_to_string(state) +
"." + ${ident}_Event_to_string(event));
t->flags(Stats::pdf | Stats::total | Stats::oneline |
Stats::nozero);
transVec[state].push_back(t);
}
}
}
}
void
$c_ident::collateStats()
{
for (${ident}_Event event = ${ident}_Event_FIRST;
event < ${ident}_Event_NUM; ++event) {
for (unsigned int i = 0; i < m_num_controllers; ++i) {
RubySystem *rs = params()->ruby_system;
std::map<uint32_t, AbstractController *>::iterator it =
rs->m_abstract_controls[MachineType_${ident}].find(i);
assert(it != rs->m_abstract_controls[MachineType_${ident}].end());
(*eventVec[event])[i] =
(($c_ident *)(*it).second)->getEventCount(event);
}
}
for (${ident}_State state = ${ident}_State_FIRST;
state < ${ident}_State_NUM; ++state) {
for (${ident}_Event event = ${ident}_Event_FIRST;
event < ${ident}_Event_NUM; ++event) {
for (unsigned int i = 0; i < m_num_controllers; ++i) {
RubySystem *rs = params()->ruby_system;
std::map<uint32_t, AbstractController *>::iterator it =
rs->m_abstract_controls[MachineType_${ident}].find(i);
assert(it != rs->m_abstract_controls[MachineType_${ident}].end());
(*transVec[state][event])[i] =
(($c_ident *)(*it).second)->getTransitionCount(state, event);
}
}
}
}
void
$c_ident::countTransition(${ident}_State state, ${ident}_Event event)
{
assert(m_possible[state][event]);
m_counters[state][event]++;
m_event_counters[event]++;
}
void
$c_ident::possibleTransition(${ident}_State state,
${ident}_Event event)
{
m_possible[state][event] = true;
}
uint64_t
$c_ident::getEventCount(${ident}_Event event)
{
return m_event_counters[event];
}
bool
$c_ident::isPossible(${ident}_State state, ${ident}_Event event)
{
return m_possible[state][event];
}
uint64_t
$c_ident::getTransitionCount(${ident}_State state,
${ident}_Event event)
{
return m_counters[state][event];
}
int
$c_ident::getNumControllers()
{
return m_num_controllers;
}
MessageBuffer*
$c_ident::getMandatoryQueue() const
{
return $mq_ident;
}
MessageBuffer*
$c_ident::getMemoryQueue() const
{
return $memq_ident;
}
void
$c_ident::print(ostream& out) const
{
out << "[$c_ident " << m_version << "]";
}
void $c_ident::resetStats()
{
for (int state = 0; state < ${ident}_State_NUM; state++) {
for (int event = 0; event < ${ident}_Event_NUM; event++) {
m_counters[state][event] = 0;
}
}
for (int event = 0; event < ${ident}_Event_NUM; event++) {
m_event_counters[event] = 0;
}
AbstractController::resetStats();
}
''')
    if self.EntryType != None:
        code('''
// Set and Reset for cache_entry variable
void
$c_ident::set_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, AbstractCacheEntry* m_new_cache_entry)
{
m_cache_entry_ptr = (${{self.EntryType.c_ident}}*)m_new_cache_entry;
}
void
$c_ident::unset_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr)
{
m_cache_entry_ptr = 0;
}
''')
    if self.TBEType != None:
        code('''
// Set and Reset for tbe variable
void
$c_ident::set_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.TBEType.c_ident}}* m_new_tbe)
{
m_tbe_ptr = m_new_tbe;
}
void
$c_ident::unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr)
{
m_tbe_ptr = NULL;
}
''')
    code('''
void
$c_ident::recordCacheTrace(int cntrl, CacheRecorder* tr)
{
''')
    #
    # Record cache contents for all associated caches.
    #
    code.indent()
    for param in self.config_parameters:
        if param.type_ast.type.ident == "CacheMemory":
            assert(param.pointer)
            code('m_${{param.ident}}_ptr->recordCacheContents(cntrl, tr);')
    code.dedent()
    code('''
}
// Actions
''')
    # One member function per action; the signature depends on whether
    # the machine declares TBE and/or cache-entry types.  Actions with
    # no c_code pair are skipped.
    if self.TBEType != None and self.EntryType != None:
        for action in self.actions.itervalues():
            if "c_code" not in action:
                continue
            code('''
/** \\brief ${{action.desc}} */
void
$c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.EntryType.c_ident}}*& m_cache_entry_ptr, Addr addr)
{
DPRINTF(RubyGenerated, "executing ${{action.ident}}\\n");
try {
${{action["c_code"]}}
} catch (const RejectException & e) {
fatal("Error in action ${{ident}}:${{action.ident}}: "
"executed a peek statement with the wrong message "
"type specified. ");
}
}
''')
    elif self.TBEType != None:
        for action in self.actions.itervalues():
            if "c_code" not in action:
                continue
            code('''
/** \\brief ${{action.desc}} */
void
$c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, Addr addr)
{
DPRINTF(RubyGenerated, "executing ${{action.ident}}\\n");
${{action["c_code"]}}
}
''')
    elif self.EntryType != None:
        for action in self.actions.itervalues():
            if "c_code" not in action:
                continue
            code('''
/** \\brief ${{action.desc}} */
void
$c_ident::${{action.ident}}(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, Addr addr)
{
DPRINTF(RubyGenerated, "executing ${{action.ident}}\\n");
${{action["c_code"]}}
}
''')
    else:
        for action in self.actions.itervalues():
            if "c_code" not in action:
                continue
            code('''
/** \\brief ${{action.desc}} */
void
$c_ident::${{action.ident}}(Addr addr)
{
DPRINTF(RubyGenerated, "executing ${{action.ident}}\\n");
${{action["c_code"]}}
}
''')
    # SLICC-defined helper functions.
    for func in self.functions:
        code(func.generateCode())
    # Function for functional writes to messages buffered in the controller
    code('''
int
$c_ident::functionalWriteBuffers(PacketPtr& pkt)
{
int num_functional_writes = 0;
''')
    for var in self.objects:
        vtype = var.type
        if vtype.isBuffer:
            vid = "m_%s_ptr" % var.ident
            code('num_functional_writes += $vid->functionalWrite(pkt);')
    for var in self.config_parameters:
        vtype = var.type_ast.type
        if vtype.isBuffer:
            vid = "m_%s_ptr" % var.ident
            code('num_functional_writes += $vid->functionalWrite(pkt);')
    code('''
return num_functional_writes;
}
''')
    code.write(path, "%s.cc" % c_ident)
def printCWakeup(self, path, includes):
    '''Output the wakeup loop for the events'''
    # Emits <ident>_Wakeup.cc: the controller's wakeup() polls each
    # in_port (highest rank first via m_cur_in_port) and counts ports
    # that reject a message so shared-buffer deadlock can be detected.
    # codeFormatter substitutes from this function's locals.
    code = self.symtab.codeFormatter()
    ident = self.ident
    outputRequest_types = True
    if len(self.request_types) == 0:
        outputRequest_types = False
    code('''
// Auto generated C++ code started by $__file__:$__line__
// ${ident}: ${{self.short}}
#include <sys/types.h>
#include <unistd.h>
#include <cassert>
#include <typeinfo>
#include "base/misc.hh"
''')
    for f in self.debug_flags:
        code('#include "debug/${{f}}.hh"')
    code('''
#include "mem/protocol/${ident}_Controller.hh"
#include "mem/protocol/${ident}_Event.hh"
#include "mem/protocol/${ident}_State.hh"
''')
    if outputRequest_types:
        code('''#include "mem/protocol/${ident}_RequestType.hh"''')
    code('''
#include "mem/protocol/Types.hh"
#include "mem/ruby/system/RubySystem.hh"
''')
    for include_path in includes:
        code('#include "${{include_path}}"')
    port_to_buf_map, in_msg_bufs, msg_bufs = self.getBufferMaps(ident)
    code('''
using namespace std;
void
${ident}_Controller::wakeup()
{
int counter = 0;
while (true) {
unsigned char rejected[${{len(msg_bufs)}}];
memset(rejected, 0, sizeof(unsigned char)*${{len(msg_bufs)}});
// Some cases will put us into an infinite loop without this limit
assert(counter <= m_transitions_per_cycle);
if (counter == m_transitions_per_cycle) {
// Count how often we are fully utilized
m_fully_busy_cycles++;
// Wakeup in another cycle and try again
scheduleEvent(Cycles(1));
break;
}
''')
    code.indent()
    code.indent()
    # InPorts
    #
    for port in self.in_ports:
        code.indent()
        code('// ${ident}InPort $port')
        # m_cur_in_port records which port is being polled (for stalls).
        if port.pairs.has_key("rank"):
            code('m_cur_in_port = ${{port.pairs["rank"]}};')
        else:
            code('m_cur_in_port = 0;')
        # Ports that share a buffer are wrapped in try/catch so a
        # rejecting port increments the shared-buffer rejected counter.
        if port in port_to_buf_map:
            code('try {')
            code.indent()
        code('${{port["c_code_in_port"]}}')
        if port in port_to_buf_map:
            code.dedent()
            code('''
} catch (const RejectException & e) {
rejected[${{port_to_buf_map[port]}}]++;
}
''')
        code.dedent()
        code('')
    code.dedent()
    code.dedent()
    code('''
// If we got this far, we have nothing left todo or something went
// wrong''')
    for buf_name, ports in in_msg_bufs.items():
        if len(ports) > 1:
            # only produce checks when a buffer is shared by multiple ports
            code('''
if (${{buf_name}}->isReady(clockEdge()) && rejected[${{port_to_buf_map[ports[0]]}}] == ${{len(ports)}})
{
// no port claimed the message on the top of this buffer
panic("Runtime Error at Ruby Time: %d. "
"All ports rejected a message. "
"You are probably sending a message type to this controller "
"over a virtual network that do not define an in_port for "
"the incoming message type.\\n",
Cycles(1));
}
''')
    code('''
break;
}
}
''')
    code.write(path, "%s_Wakeup.cc" % self.ident)
def printCSwitch(self, path):
    '''Output switch statement for transition table'''
    # Emits <ident>_Transitions.cc: doTransition() dispatches into a
    # switch over HASH_FUN(state, event); identical transition bodies
    # are deduplicated into shared case fall-throughs.  codeFormatter
    # substitutes from this function's (and case's) locals.
    code = self.symtab.codeFormatter()
    ident = self.ident
    code('''
// Auto generated C++ code started by $__file__:$__line__
// ${ident}: ${{self.short}}
#include <cassert>
#include "base/misc.hh"
#include "base/trace.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubyGenerated.hh"
#include "mem/protocol/${ident}_Controller.hh"
#include "mem/protocol/${ident}_Event.hh"
#include "mem/protocol/${ident}_State.hh"
#include "mem/protocol/Types.hh"
#include "mem/ruby/system/RubySystem.hh"
#define HASH_FUN(state, event) ((int(state)*${ident}_Event_NUM)+int(event))
#define GET_TRANSITION_COMMENT() (${ident}_transitionComment.str())
#define CLEAR_TRANSITION_COMMENT() (${ident}_transitionComment.str(""))
TransitionResult
${ident}_Controller::doTransition(${ident}_Event event,
''')
    # Signature varies with the machine's cache-entry / TBE types.
    if self.EntryType != None:
        code('''
${{self.EntryType.c_ident}}* m_cache_entry_ptr,
''')
    if self.TBEType != None:
        code('''
${{self.TBEType.c_ident}}* m_tbe_ptr,
''')
    code('''
Addr addr)
{
''')
    code.indent()
    if self.TBEType != None and self.EntryType != None:
        code('${ident}_State state = getState(m_tbe_ptr, m_cache_entry_ptr, addr);')
    elif self.TBEType != None:
        code('${ident}_State state = getState(m_tbe_ptr, addr);')
    elif self.EntryType != None:
        code('${ident}_State state = getState(m_cache_entry_ptr, addr);')
    else:
        code('${ident}_State state = getState(addr);')
    code('''
${ident}_State next_state = state;
DPRINTF(RubyGenerated, "%s, Time: %lld, state: %s, event: %s, addr: %#x\\n",
*this, curCycle(), ${ident}_State_to_string(state),
${ident}_Event_to_string(event), addr);
TransitionResult result =
''')
    if self.TBEType != None and self.EntryType != None:
        code('doTransitionWorker(event, state, next_state, m_tbe_ptr, m_cache_entry_ptr, addr);')
    elif self.TBEType != None:
        code('doTransitionWorker(event, state, next_state, m_tbe_ptr, addr);')
    elif self.EntryType != None:
        code('doTransitionWorker(event, state, next_state, m_cache_entry_ptr, addr);')
    else:
        code('doTransitionWorker(event, state, next_state, addr);')
    port_to_buf_map, in_msg_bufs, msg_bufs = self.getBufferMaps(ident)
    code('''
if (result == TransitionResult_Valid) {
DPRINTF(RubyGenerated, "next_state: %s\\n",
${ident}_State_to_string(next_state));
countTransition(state, event);
DPRINTFR(ProtocolTrace, "%15d %3s %10s%20s %6s>%-6s %#x %s\\n",
curTick(), m_version, "${ident}",
${ident}_Event_to_string(event),
${ident}_State_to_string(state),
${ident}_State_to_string(next_state),
printAddress(addr), GET_TRANSITION_COMMENT());
CLEAR_TRANSITION_COMMENT();
''')
    # On a valid transition, commit the new state and permissions.
    if self.TBEType != None and self.EntryType != None:
        code('setState(m_tbe_ptr, m_cache_entry_ptr, addr, next_state);')
        code('setAccessPermission(m_cache_entry_ptr, addr, next_state);')
    elif self.TBEType != None:
        code('setState(m_tbe_ptr, addr, next_state);')
        code('setAccessPermission(addr, next_state);')
    elif self.EntryType != None:
        code('setState(m_cache_entry_ptr, addr, next_state);')
        code('setAccessPermission(m_cache_entry_ptr, addr, next_state);')
    else:
        code('setState(addr, next_state);')
        code('setAccessPermission(addr, next_state);')
    code('''
} else if (result == TransitionResult_ResourceStall) {
DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\\n",
curTick(), m_version, "${ident}",
${ident}_Event_to_string(event),
${ident}_State_to_string(state),
${ident}_State_to_string(next_state),
printAddress(addr), "Resource Stall");
} else if (result == TransitionResult_ProtocolStall) {
DPRINTF(RubyGenerated, "stalling\\n");
DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\\n",
curTick(), m_version, "${ident}",
${ident}_Event_to_string(event),
${ident}_State_to_string(state),
${ident}_State_to_string(next_state),
printAddress(addr), "Protocol Stall");
}
return result;
''')
    code.dedent()
    code('''
}
TransitionResult
${ident}_Controller::doTransitionWorker(${ident}_Event event,
${ident}_State state,
${ident}_State& next_state,
''')
    if self.TBEType != None:
        code('''
${{self.TBEType.c_ident}}*& m_tbe_ptr,
''')
    if self.EntryType != None:
        code('''
${{self.EntryType.c_ident}}*& m_cache_entry_ptr,
''')
    code('''
Addr addr)
{
switch(HASH_FUN(state, event)) {
''')
    # This map will allow suppress generating duplicate code
    cases = orderdict()
    for trans in self.transitions:
        case_string = "%s_State_%s, %s_Event_%s" % \
            (self.ident, trans.state.ident, self.ident, trans.event.ident)
        # Each transition body is rendered into its own formatter, then
        # compared as a string for deduplication below.
        case = self.symtab.codeFormatter()
        # Only set next_state if it changes
        if trans.state != trans.nextState:
            if trans.nextState.isWildcard():
                # When * is encountered as an end state of a transition,
                # the next state is determined by calling the
                # machine-specific getNextState function. The next state
                # is determined before any actions of the transition
                # execute, and therefore the next state calculation cannot
                # depend on any of the transitionactions.
                case('next_state = getNextState(addr);')
            else:
                ns_ident = trans.nextState.ident
                case('next_state = ${ident}_State_${ns_ident};')
        actions = trans.actions
        request_types = trans.request_types
        # Check for resources
        case_sorter = []
        res = trans.resources
        for key,val in res.iteritems():
            val = '''
if (!%s.areNSlotsAvailable(%s, clockEdge()))
return TransitionResult_ResourceStall;
''' % (key.code, val)
            case_sorter.append(val)
        # Check all of the request_types for resource constraints
        for request_type in request_types:
            val = '''
if (!checkResourceAvailable(%s_RequestType_%s, addr)) {
return TransitionResult_ResourceStall;
}
''' % (self.ident, request_type.ident)
            case_sorter.append(val)
        # Emit the code sequences in a sorted order. This makes the
        # output deterministic (without this the output order can vary
        # since Map's keys() on a vector of pointers is not deterministic
        for c in sorted(case_sorter):
            case("$c")
        # Record access types for this transition
        for request_type in request_types:
            case('recordRequestType(${ident}_RequestType_${{request_type.ident}}, addr);')
        # Figure out if we stall
        stall = False
        for action in actions:
            if action.ident == "z_stall":
                stall = True
                break
        if stall:
            case('return TransitionResult_ProtocolStall;')
        else:
            if self.TBEType != None and self.EntryType != None:
                for action in actions:
                    case('${{action.ident}}(m_tbe_ptr, m_cache_entry_ptr, addr);')
            elif self.TBEType != None:
                for action in actions:
                    case('${{action.ident}}(m_tbe_ptr, addr);')
            elif self.EntryType != None:
                for action in actions:
                    case('${{action.ident}}(m_cache_entry_ptr, addr);')
            else:
                for action in actions:
                    case('${{action.ident}}(addr);')
            case('return TransitionResult_Valid;')
        case = str(case)
        # Look to see if this transition code is unique.
        if case not in cases:
            cases[case] = []
        cases[case].append(case_string)
    # Walk through all of the unique code blocks and spit out the
    # corresponding case statement elements
    for case,transitions in cases.iteritems():
        # Iterative over all the multiple transitions that share
        # the same code
        for trans in transitions:
            code(' case HASH_FUN($trans):')
        code(' $case\n')
    code('''
default:
panic("Invalid transition\\n"
"%s time: %d addr: %s event: %s state: %s\\n",
name(), curCycle(), addr, event, state);
}
return TransitionResult_Valid;
}
''')
    code.write(path, "%s_Transitions.cc" % self.ident)
# **************************
# ******* HTML Files *******
# **************************
def frameRef(self, click_href, click_target, over_href, over_num, text):
    """Return an HTML <A> anchor: clicking loads click_href into frame
    click_target; hovering loads over_href into frame over_num.

    fix_newlines=False keeps the anchor's embedded newlines verbatim.
    """
    code = self.symtab.codeFormatter(fix_newlines=False)
    code("""<A href=\"$click_href\" target=\"$click_target\" onmouseover=\"
if (parent.frames[$over_num].location != parent.location + '$over_href') {
parent.frames[$over_num].location='$over_href'
}\">
${{html.formatShorthand(text)}}
</A>""")
    return str(code)
def writeHTMLFiles(self, path):
    """Generate the HTML transition tables plus one description page
    per action, state, and event."""
    # Table with no row highlighted, then one variant per active state.
    self.printHTMLTransitions(path, None)
    for st in self.states.itervalues():
        self.printHTMLTransitions(path, st)
    # Per-symbol description pages.
    for act in self.actions.itervalues():
        page = html.createSymbol(act, "Action")
        page.write(path, "%s_action_%s.html" % (self.ident, act.ident))
    for st in self.states.itervalues():
        page = html.createSymbol(st, "State")
        page.write(path, "%s_State_%s.html" % (self.ident, st.ident))
    for ev in self.events.itervalues():
        page = html.createSymbol(ev, "Event")
        page.write(path, "%s_Event_%s.html" % (self.ident, ev.ident))
def printHTMLTransitions(self, path, active_state):
    """Write this machine's transition table as an HTML page.

    If `active_state` is not None, that state's row is highlighted and
    cells are colored to show reachability; otherwise the plain table
    is written as <ident>_table.html.
    """
    # NOTE(review): codeFormatter templates below reference local names
    # ($extra, $mid, $ref, $color, ...), so the locals in this method
    # appear to be load-bearing for template substitution -- do not
    # rename them.
    code = self.symtab.codeFormatter()
    code('''
<HTML>
<BODY link="blue" vlink="blue">
<H1 align="center">${{html.formatShorthand(self.short)}}:
''')
    code.indent()
    # Header line: links to the transition tables of all machines,
    # separated by " - "; the current machine is plain text.
    for i,machine in enumerate(self.symtab.getAllType(StateMachine)):
        mid = machine.ident
        if i != 0:
            extra = " - "
        else:
            extra = ""
        if machine == self:
            code('$extra$mid')
        else:
            code('$extra<A target="Table" href="${mid}_table.html">$mid</A>')
    code.dedent()
    code("""
</H1>
<TABLE border=1>
<TR>
<TH> </TH>
""")
    # Column headers: one per event, linking to the event's page.
    for event in self.events.itervalues():
        href = "%s_Event_%s.html" % (self.ident, event.ident)
        ref = self.frameRef(href, "Status", href, "1", event.short)
        code('<TH bgcolor=white>$ref</TH>')
    code('</TR>')
    # -- Body of table
    for state in self.states.itervalues():
        # -- Each row
        if state == active_state:
            color = "yellow"
        else:
            color = "white"
        click = "%s_table_%s.html" % (self.ident, state.ident)
        over = "%s_State_%s.html" % (self.ident, state.ident)
        # NOTE(review): 'text' appears to be unused below.
        text = html.formatShorthand(state.short)
        ref = self.frameRef(click, "Table", over, "1", state.short)
        code('''
<TR>
<TH bgcolor=$color>$ref</TH>
''')
        # -- One column for each event
        for event in self.events.itervalues():
            trans = self.table.get((state,event), None)
            if trans is None:
                # This is the no transition case
                if state == active_state:
                    color = "#C0C000"
                else:
                    color = "lightgrey"
                code('<TD bgcolor=$color> </TD>')
                continue
            next = trans.nextState
            stall_action = False
            # -- Get the actions
            for action in trans.actions:
                if action.ident == "z_stall" or \
                   action.ident == "zz_recycleMandatoryQueue":
                    stall_action = True
            # -- Print out "actions/next-state"
            if stall_action:
                if state == active_state:
                    color = "#C0C000"
                else:
                    color = "lightgrey"
            elif active_state and next.ident == active_state.ident:
                color = "aqua"
            elif state == active_state:
                color = "yellow"
            else:
                color = "white"
            code('<TD bgcolor=$color>')
            for action in trans.actions:
                href = "%s_action_%s.html" % (self.ident, action.ident)
                ref = self.frameRef(href, "Status", href, "1",
                                    action.short)
                code(' $ref')
            # Show "actions/next-state" only when the transition
            # actually changes state.
            if next != state:
                if trans.actions:
                    code('/')
                click = "%s_table_%s.html" % (self.ident, next.ident)
                over = "%s_State_%s.html" % (self.ident, next.ident)
                ref = self.frameRef(click, "Table", over, "1", next.short)
                code("$ref")
            code("</TD>")
        # -- Each row (trailing row header mirrors the leading one)
        if state == active_state:
            color = "yellow"
        else:
            color = "white"
        click = "%s_table_%s.html" % (self.ident, state.ident)
        over = "%s_State_%s.html" % (self.ident, state.ident)
        ref = self.frameRef(click, "Table", over, "1", state.short)
        code('''
<TH bgcolor=$color>$ref</TH>
</TR>
''')
    # Footer row repeats the event column headers.
    code('''
<!- Column footer->
<TR>
<TH> </TH>
''')
    for event in self.events.itervalues():
        href = "%s_Event_%s.html" % (self.ident, event.ident)
        ref = self.frameRef(href, "Status", href, "1", event.short)
        code('<TH bgcolor=white>$ref</TH>')
    code('''
</TR>
</TABLE>
</BODY></HTML>
''')
    if active_state:
        name = "%s_table_%s.html" % (self.ident, active_state.ident)
    else:
        name = "%s_table.html" % self.ident
    code.write(path, name)
# Public API of this module: only StateMachine is exported via
# 'from ... import *'.
__all__ = [ "StateMachine" ]
| |
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for Tilera bare-metal nodes.
"""
import base64
import os
import jinja2
from oslo.config import cfg
from nova.compute import flavors
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('net_config_template', 'nova.virt.baremetal.pxe',
group='baremetal')
def build_network_config(network_info):
    """Render the baremetal network-config template for `network_info`.

    The template file comes from CONF.baremetal.net_config_template and
    is rendered with the mapped interfaces and the use_ipv6 flag.
    """
    interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
    tmpl_dir, tmpl_name = os.path.split(CONF.baremetal.net_config_template)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_dir))
    return env.get_template(tmpl_name).render({'interfaces': interfaces,
                                               'use_ipv6': CONF.use_ipv6})
def get_image_dir_path(instance):
    """Generate the directory path holding an instance's disk image."""
    return os.path.join(CONF.instances_path, instance['name'])
def get_image_file_path(instance):
    """Generate the full path for an instance's disk image file."""
    return os.path.join(CONF.instances_path, instance['name'], 'disk')
def get_tilera_nfs_path(node_id):
    """Generate the path for a node's Tilera NFS directory."""
    # Each node gets its own NFS root under the tftp tree: fs_<node_id>.
    tilera_nfs_dir = "fs_" + str(node_id)
    return os.path.join(CONF.baremetal.tftp_root, tilera_nfs_dir)
def get_partition_sizes(instance):
    """Return (root_mb, swap_mb) partition sizes for the instance's flavor."""
    flavor = flavors.extract_flavor(instance)
    root_mb = flavor['root_gb'] * 1024
    # Always reserve at least 1 MB of swap.
    swap_mb = max(flavor['swap'], 1)
    return (root_mb, swap_mb)
def get_tftp_image_info(instance):
    """Generate the paths for tftp files for this instance.

    Raises NovaException if
    - instance does not contain kernel_id
    """
    # Each entry maps a label to [glance_uuid, tftp_path].
    image_info = {
        'kernel': [None, None],
    }
    try:
        image_info['kernel'][0] = str(instance['kernel_id'])
    except KeyError:
        pass

    missing_labels = []
    for label, (uuid, _path) in image_info.items():
        if uuid:
            image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
                                                instance['uuid'], label)
        else:
            missing_labels.append(label)

    if missing_labels:
        raise exception.NovaException(_(
            "Can not activate Tilera bootloader. "
            "The following boot parameters "
            "were not passed to baremetal driver: %s") % missing_labels)
    return image_info
class Tilera(base.NodeDriver):
    """Tilera bare metal driver."""

    def __init__(self, virtapi):
        super(Tilera, self).__init__(virtapi)

    def _collect_mac_addresses(self, context, node):
        # Collect the node's registered NIC MAC addresses, de-duplicated
        # and sorted for deterministic ordering.
        macs = set()
        for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
            if nic['address']:
                macs.add(nic['address'])
        return sorted(macs)

    def _cache_tftp_images(self, context, instance, image_info):
        """Fetch the necessary kernels and ramdisks for the instance."""
        fileutils.ensure_tree(
            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))

        LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
                  instance['name'])
        # image_info maps label -> [glance uuid, local tftp path]; see
        # get_tftp_image_info().
        for label in image_info.keys():
            (uuid, path) = image_info[label]
            bm_utils.cache_image(
                context=context,
                target=path,
                image_id=uuid,
                user_id=instance['user_id'],
                project_id=instance['project_id'],
            )

    def _cache_image(self, context, instance, image_meta):
        """Fetch the instance's image from Glance

        This method pulls the relevant AMI and associated kernel and ramdisk,
        and the deploy kernel and ramdisk from Glance, and writes them
        to the appropriate places on local disk.

        Both sets of kernel and ramdisk are needed for Tilera booting, so these
        are stored under CONF.baremetal.tftp_root.

        At present, the AMI is cached and certain files are injected.
        Debian/ubuntu-specific assumptions are made regarding the injected
        files. In a future revision, this functionality will be replaced by a
        more scalable and os-agnostic approach: the deployment ramdisk will
        fetch from Glance directly, and write its own last-mile configuration.
        """
        fileutils.ensure_tree(get_image_dir_path(instance))
        image_path = get_image_file_path(instance)

        LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
                  {'ami': image_meta['id'], 'name': instance['name']})
        bm_utils.cache_image(context=context,
                             target=image_path,
                             image_id=image_meta['id'],
                             user_id=instance['user_id'],
                             project_id=instance['project_id'],
                             clean=True,
                             )

        # Returns [glance image id, local path of the cached image].
        return [image_meta['id'], image_path]

    def _inject_into_image(self, context, node, instance, network_info,
                           injected_files=None, admin_password=None):
        """Inject last-mile configuration into instances image

        Much of this method is a hack around DHCP and cloud-init
        not working together with baremetal provisioning yet.
        """
        partition = None
        # NOTE(review): without a separate kernel the image is assumed to
        # be partitioned and injection targets partition "1" -- confirm.
        if not instance['kernel_id']:
            partition = "1"

        ssh_key = None
        if 'key_data' in instance and instance['key_data']:
            ssh_key = str(instance['key_data'])

        if injected_files is None:
            injected_files = []
        else:
            # Copy so the caller's list is not mutated below.
            injected_files = list(injected_files)

        net_config = build_network_config(network_info)

        if instance['hostname']:
            injected_files.append(('/etc/hostname', instance['hostname']))

        LOG.debug(_("Injecting files into image for instance %(name)s") %
                  {'name': instance['name']})

        bm_utils.inject_into_image(
            image=get_image_file_path(instance),
            key=ssh_key,
            net=net_config,
            metadata=utils.instance_meta(instance),
            admin_password=admin_password,
            files=injected_files,
            partition=partition,
        )

    def cache_images(self, context, node, instance,
                     admin_password, image_meta, injected_files, network_info):
        """Prepare all the images for this instance."""
        tftp_image_info = get_tftp_image_info(instance)
        self._cache_tftp_images(context, instance, tftp_image_info)

        self._cache_image(context, instance, image_meta)
        self._inject_into_image(context, node, instance, network_info,
                                injected_files, admin_password)

    def destroy_images(self, context, node, instance):
        """Delete instance's image file."""
        bm_utils.unlink_without_raise(get_image_file_path(instance))
        bm_utils.rmtree_without_raise(get_image_dir_path(instance))

    def activate_bootloader(self, context, node, instance, network_info):
        """Configure Tilera boot loader for an instance

        Kernel and ramdisk images are downloaded by cache_tftp_images,
        and stored in /tftpboot/{uuid}/

        This method writes the instances config file, and then creates
        symlinks for each MAC address in the instance.

        By default, the complete layout looks like this:

        /tftpboot/
            ./{uuid}/
                 kernel
            ./fs_node_id/
        """
        # NOTE(review): return value unused; the call raises NovaException
        # when the instance lacks a kernel_id, so it acts as validation.
        image_info = get_tftp_image_info(instance)
        (root_mb, swap_mb) = get_partition_sizes(instance)
        tilera_nfs_path = get_tilera_nfs_path(node['id'])
        image_file_path = get_image_file_path(instance)

        deployment_key = bm_utils.random_alnum(32)
        db.bm_node_update(context, node['id'],
                          {'deploy_key': deployment_key,
                           'image_path': image_file_path,
                           'pxe_config_path': tilera_nfs_path,
                           'root_mb': root_mb,
                           'swap_mb': swap_mb})

        # Loop-mount the cached image onto the node's NFS export dir.
        if os.path.exists(image_file_path) and \
                os.path.exists(tilera_nfs_path):
            utils.execute('mount', '-o', 'loop', image_file_path,
                          tilera_nfs_path, run_as_root=True)

    def deactivate_bootloader(self, context, node, instance):
        """Delete Tilera bootloader images and config."""
        try:
            db.bm_node_update(context, node['id'],
                              {'deploy_key': None,
                               'image_path': None,
                               'pxe_config_path': None,
                               'root_mb': 0,
                               'swap_mb': 0})
        except exception.NodeNotFound:
            pass

        tilera_nfs_path = get_tilera_nfs_path(node['id'])

        if os.path.ismount(tilera_nfs_path):
            utils.execute('rpc.mountd', run_as_root=True)
            utils.execute('umount', '-f', tilera_nfs_path, run_as_root=True)

        try:
            image_info = get_tftp_image_info(instance)
        except exception.NovaException:
            pass
        else:
            for label in image_info.keys():
                (uuid, path) = image_info[label]
                bm_utils.unlink_without_raise(path)

        # NOTE(review): 'macs' is never used afterwards; this call only
        # probes the DB (errors ignored).  Confirm whether it can go.
        try:
            macs = self._collect_mac_addresses(context, node)
        except db_exc.DBError:
            pass

        if os.path.exists(os.path.join(CONF.baremetal.tftp_root,
                                       instance['uuid'])):
            bm_utils.rmtree_without_raise(
                os.path.join(CONF.baremetal.tftp_root, instance['uuid']))

    def _iptables_set(self, node_ip, user_data):
        """Sets security setting (iptables:port) if needed.

        iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
        /tftpboot/iptables_rule script sets iptables rule on the given node.
        """
        rule_path = CONF.baremetal.tftp_root + "/iptables_rule"
        if user_data is not None:
            # user_data is expected to be base64; decoded value is passed
            # to the iptables_rule helper as the allowed IP.
            open_ip = base64.b64decode(user_data)
            utils.execute(rule_path, node_ip, open_ip)

    def activate_node(self, context, node, instance):
        """Wait for Tilera deployment to complete."""
        # NOTE(review): 'locals' shadows the builtin; kept as-is.  Despite
        # the docstring, this checks the node's deploy state once rather
        # than polling in a loop -- confirm intent.
        locals = {'error': '', 'started': False}

        try:
            row = db.bm_node_get(context, node['id'])
            if instance['uuid'] != row.get('instance_uuid'):
                locals['error'] = _("Node associated with another instance"
                                    " while waiting for deploy of %s")

            status = row.get('task_state')
            if (status == baremetal_states.DEPLOYING and
                    locals['started'] == False):
                LOG.info(_('Tilera deploy started for instance %s')
                         % instance['uuid'])
                locals['started'] = True
            elif status in (baremetal_states.DEPLOYDONE,
                            baremetal_states.BUILDING,
                            baremetal_states.ACTIVE):
                LOG.info(_("Tilera deploy completed for instance %s")
                         % instance['uuid'])
                node_ip = node['pm_address']
                user_data = instance['user_data']
                try:
                    self._iptables_set(node_ip, user_data)
                except Exception:
                    # Roll back the bootloader before reporting failure.
                    self.deactivate_bootloader(context, node, instance)
                    raise exception.NovaException(_("Node is "
                                                    "unknown error state."))
            elif status == baremetal_states.DEPLOYFAIL:
                locals['error'] = _("Tilera deploy failed for instance %s")
        except exception.NodeNotFound:
            locals['error'] = _("Baremetal node deleted while waiting "
                                "for deployment of instance %s")

        if locals['error']:
            raise exception.InstanceDeployFailure(
                locals['error'] % instance['uuid'])

    def deactivate_node(self, context, node, instance):
        pass
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import six.moves.cPickle as pickle
import json
from collections import defaultdict
from gzip import GzipFile
from os.path import getmtime
import struct
from time import time
import os
from io import BufferedReader
from hashlib import md5
from itertools import chain
from tempfile import NamedTemporaryFile
import sys
from six.moves import range
from swift.common.exceptions import RingLoadError
from swift.common.utils import hash_path, validate_configuration
from swift.common.ring.utils import tiers_for_dev
class RingData(object):
    """Partitioned consistent hashing ring data (used for serialization)."""

    def __init__(self, replica2part2dev_id, devs, part_shift,
                 next_part_power=None):
        """
        :param replica2part2dev_id: list (one entry per replica) of arrays
                                    mapping partition index -> device id
        :param devs: list of device dicts (entries may be None)
        :param part_shift: number of bits a 32-bit hash is shifted right
                           to obtain the partition number
        :param next_part_power: in-flight partition power, if any
        """
        self.devs = devs
        self._replica2part2dev_id = replica2part2dev_id
        self._part_shift = part_shift
        self.next_part_power = next_part_power

        # Rings that predate the region concept carry no 'region' key;
        # default every real device to region 1.
        for dev in self.devs:
            if dev is not None:
                dev.setdefault("region", 1)

    @classmethod
    def deserialize_v1(cls, gz_file, metadata_only=False):
        """
        Deserialize a v1 ring file into a dictionary with `devs`, `part_shift`,
        and `replica2part2dev_id` keys.

        If the optional kwarg `metadata_only` is True, then the
        `replica2part2dev_id` is not loaded and that key in the returned
        dictionary just has the value `[]`.

        :param file gz_file: An opened file-like object which has already
                             consumed the 6 bytes of magic and version.
        :param bool metadata_only: If True, only load `devs` and `part_shift`
        :returns: A dict containing `devs`, `part_shift`, and
                  `replica2part2dev_id`
        """
        json_len, = struct.unpack('!I', gz_file.read(4))
        ring_dict = json.loads(gz_file.read(json_len))
        ring_dict['replica2part2dev_id'] = []
        if metadata_only:
            return ring_dict
        # The assignment tables were written in the writer's native byte
        # order (recorded in the metadata); swap if ours differs.
        byteswap = (ring_dict.get('byteorder', sys.byteorder) != sys.byteorder)
        partition_count = 1 << (32 - ring_dict['part_shift'])
        for x in range(ring_dict['replica_count']):
            part2dev = array.array('H', gz_file.read(2 * partition_count))
            if byteswap:
                part2dev.byteswap()
            ring_dict['replica2part2dev_id'].append(part2dev)
        return ring_dict

    @classmethod
    def load(cls, filename, metadata_only=False):
        """
        Load ring data from a file.

        :param filename: Path to a file serialized by the save() method.
        :param bool metadata_only: If True, only load `devs` and `part_shift`.
        :returns: A RingData instance containing the loaded data.
        """
        gz_file = GzipFile(filename, 'rb')
        # Python 2.6 GzipFile doesn't support BufferedIO
        if hasattr(gz_file, '_checkReadable'):
            gz_file = BufferedReader(gz_file)

        # See if the file is in the new format
        magic = gz_file.read(4)
        # BUG FIX: read() returns bytes on Python 3, so comparing against
        # the str 'R1NG' was always False there and new-format rings fell
        # through to the pickle path.  b'R1NG' == 'R1NG' on Python 2, so
        # this stays correct on both.
        if magic == b'R1NG':
            format_version, = struct.unpack('!H', gz_file.read(2))
            if format_version == 1:
                ring_data = cls.deserialize_v1(
                    gz_file, metadata_only=metadata_only)
            else:
                raise Exception('Unknown ring format version %d' %
                                format_version)
        else:
            # Assume old-style pickled ring
            gz_file.seek(0)
            ring_data = pickle.load(gz_file)

        if not hasattr(ring_data, 'devs'):
            ring_data = RingData(ring_data['replica2part2dev_id'],
                                 ring_data['devs'], ring_data['part_shift'],
                                 ring_data.get('next_part_power'))
        return ring_data

    def serialize_v1(self, file_obj):
        """Write this ring to `file_obj` in the v1 (R1NG) binary format."""
        # Write out new-style serialization magic and version:
        # BUG FIX: struct's 's' format requires bytes on Python 3.
        file_obj.write(struct.pack('!4sH', b'R1NG', 1))
        ring = self.to_dict()

        # Only include next_part_power if it is set in the
        # builder, otherwise just ignore it
        _text = {'devs': ring['devs'], 'part_shift': ring['part_shift'],
                 'replica_count': len(ring['replica2part2dev_id']),
                 'byteorder': sys.byteorder}

        next_part_power = ring.get('next_part_power')
        if next_part_power is not None:
            _text['next_part_power'] = next_part_power

        json_encoder = json.JSONEncoder(sort_keys=True)
        json_text = json_encoder.encode(_text)
        if not isinstance(json_text, bytes):
            # BUG FIX: JSONEncoder.encode returns str on Python 3 but the
            # output stream is binary; the default ensure_ascii=True
            # guarantees the text is ASCII-safe.
            json_text = json_text.encode('ascii')
        json_len = len(json_text)
        file_obj.write(struct.pack('!I', json_len))
        file_obj.write(json_text)
        for part2dev_id in ring['replica2part2dev_id']:
            # BUG FIX: array.tostring() was removed in Python 3.9; prefer
            # tobytes() when available (Python 2's array lacks it).
            if hasattr(part2dev_id, 'tobytes'):
                file_obj.write(part2dev_id.tobytes())
            else:
                file_obj.write(part2dev_id.tostring())

    def save(self, filename, mtime=1300507380.0):
        """
        Serialize this RingData instance to disk.

        :param filename: File into which this instance should be serialized.
        :param mtime: time used to override mtime for gzip, default or None
                      if the caller wants to include time
        """
        # Override the timestamp so that the same ring data creates
        # the same bytes on disk. This makes a checksum comparison a
        # good way to see if two rings are identical.
        tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False)
        gz_file = GzipFile(filename, mode='wb', fileobj=tempf, mtime=mtime)
        self.serialize_v1(gz_file)
        gz_file.close()
        tempf.flush()
        os.fsync(tempf.fileno())
        tempf.close()
        os.chmod(tempf.name, 0o644)
        os.rename(tempf.name, filename)

    def to_dict(self):
        """Return the ring data as a plain dict (inverse of the v1 dict)."""
        return {'devs': self.devs,
                'replica2part2dev_id': self._replica2part2dev_id,
                'part_shift': self._part_shift,
                'next_part_power': self.next_part_power}
class Ring(object):
    """
    Partitioned consistent hashing ring.

    :param serialized_path: path to serialized RingData instance
    :param reload_time: time interval in seconds to check for a ring change
    :param ring_name: ring name string (basically specified from policy)
    :param validation_hook: hook point to validate ring configuration ontime

    :raises RingLoadError: if the loaded ring data violates its constraint
    """

    def __init__(self, serialized_path, reload_time=15, ring_name=None,
                 validation_hook=lambda ring_data: None):
        # can't use the ring unless HASH_PATH_SUFFIX is set
        validate_configuration()
        if ring_name:
            self.serialized_path = os.path.join(serialized_path,
                                                ring_name + '.ring.gz')
        else:
            self.serialized_path = os.path.join(serialized_path)
        self.reload_time = reload_time
        self._validation_hook = validation_hook
        self._reload(force=True)

    def _reload(self, force=False):
        # (Re)load the ring from disk if forced or the file changed,
        # then rebuild all derived lookup structures.
        self._rtime = time() + self.reload_time
        if force or self.has_changed():
            ring_data = RingData.load(self.serialized_path)

            try:
                self._validation_hook(ring_data)
            except RingLoadError:
                if force:
                    raise
                else:
                    # In runtime reload at working server, it's ok to use old
                    # ring data if the new ring data is invalid.
                    return

            self._mtime = getmtime(self.serialized_path)
            self._devs = ring_data.devs
            # NOTE(akscram): Replication parameters like replication_ip
            #                and replication_port are required for
            #                replication process. An old replication
            #                ring doesn't contain this parameters into
            #                device. Old-style pickled rings won't have
            #                region information.
            for dev in self._devs:
                if dev:
                    dev.setdefault('region', 1)
                    if 'ip' in dev:
                        dev.setdefault('replication_ip', dev['ip'])
                    if 'port' in dev:
                        dev.setdefault('replication_port', dev['port'])

            self._replica2part2dev_id = ring_data._replica2part2dev_id
            self._part_shift = ring_data._part_shift
            self._rebuild_tier_data()

            # Do this now, when we know the data has changed, rather than
            # doing it on every call to get_more_nodes().
            #
            # Since this is to speed up the finding of handoffs, we only
            # consider devices with at least one partition assigned. This
            # way, a region, zone, or server with no partitions assigned
            # does not count toward our totals, thereby keeping the early
            # bailouts in get_more_nodes() working.
            dev_ids_with_parts = set()
            for part2dev_id in self._replica2part2dev_id:
                for dev_id in part2dev_id:
                    dev_ids_with_parts.add(dev_id)

            regions = set()
            zones = set()
            ips = set()
            self._num_devs = 0
            for dev in self._devs:
                if dev and dev['id'] in dev_ids_with_parts:
                    regions.add(dev['region'])
                    zones.add((dev['region'], dev['zone']))
                    ips.add((dev['region'], dev['zone'], dev['ip']))
                    self._num_devs += 1
            self._num_regions = len(regions)
            self._num_zones = len(zones)
            self._num_ips = len(ips)
            self._next_part_power = ring_data.next_part_power

    @property
    def next_part_power(self):
        return self._next_part_power

    @property
    def part_power(self):
        return 32 - self._part_shift

    def _rebuild_tier_data(self):
        # Map each tier tuple (see tiers_for_dev) to the devices in it,
        # and group the tiers by tuple length for handoff searching.
        self.tier2devs = defaultdict(list)
        for dev in self._devs:
            if not dev:
                continue
            for tier in tiers_for_dev(dev):
                self.tier2devs[tier].append(dev)

        tiers_by_length = defaultdict(list)
        for tier in self.tier2devs:
            tiers_by_length[len(tier)].append(tier)
        self.tiers_by_length = sorted(tiers_by_length.values(),
                                      key=lambda x: len(x[0]))
        for tiers in self.tiers_by_length:
            tiers.sort()

    @property
    def replica_count(self):
        """Number of replicas (full or partial) used in the ring."""
        return len(self._replica2part2dev_id)

    @property
    def partition_count(self):
        """Number of partitions in the ring."""
        return len(self._replica2part2dev_id[0])

    @property
    def devs(self):
        """devices in the ring"""
        if time() > self._rtime:
            self._reload()
        return self._devs

    def has_changed(self):
        """
        Check to see if the ring on disk is different than the current one in
        memory.

        :returns: True if the ring on disk has changed, False otherwise
        """
        return getmtime(self.serialized_path) != self._mtime

    def _get_part_nodes(self, part):
        # One node per replica of `part`, skipping duplicate devices;
        # 'index' records the node's position in primary order.
        part_nodes = []
        seen_ids = set()
        for r2p2d in self._replica2part2dev_id:
            if part < len(r2p2d):
                dev_id = r2p2d[part]
                if dev_id not in seen_ids:
                    part_nodes.append(self.devs[dev_id])
                    seen_ids.add(dev_id)
        return [dict(node, index=i) for i, node in enumerate(part_nodes)]

    def get_part(self, account, container=None, obj=None):
        """
        Get the partition for an account/container/object.

        :param account: account name
        :param container: container name
        :param obj: object name
        :returns: the partition number
        """
        # The partition is the top (32 - part_shift) bits of the path hash.
        key = hash_path(account, container, obj, raw_digest=True)
        if time() > self._rtime:
            self._reload()
        part = struct.unpack_from('>I', key)[0] >> self._part_shift
        return part

    def get_part_nodes(self, part):
        """
        Get the nodes that are responsible for the partition. If one
        node is responsible for more than one replica of the same
        partition, it will only appear in the output once.

        :param part: partition to get nodes for
        :returns: list of node dicts

        See :func:`get_nodes` for a description of the node dicts.
        """
        if time() > self._rtime:
            self._reload()
        return self._get_part_nodes(part)

    def get_nodes(self, account, container=None, obj=None):
        """
        Get the partition and nodes for an account/container/object.
        If a node is responsible for more than one replica, it will
        only appear in the output once.

        :param account: account name
        :param container: container name
        :param obj: object name
        :returns: a tuple of (partition, list of node dicts)

        Each node dict will have at least the following keys:

        ======  ===============================================================
        id      unique integer identifier amongst devices
        index   offset into the primary node list for the partition
        weight  a float of the relative weight of this device as compared to
                others; this indicates how many partitions the builder will try
                to assign to this device
        zone    integer indicating which zone the device is in; a given
                partition will not be assigned to multiple devices within the
                same zone
        ip      the ip address of the device
        port    the tcp port of the device
        device  the device's name on disk (sdb1, for example)
        meta    general use 'extra' field; for example: the online date, the
                hardware description
        ======  ===============================================================
        """
        part = self.get_part(account, container, obj)
        return part, self._get_part_nodes(part)

    def get_more_nodes(self, part):
        """
        Generator to get extra nodes for a partition for hinted handoff.

        The handoff nodes will try to be in zones other than the
        primary zones, will take into account the device weights, and
        will usually keep the same sequences of handoffs even with
        ring changes.

        :param part: partition to get handoff nodes for
        :returns: generator of node dicts

        See :func:`get_nodes` for a description of the node dicts.
        """
        if time() > self._rtime:
            self._reload()
        primary_nodes = self._get_part_nodes(part)

        used = set(d['id'] for d in primary_nodes)
        same_regions = set(d['region'] for d in primary_nodes)
        same_zones = set((d['region'], d['zone']) for d in primary_nodes)
        same_ips = set(
            (d['region'], d['zone'], d['ip']) for d in primary_nodes)

        parts = len(self._replica2part2dev_id[0])
        # Deterministic start offset derived from the partition number.
        # NOTE(review): md5(str(part)) requires bytes on Python 3; this
        # code targets Python 2 -- confirm before porting.
        start = struct.unpack_from(
            '>I', md5(str(part)).digest())[0] >> self._part_shift
        inc = int(parts / 65536) or 1
        # Multiple loops for execution speed; the checks and bookkeeping get
        # simpler as you go along
        hit_all_regions = len(same_regions) == self._num_regions
        for handoff_part in chain(range(start, parts, inc),
                                  range(inc - ((parts - start) % inc),
                                        start, inc)):
            if hit_all_regions:
                # At this point, there are no regions left untouched, so we
                # can stop looking.
                break
            for part2dev_id in self._replica2part2dev_id:
                if handoff_part < len(part2dev_id):
                    dev_id = part2dev_id[handoff_part]
                    dev = self._devs[dev_id]
                    region = dev['region']
                    if dev_id not in used and region not in same_regions:
                        yield dev
                        used.add(dev_id)
                        same_regions.add(region)
                        zone = dev['zone']
                        ip = (region, zone, dev['ip'])
                        same_zones.add((region, zone))
                        same_ips.add(ip)
                        if len(same_regions) == self._num_regions:
                            hit_all_regions = True
                            break

        hit_all_zones = len(same_zones) == self._num_zones
        for handoff_part in chain(range(start, parts, inc),
                                  range(inc - ((parts - start) % inc),
                                        start, inc)):
            if hit_all_zones:
                # Much like we stopped looking for fresh regions before, we
                # can now stop looking for fresh zones; there are no more.
                break
            for part2dev_id in self._replica2part2dev_id:
                if handoff_part < len(part2dev_id):
                    dev_id = part2dev_id[handoff_part]
                    dev = self._devs[dev_id]
                    zone = (dev['region'], dev['zone'])
                    if dev_id not in used and zone not in same_zones:
                        yield dev
                        used.add(dev_id)
                        same_zones.add(zone)
                        ip = zone + (dev['ip'],)
                        same_ips.add(ip)
                        if len(same_zones) == self._num_zones:
                            hit_all_zones = True
                            break

        hit_all_ips = len(same_ips) == self._num_ips
        for handoff_part in chain(range(start, parts, inc),
                                  range(inc - ((parts - start) % inc),
                                        start, inc)):
            if hit_all_ips:
                # We've exhausted the pool of unused backends, so stop
                # looking.
                break
            for part2dev_id in self._replica2part2dev_id:
                if handoff_part < len(part2dev_id):
                    dev_id = part2dev_id[handoff_part]
                    dev = self._devs[dev_id]
                    ip = (dev['region'], dev['zone'], dev['ip'])
                    if dev_id not in used and ip not in same_ips:
                        yield dev
                        used.add(dev_id)
                        same_ips.add(ip)
                        if len(same_ips) == self._num_ips:
                            hit_all_ips = True
                            break

        hit_all_devs = len(used) == self._num_devs
        for handoff_part in chain(range(start, parts, inc),
                                  range(inc - ((parts - start) % inc),
                                        start, inc)):
            if hit_all_devs:
                # We've used every device we have, so let's stop looking for
                # unused devices now.
                break
            for part2dev_id in self._replica2part2dev_id:
                if handoff_part < len(part2dev_id):
                    dev_id = part2dev_id[handoff_part]
                    if dev_id not in used:
                        yield self._devs[dev_id]
                        used.add(dev_id)
                        if len(used) == self._num_devs:
                            hit_all_devs = True
                            break
| |
# coding=utf-8
# Copyright 2022 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements RigL."""
import gin
from rigl.rigl_tf2 import utils
import tensorflow as tf
def get_all_layers(model, filter_fn=lambda _: True):
  """Recursively collects the leaf layers of `model`.

  Sub-models (anything with a `.layers` attribute) are descended into;
  leaf layers are kept only if `filter_fn(layer)` is truthy.
  """
  collected = []
  for sub_layer in model.layers:
    if hasattr(sub_layer, 'layers'):
      # Nested model: recurse and splice its leaves in order.
      collected.extend(get_all_layers(sub_layer, filter_fn=filter_fn))
    elif filter_fn(sub_layer):
      collected.append(sub_layer)
  return collected
def is_pruned(layer):
  """Whether `layer` is a trainable layer wrapped by the pruning wrapper."""
  if isinstance(layer, utils.PRUNING_WRAPPER):
    return layer.trainable
  return False
class MaskUpdater(object):
"""Base class for mask update algorithms.
Attributes:
model: tf.keras.Model
optimizer: tf.train.Optimizer
use_stateless: bool, if True stateless operations are used. This is
important for multi-worker jobs not to diverge.
stateless_seed_offset: int, added to the seed of stateless operations.
Use this to create randomness without divergence across workers.
"""
def __init__(self, model, optimizer, use_stateless=True,
stateless_seed_offset=0, loss_fn=None):
self._model = model
self._optimizer = optimizer
self._use_stateless = use_stateless
self._stateless_seed_offset = stateless_seed_offset
self._loss_fn = loss_fn
self.val_x = self.val_y = None
def prune_masks(self, prune_fraction):
"""Updates a fraction of weights in each layer."""
all_masks, all_vars = self.get_vars_and_masks()
drop_scores = self.get_drop_scores(all_vars, all_masks)
grow_score = None
for mask, var, drop_score in zip(all_masks, all_vars, drop_scores):
self.generic_mask_update(mask, var, drop_score, grow_score,
prune_fraction)
def update_masks(self, drop_fraction):
"""Updates a fraction of weights in each layer."""
all_masks, all_vars = self.get_vars_and_masks()
drop_scores = self.get_drop_scores(all_vars, all_masks)
grow_scores = self.get_grow_scores(all_vars, all_masks)
for mask, var, drop_score, grow_score in zip(all_masks, all_vars,
drop_scores, grow_scores):
self.generic_mask_update(mask, var, drop_score, grow_score, drop_fraction)
def get_all_pruning_layers(self):
  """Returns all pruned layers from the model."""
  model = self._model
  if not hasattr(model, 'layers'):
    # A bare layer rather than a model: include it iff it is pruned.
    return [model] if is_pruned(model) else []
  return get_all_layers(model, filter_fn=is_pruned)
def get_vars_and_masks(self):
"""Gets all masked variables and corresponding masks."""
all_masks = []
all_vars = []
for layer in self.get_all_pruning_layers():
for var, mask, _ in layer.pruning_vars:
all_vars.append(var)
all_masks.append(mask)
return all_masks, all_vars
def get_drop_scores(self, all_vars, all_masks):
  """Returns per-variable scores used to pick connections to drop.

  Abstract; subclasses must implement.
  """
  raise NotImplementedError
def get_grow_scores(self, all_vars, all_masks):
  """Returns per-variable scores used to pick connections to grow.

  Abstract; subclasses must implement.
  """
  raise NotImplementedError
  def generic_mask_update(self, mask, var, score_drop, score_grow,
                          drop_fraction, reinit_when_same=False):
    """Prunes+grows connections, all tensors same shape.

    Args:
      mask: tf.Variable, 0/1 tensor marking active connections.
      var: tf.Variable, weights governed by `mask` (same shape).
      score_drop: tensor; higher score = connection more likely to be KEPT.
      score_grow: tensor or None; higher score = more likely to be grown.
        When None only pruning is performed.
      drop_fraction: float, fraction of currently-active connections to drop.
      reinit_when_same: bool, if True a connection dropped and regrown in the
        same update is reset to the grow value anyway.
    """
    n_total = tf.size(score_drop)
    # Currently active connections, and how many of them to prune/keep.
    n_ones = tf.cast(tf.reduce_sum(mask), dtype=tf.int32)
    n_prune = tf.cast(
        tf.cast(n_ones, dtype=tf.float32) * drop_fraction, tf.int32)
    n_keep = n_ones - n_prune
    # Sort the entire array since the k needs to be constant for TPU.
    _, sorted_indices = tf.math.top_k(
        tf.reshape(score_drop, [-1]), k=n_total)
    sorted_indices_ex = tf.expand_dims(sorted_indices, 1)
    # We will have zeros after having `n_keep` many ones.
    new_values = tf.where(
        tf.range(n_total) < n_keep,
        tf.ones_like(sorted_indices, dtype=mask.dtype),
        tf.zeros_like(sorted_indices, dtype=mask.dtype))
    # mask1: flat 0/1 tensor of the surviving (kept) connections.
    mask1 = tf.scatter_nd(sorted_indices_ex, new_values,
                          new_values.shape)
    if score_grow is not None:
      # Flatten the scores.
      score_grow = tf.reshape(score_grow, [-1])
      # Set scores of the enabled connections(ones) to min(s) - 1, so that they
      # have the lowest scores.
      score_grow_lifted = tf.where(
          tf.math.equal(mask1, 1),
          tf.ones_like(mask1) * (tf.reduce_min(score_grow) - 1), score_grow)
      # Grow exactly as many connections as were pruned (n_prune).
      _, sorted_indices = tf.math.top_k(score_grow_lifted, k=n_total)
      sorted_indices_ex = tf.expand_dims(sorted_indices, 1)
      new_values = tf.where(
          tf.range(n_total) < n_prune,
          tf.ones_like(sorted_indices, dtype=mask.dtype),
          tf.zeros_like(sorted_indices, dtype=mask.dtype))
      mask2 = tf.scatter_nd(sorted_indices_ex, new_values, new_values.shape)
      # Ensure masks are disjoint.
      tf.debugging.assert_near(tf.reduce_sum(mask1 * mask2), 0.)
      # Let's set the weights of the growed connections.
      mask2_reshaped = tf.reshape(mask2, mask.shape)
      # Set the values of the new connections.
      grow_tensor = tf.zeros_like(var, dtype=var.dtype)
      if reinit_when_same:
        # If dropped and grown, we re-initialize.
        new_connections = tf.math.equal(mask2_reshaped, 1)
      else:
        new_connections = tf.math.logical_and(
            tf.math.equal(mask2_reshaped, 1), tf.math.equal(mask, 0))
      # Grown connections start at zero; existing weights are untouched.
      new_weights = tf.where(new_connections, grow_tensor, var)
      var.assign(new_weights)
      # Ensure there is no momentum value for new connections
      self.reset_momentum(var, new_connections)
      mask_combined = tf.reshape(mask1 + mask2, mask.shape)
    else:
      mask_combined = tf.reshape(mask1, mask.shape)
    mask.assign(mask_combined)
def reset_momentum(self, var, new_connections):
for s_name in self._optimizer.get_slot_names():
# Momentum variable for example, we reset the aggregated values to zero.
optim_var = self._optimizer.get_slot(var, s_name)
new_values = tf.where(new_connections,
tf.zeros_like(optim_var), optim_var)
optim_var.assign(new_values)
def _random_uniform(self, *args, **kwargs):
if self._use_stateless:
c_seed = self._stateless_seed_offset + kwargs['seed']
kwargs['seed'] = tf.cast(
tf.stack([c_seed, self._optimizer.iterations]), tf.int32)
return tf.random.stateless_uniform(*args, **kwargs)
else:
return tf.random.uniform(*args, **kwargs)
def _random_normal(self, *args, **kwargs):
if self._use_stateless:
c_seed = self._stateless_seed_offset + kwargs['seed']
kwargs['seed'] = tf.cast(
tf.stack([c_seed, self._optimizer.iterations]), tf.int32)
return tf.random.stateless_normal(*args, **kwargs)
else:
return tf.random.normal(*args, **kwargs)
def set_validation_data(self, val_x, val_y):
self.val_x, self.val_y = val_x, val_y
  def _get_gradients(self, all_vars):
    """Returns the gradients of the given weights using the validation data."""
    # NOTE(review): requires set_validation_data() to have been called and a
    # loss_fn to be set — neither is checked here; confirm callers guarantee it.
    with tf.GradientTape() as tape:
      batch_loss = self._loss_fn(self.val_x, self.val_y)
    grads = tape.gradient(batch_loss, all_vars)
    # Sum-reduce across replicas so every worker grows identical connections.
    if grads:
      grads = tf.distribute.get_replica_context().all_reduce('sum', grads)
    return grads
class SET(MaskUpdater):
  """Sparse Evolutionary Training (SET) mask updater.

  Drops the lowest-magnitude active weights and grows connections back at
  uniformly random locations.
  See https://www.nature.com/articles/s41467-018-04316-3
  This optimizer wraps a regular optimizer and performs updates on the masks
  according to schedule given.
  """
  def get_drop_scores(self, all_vars, all_masks, noise_std=0):
    # Drop score is the magnitude of the masked weight, optionally with
    # per-variable deterministic Gaussian noise added.
    scores = []
    for cur_mask, cur_var in zip(all_masks, all_vars):
      magnitude = tf.math.abs(cur_mask * cur_var)
      if noise_std != 0:
        magnitude += self._random_normal(
            magnitude.shape, stddev=noise_std, dtype=magnitude.dtype,
            seed=(hash(cur_var.name + 'drop')))
      scores.append(magnitude)
    return scores
  def get_grow_scores(self, all_vars, all_masks):
    # Random growth: each candidate connection gets a uniform random score.
    grow_scores = []
    for cur_var in all_vars:
      grow_scores.append(
          self._random_uniform(cur_var.shape,
                               seed=hash(cur_var.name + 'grow')))
    return grow_scores
class RigL(MaskUpdater):
  """RigL mask updater: magnitude-based drop, gradient-based grow.

  Drops low-magnitude active weights and grows connections where the
  gradient magnitude (computed on the validation batch via `_get_gradients`)
  is largest.
  """
  def get_drop_scores(self, all_vars, all_masks, noise_std=0):
    # Score = |masked weight|; optional Gaussian noise (seeded per variable)
    # breaks ties among small weights deterministically.
    def score_fn(mask, var):
      score = tf.math.abs(mask*var)
      if noise_std != 0:
        score += self._random_normal(
            score.shape, stddev=noise_std, dtype=score.dtype,
            seed=(hash(var.name + 'drop')))
      return score
    return [score_fn(mask, var) for mask, var in zip(all_masks, all_vars)]
  def get_grow_scores(self, all_vars, all_masks):
    # Grow where |gradient| is largest.
    return [tf.abs(g) for g in self._get_gradients(all_vars)]
class RigLInverted(RigL):
  """RigL variant that grows where the gradient magnitude is SMALLEST.

  Grow scores are the negated absolute gradients, so the top-k selection in
  `generic_mask_update` picks the lowest-|grad| connections instead.
  """
  def get_grow_scores(self, all_vars, all_masks):
    return [-tf.abs(g) for g in self._get_gradients(all_vars)]
class UpdateSchedule(object):
  """Base class for mask update algorithms.
  Attributes:
    mask_updater: MaskUpdater, to invoke.
    update_freq: int, frequency of mask updates.
    init_drop_fraction: float, initial drop fraction.
  """
  def __init__(self, mask_updater, init_drop_fraction, update_freq,
               last_update_step):
    self._mask_updater = mask_updater
    self.update_freq = update_freq
    self.last_update_step = last_update_step
    self.init_drop_fraction = init_drop_fraction
    # Most recent drop fraction actually applied; starts at 0.
    self.last_drop_fraction = 0
  def get_drop_fraction(self, step):
    # Subclass hook: how the drop fraction evolves with `step`.
    raise NotImplementedError
  def is_update_iter(self, step):
    """Returns true if it is a valid mask update step."""
    # last_update_step < 0 means, there is no last step.
    # last_update_step = 0 means, never update.
    tf.debugging.Assert(step >= 0, [step])
    if self.last_update_step < 0:
      is_valid_step = True
    elif self.last_update_step == 0:
      is_valid_step = False
    else:
      is_valid_step = step <= self.last_update_step
    # Also require the step to fall on the update frequency grid.
    return tf.logical_and(is_valid_step, step % self.update_freq == 0)
  def update(self, step, check_update_iter=True):
    # check_update_iter=False lets callers force an off-schedule update.
    if check_update_iter:
      tf.debugging.Assert(self.is_update_iter(step), [step])
    self.last_drop_fraction = self.get_drop_fraction(step)
    def true_fn():
      self._mask_updater.update_masks(self.last_drop_fraction)
    # tf.cond keeps this graph-compatible; skips when the fraction is zero.
    tf.cond(self.last_drop_fraction > 0., true_fn, lambda: None)
  def prune(self, prune_fraction):
    # One-shot prune (no regrowth), delegated to the mask updater.
    self.last_drop_fraction = prune_fraction
    self._mask_updater.prune_masks(self.last_drop_fraction)
  def set_validation_data(self, val_x, val_y):
    self._mask_updater.set_validation_data(val_x, val_y)
class ConstantUpdateSchedule(UpdateSchedule):
  """Updates a constant fraction of connections."""
  def get_drop_fraction(self, step):
    # Same fraction at every update step, independent of training progress.
    return self.init_drop_fraction
class CosineUpdateSchedule(UpdateSchedule):
  """Cosine-decays the updated fraction of connections.

  The drop fraction starts at `init_drop_fraction` and follows a cosine
  decay down to 0 (alpha=0.0) at `last_update_step`.
  """
  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._drop_fraction_fn = tf.keras.experimental.CosineDecay(
        self.init_drop_fraction,
        self.last_update_step,
        alpha=0.0,
        name='cosine_drop_fraction')
  def get_drop_fraction(self, step):
    return self._drop_fraction_fn(step)
class ScaledLRUpdateSchedule(UpdateSchedule):
  """Scales the drop fraction with learning rate."""
  # drop_fraction(step) = init_drop_fraction * lr(step) / lr(0), so mask
  # churn anneals together with the optimizer's learning-rate schedule.
  def __init__(self, mask_updater, init_drop_fraction, update_freq,
               last_update_step, optimizer):
    self._optimizer = optimizer
    # Reference learning rate at step 0, used to normalize later fractions.
    self._initial_lr = self._get_lr(0)
    super(ScaledLRUpdateSchedule, self).__init__(
        mask_updater, init_drop_fraction, update_freq, last_update_step)
  def _get_lr(self, step):
    # lr may be a plain tf.Variable or a callable learning-rate schedule.
    if isinstance(self._optimizer.lr, tf.Variable):
      return self._optimizer.lr.numpy()
    else:
      return self._optimizer.lr(step)
  def get_drop_fraction(self, step):
    current_lr = self._get_lr(step)
    return (self.init_drop_fraction / self._initial_lr) * current_lr
@gin.configurable(
    'mask_updater',
    allowlist=[
        'update_alg',
        'schedule_alg',
        'update_freq',
        'init_drop_fraction',
        'last_update_step',
        'use_stateless',
    ])
def get_mask_updater(
    model,
    optimizer,
    loss_fn,
    update_alg='',
    schedule_alg='lr',
    update_freq=100,
    init_drop_fraction=0.3,
    last_update_step=-1,
    use_stateless=True):
  """Retrieves the update algorithm and passes it to the schedule object.

  Returns None when no update algorithm is configured; raises ValueError for
  unknown algorithm or schedule names.
  """
  if not update_alg:
    return None
  # Pick the mask-update algorithm.
  if update_alg == 'set':
    mask_updater = SET(model, optimizer, use_stateless=use_stateless)
  elif update_alg in ('rigl', 'rigl_inverted'):
    updater_cls = RigL if update_alg == 'rigl' else RigLInverted
    mask_updater = updater_cls(
        model, optimizer, loss_fn=loss_fn, use_stateless=use_stateless)
  else:
    raise ValueError('update_alg:%s is not valid.' % update_alg)
  # Wrap it in the requested update schedule.
  if schedule_alg == 'lr':
    return ScaledLRUpdateSchedule(
        mask_updater, init_drop_fraction, update_freq, last_update_step,
        optimizer)
  if schedule_alg == 'cosine':
    return CosineUpdateSchedule(
        mask_updater, init_drop_fraction, update_freq, last_update_step)
  if schedule_alg == 'constant':
    return ConstantUpdateSchedule(
        mask_updater, init_drop_fraction, update_freq, last_update_step)
  raise ValueError('schedule_alg:%s is not valid.' % schedule_alg)
| |
""" Swift tests """
import os
import copy
import logging
from sys import exc_info
from contextlib import contextmanager
from collections import defaultdict
from tempfile import NamedTemporaryFile
from eventlet.green import socket
from tempfile import mkdtemp
from shutil import rmtree
from test import get_config
from swift.common.utils import config_true_value
from hashlib import md5
from eventlet import sleep, Timeout
import logging.handlers
from httplib import HTTPException
class FakeRing(object):
    """In-memory ring double yielding deterministic fake devices.

    (Python 2 module: keeps `xrange`.)
    """

    def __init__(self, replicas=3, max_more_nodes=0):
        # 9 total nodes (6 more past the initial 3) is the cap, no matter if
        # this is set higher, or R^2 for R replicas
        self.replicas = replicas
        self.max_more_nodes = max_more_nodes
        self.devs = {}

    def set_replicas(self, replicas):
        self.replicas = replicas
        self.devs = {}

    @property
    def replica_count(self):
        return self.replicas

    def get_part(self, account, container=None, obj=None):
        return 1

    def get_nodes(self, account, container=None, obj=None):
        devs = []
        for idx in xrange(self.replicas):
            dev = self.devs.get(idx)
            if dev is None:
                dev = {'ip': '10.0.0.%s' % idx,
                       'port': 1000 + idx,
                       'device': 'sd' + (chr(ord('a') + idx)),
                       'zone': idx % 3,
                       'region': idx % 2,
                       'id': idx}
                self.devs[idx] = dev
            devs.append(dev)
        return 1, devs

    def get_part_nodes(self, part):
        return self.get_nodes('blah')[1]

    def get_more_nodes(self, part):
        # replicas^2 is the true cap
        cap = min(self.replicas + self.max_more_nodes,
                  self.replicas * self.replicas)
        for idx in xrange(self.replicas, cap):
            yield {'ip': '10.0.0.%s' % idx,
                   'port': 1000 + idx,
                   'device': 'sda',
                   'zone': idx % 3,
                   'region': idx % 2,
                   'id': idx}
class FakeMemcache(object):
    """Dict-backed memcache double: always succeeds, never expires keys."""

    def __init__(self):
        self.store = {}

    def get(self, key):
        return self.store.get(key)

    def keys(self):
        return self.store.keys()

    def set(self, key, value, time=0):
        # `time` (expiry) is accepted for interface parity but ignored.
        self.store[key] = value
        return True

    def incr(self, key, time=0):
        # Missing keys start at 0, matching the real client's behavior here.
        new_value = self.store.setdefault(key, 0) + 1
        self.store[key] = new_value
        return new_value

    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        # Locking always "succeeds" immediately in tests.
        yield True

    def delete(self, key):
        # Deleting a missing key is not an error.
        self.store.pop(key, None)
        return True
def readuntil2crlfs(fd):
    """Reads fd one char at a time until two consecutive CRLFs are seen.

    Returns everything read, including the blank line that terminates an
    HTTP header block. Raises ValueError if the stream ends first.
    """
    collected = ''
    prev = ''
    crlf_count = 0
    while crlf_count < 2:
        ch = fd.read(1)
        if not ch:
            raise ValueError("didn't get two CRLFs; just got %r" % collected)
        collected += ch
        # A bare CR not following an LF restarts the count.
        if ch == '\r' and prev != '\n':
            crlf_count = 0
        if prev == '\r' and ch == '\n':
            crlf_count += 1
        prev = ch
    return collected
def connect_tcp(hostport):
    # Opens a TCP connection to the (host, port) tuple and returns the socket.
    # Uses eventlet's green socket so the call cooperates with the hub.
    rv = socket.socket()
    rv.connect(hostport)
    return rv
@contextmanager
def tmpfile(content):
    """Yields the path of a temp file holding str(content); unlinks it after."""
    with NamedTemporaryFile('w', delete=False) as handle:
        path = handle.name
        handle.write(str(content))
    try:
        yield path
    finally:
        os.unlink(path)
xattr_data = {}
def _get_inode(fd):
if not isinstance(fd, int):
try:
fd = fd.fileno()
except AttributeError:
return os.stat(fd).st_ino
return os.fstat(fd).st_ino
def _setxattr(fd, k, v):
    # Keyed by inode (not fd) so the value survives reopening the same file.
    inode = _get_inode(fd)
    xattr_data.setdefault(inode, {})[k] = v
def _getxattr(fd, k):
    # Missing (or falsy) values raise IOError, mimicking the real xattr module.
    value = xattr_data.get(_get_inode(fd), {}).get(k)
    if not value:
        raise IOError
    return value
import xattr
# Monkey-patch the real xattr module so code under test reads and writes
# extended attributes from the in-memory xattr_data dict instead of the
# filesystem (which may not support xattrs at all).
xattr.setxattr = _setxattr
xattr.getxattr = _getxattr
@contextmanager
def temptree(files, contents=''):
    """Yields a temp dir populated with `files`, each holding `contents[i]`.

    Contents are padded with empty strings when shorter than `files`;
    absolute paths are re-rooted inside the temp dir. The whole tree is
    removed on exit.
    """
    # generate enough contents to fill the files
    count = len(files)
    padded = (list(contents) + [''] * count)[:count]
    tempdir = mkdtemp()
    try:
        for rel_path, body in zip(files, padded):
            if os.path.isabs(rel_path):
                rel_path = '.' + rel_path
            target = os.path.join(tempdir, rel_path)
            parent = os.path.dirname(target)
            if not os.path.exists(parent):
                os.makedirs(parent)
            with open(target, 'w') as handle:
                handle.write(str(body))
        yield tempdir
    finally:
        rmtree(tempdir)
class NullLoggingHandler(logging.Handler):
    # Handler that discards every record; attach it to silence a logger.
    def emit(self, record):
        pass
class FakeLogger(object):
    # a thread safe logger
    def __init__(self, *args, **kwargs):
        self._clear()
        self.level = logging.NOTSET
        if 'facility' in kwargs:
            self.facility = kwargs['facility']
    def _clear(self):
        # method name -> list of (args, kwargs) tuples, one per call recorded.
        self.log_dict = defaultdict(list)
    def _store_in(store_name):
        # Class-body helper (note: no `self` — it runs at class definition
        # time) that builds a stub method recording calls under `store_name`.
        def stub_fn(self, *args, **kwargs):
            self.log_dict[store_name].append((args, kwargs))
        return stub_fn
    error = _store_in('error')
    info = _store_in('info')
    warning = _store_in('warning')
    debug = _store_in('debug')
    def exception(self, *args, **kwargs):
        # Also captures the in-flight exception's message via exc_info().
        self.log_dict['exception'].append((args, kwargs, str(exc_info()[1])))
        print 'FakeLogger Exception: %s' % self.log_dict
    # mock out the StatsD logging methods:
    increment = _store_in('increment')
    decrement = _store_in('decrement')
    timing = _store_in('timing')
    timing_since = _store_in('timing_since')
    update_stats = _store_in('update_stats')
    set_statsd_prefix = _store_in('set_statsd_prefix')
    def get_increments(self):
        # Metric names passed to increment(), in call order.
        return [call[0][0] for call in self.log_dict['increment']]
    def get_increment_counts(self):
        # metric name -> number of increment() calls made for it.
        counts = {}
        for metric in self.get_increments():
            if metric not in counts:
                counts[metric] = 0
            counts[metric] += 1
        return counts
    def setFormatter(self, obj):
        self.formatter = obj
    def close(self):
        self._clear()
    def set_name(self, name):
        # don't touch _handlers
        self._name = name
    # The rest of the logging.Handler interface is deliberately a no-op so
    # the fake can be installed wherever a real handler is expected.
    def acquire(self):
        pass
    def release(self):
        pass
    def createLock(self):
        pass
    def emit(self, record):
        pass
    def handle(self, record):
        pass
    def flush(self):
        pass
    def handleError(self, record):
        pass
# Keep a reference to the real handler class so constants can be copied from
# it even after the swap below.
original_syslog_handler = logging.handlers.SysLogHandler
def fake_syslog_handler():
    # Copy the LOG_* facility/priority constants onto FakeLogger, then swap
    # FakeLogger in for SysLogHandler so syslog-bound loggers use the fake.
    for attr in dir(original_syslog_handler):
        if attr.startswith('LOG'):
            setattr(FakeLogger, attr,
                    copy.copy(getattr(logging.handlers.SysLogHandler, attr)))
    FakeLogger.priority_map = \
        copy.deepcopy(logging.handlers.SysLogHandler.priority_map)
    logging.handlers.SysLogHandler = FakeLogger
# Only install the fake when the unit-test config asks for it.
if config_true_value(get_config('unit_test').get('fake_syslog', 'False')):
    fake_syslog_handler()
class MockTrue(object):
    """
    Instances of MockTrue evaluate like True
    Any attr accessed on an instance of MockTrue will return a MockTrue
    instance. Any method called on an instance of MockTrue will return
    a MockTrue instance.
    >>> thing = MockTrue()
    >>> thing
    True
    >>> thing == True # True == True
    True
    >>> thing == False # True == False
    False
    >>> thing != True # True != True
    False
    >>> thing != False # True != False
    True
    >>> thing.attribute
    True
    >>> thing.method()
    True
    >>> thing.attribute.method()
    True
    >>> thing.method().attribute
    True
    """
    # Intercepting __getattribute__ (not __getattr__) means EVERY attribute
    # lookup through the instance returns self, so chains never terminate.
    def __getattribute__(self, *args, **kwargs):
        return self
    def __call__(self, *args, **kwargs):
        return self
    # No `self` on purpose: the bound instance arrives in *args and is unused.
    def __repr__(*args, **kwargs):
        return repr(True)
    def __eq__(self, other):
        return other is True
    def __ne__(self, other):
        return other is not True
@contextmanager
def mock(update):
    """Temporarily sets attributes given as {'pkg.mod.attr': value} paths.

    On exit, previously-existing attributes are restored and newly-created
    ones are deleted. (Note: restoration only happens on normal exit, as in
    the original implementation.)
    """
    restore = []
    remove = []
    for dotted, value in update.items():
        parts = dotted.split('.')
        attr = parts.pop(-1)
        target = __import__(parts[0], fromlist=parts[1:])
        for name in parts[1:]:
            target = getattr(target, name)
        if hasattr(target, attr):
            restore.append((target, attr, getattr(target, attr)))
        else:
            remove.append((target, attr))
        setattr(target, attr, value)
    yield True
    for target, attr, value in restore:
        setattr(target, attr, value)
    for target, attr in remove:
        delattr(target, attr)
def fake_http_connect(*code_iter, **kwargs):
    """Returns a mock http_connect whose successive calls yield FakeConns.

    Each positional argument is the status for one connection — either an
    int, or a (status, expect_status) pair — consumed in call order.
    Keyword knobs seen below: etags, timestamps, headers, body/body_iter,
    missing_container, count, slow, raise_exc, raise_timeout_exc,
    give_content_type, give_connect.
    """
    class FakeConn(object):
        # Mimics the subset of an HTTP connection/response object used by
        # callers: getresponse/getexpect/getheader(s)/read/send.
        def __init__(self, status, etag=None, body='', timestamp='1',
                     expect_status=None, headers=None):
            self.status = status
            if expect_status is None:
                self.expect_status = self.status
            else:
                self.expect_status = expect_status
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.sent = 0
            self.received = 0
            self.etag = etag
            self.body = body
            self.headers = headers or {}
            self.timestamp = timestamp
        def getresponse(self):
            # Optionally blow up to exercise callers' error handling.
            if kwargs.get('raise_exc'):
                raise Exception('test')
            if kwargs.get('raise_timeout_exc'):
                raise Timeout()
            return self
        def getexpect(self):
            # Negative expect_status sentinels simulate 100-continue
            # failures: -2 connection error, -3 507 response, -4 early 201.
            if self.expect_status == -2:
                raise HTTPException()
            if self.expect_status == -3:
                return FakeConn(507)
            if self.expect_status == -4:
                return FakeConn(201)
            return FakeConn(100)
        def getheaders(self):
            etag = self.etag
            if not etag:
                if isinstance(self.body, str):
                    etag = '"' + md5(self.body).hexdigest() + '"'
                else:
                    etag = '"68b329da9893e34099c7d8ad5cb9c940"'
            headers = {'content-length': len(self.body),
                       'content-type': 'x-application/test',
                       'x-timestamp': self.timestamp,
                       'last-modified': self.timestamp,
                       'x-object-meta-test': 'testing',
                       'x-delete-at': '9876543210',
                       'etag': etag,
                       'x-works': 'yes'}
            if self.status // 100 == 2:
                headers['x-account-container-count'] = \
                    kwargs.get('count', 12345)
            if not self.timestamp:
                del headers['x-timestamp']
            try:
                # One value per connection; False marks a missing container.
                if container_ts_iter.next() is False:
                    headers['x-container-timestamp'] = '1'
            except StopIteration:
                pass
            if 'slow' in kwargs:
                headers['content-length'] = '4'
            headers.update(self.headers)
            return headers.items()
        def read(self, amt=None):
            # In 'slow' mode, dribble out single bytes with 100ms pauses.
            if 'slow' in kwargs:
                if self.sent < 4:
                    self.sent += 1
                    sleep(0.1)
                    return ' '
            rv = self.body[:amt]
            self.body = self.body[amt:]
            return rv
        def send(self, amt=None):
            if 'slow' in kwargs:
                if self.received < 4:
                    self.received += 1
                    sleep(0.1)
        def getheader(self, name, default=None):
            return dict(self.getheaders()).get(name.lower(), default)
    # Per-connection value iterators, all padded to len(code_iter).
    timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
    etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
    if isinstance(kwargs.get('headers'), list):
        headers_iter = iter(kwargs['headers'])
    else:
        headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
    x = kwargs.get('missing_container', [False] * len(code_iter))
    if not isinstance(x, (tuple, list)):
        x = [x] * len(code_iter)
    container_ts_iter = iter(x)
    code_iter = iter(code_iter)
    static_body = kwargs.get('body', None)
    body_iter = kwargs.get('body_iter', None)
    if body_iter:
        body_iter = iter(body_iter)
    def connect(*args, **ckwargs):
        if 'give_content_type' in kwargs:
            # NOTE(review): args[6] looks like the request headers dict of
            # the real http_connect signature — confirm against callers.
            if len(args) >= 7 and 'Content-Type' in args[6]:
                kwargs['give_content_type'](args[6]['Content-Type'])
            else:
                kwargs['give_content_type']('')
        if 'give_connect' in kwargs:
            kwargs['give_connect'](*args, **ckwargs)
        status = code_iter.next()
        if isinstance(status, tuple):
            status, expect_status = status
        else:
            expect_status = status
        etag = etag_iter.next()
        headers = headers_iter.next()
        timestamp = timestamps_iter.next()
        # Non-positive status simulates a connection-level failure.
        if status <= 0:
            raise HTTPException()
        if body_iter is None:
            body = static_body or ''
        else:
            body = body_iter.next()
        return FakeConn(status, etag, body=body, timestamp=timestamp,
                        expect_status=expect_status, headers=headers)
    return connect
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Driver for partitioning and building a Relay module for CUTLASS offload."""
import logging
import os
import multiprocessing
import tvm
from tvm import runtime, relay
from tvm.contrib.nvcc import find_cuda_path, get_cuda_version
from .gen_gemm import CutlassGemmProfiler
from .gen_conv2d import CutlassConv2DProfiler
logger = logging.getLogger("cutlass")
def _get_cutlass_path():
    # Locates the CUTLASS submodule relative to this file (four levels up to
    # the TVM source root); asserts it exists since a pip-installed TVM does
    # not ship the 3rdparty sources.
    tvm_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../")
    cutlass_path = os.path.join(tvm_root, "3rdparty/cutlass")
    assert os.path.exists(
        cutlass_path
    ), """The CUTLASS root directory not found in {}.
    Currently, using CUTLASS requires building TVM from source.""".format(
        cutlass_path
    )
    return cutlass_path
def _get_cutlass_compile_options(sm, threads, use_fast_math=False):
    """Builds export_library kwargs (compiler + nvcc flags) for CUTLASS.

    Args:
        sm: target compute capability (e.g. 75, 80).
        threads: nvcc parallel-compile thread count; < 0 means all cores.
        use_fast_math: enable the faster tanh-based sigmoid approximation.
    """
    cutlass_root = _get_cutlass_path()
    options = [
        "-DCUTLASS_ENABLE_TENSOR_CORE_MMA=1",
        "-gencode=arch=compute_%d,code=[sm_%d,compute_%d]" % (sm, sm, sm),
        "-Xcompiler=-fPIC",
        "-Xcompiler=-Wconversion",
        "-Xcompiler=-fno-strict-aliasing",
        "-O3",
        "-std=c++14",
        "-I" + os.path.join(cutlass_root, "include"),
        "-I" + os.path.join(cutlass_root, "tools/util/include"),
    ]
    if use_fast_math:
        options.append("-DCUTLASS_USE_TANH_FOR_SIGMOID")
    # nvcc gained parallel compilation (-t) in CUDA 11.2.
    if get_cuda_version(find_cuda_path()) >= 11.2:
        ncpu = multiprocessing.cpu_count() if threads < 0 else threads
        options.append("-t %d" % ncpu)
    return {"cc": "nvcc", "options": options}
class OpAnnotator(tvm.relay.ExprVisitor):
    """Annotates partitioned functions with shape and dtype information."""
    def __init__(self):
        super().__init__()
        # op_type plus per-argument and return shape/dtype, filled by visits.
        self.signature = {}
    def visit_call(self, call):
        op = call.op
        if isinstance(op, relay.Function) and "PartitionedFromPattern" in op.attrs:
            # A partitioned composite: record its signature, then recurse
            # into its body to find the backing op (e.g. the conv2d).
            self.signature["op_type"] = op.attrs["Composite"]
            for i, arg in enumerate(op.params):
                self.signature["arg%d_shape" % i] = arg.checked_type.shape
                self.signature["arg%d_dtype" % i] = arg.checked_type.dtype
            self.signature["ret_shape"] = op.ret_type.shape
            self.signature["ret_dtype"] = op.ret_type.dtype
            self.visit(op.body)
        if str(op) == "nn.conv2d":
            # Save conv2d attrs (padding/strides/dilation) for the profiler.
            self.op_attrs = call.attrs
        for arg in call.args:
            self.visit(arg)
def select_gemm_kernel(
    cutlass_profiler, op_type, MM, KK, NN, out_dtype, batched, profile_all, use_multiprocessing
):
    """Selects a GEMM kernel via the CUTLASS profiler.

    Dynamic workloads (any dimension being tvm.tir.Any) cannot be profiled,
    so they fall back to the profiler's default kernel.
    """
    is_dynamic = any(isinstance(dim, tvm.tir.Any) for dim in (MM, KK, NN))
    if is_dynamic:
        default = cutlass_profiler.get_default(op_type, out_dtype, batched=batched)
        name, cutlass_op_def = default["name"], default["opdef"]
        logger.info("Picked the default kernel %s", name)
        return name, cutlass_op_def
    name, cutlass_op_def, _ = cutlass_profiler.profile(
        op_type,
        MM,
        NN,
        KK,
        out_dtype,
        batched=batched,
        profile_all=profile_all,
        use_multiprocessing=use_multiprocessing,
    )
    if profile_all:
        logger.info("The best kernel is %s", name)
    else:
        logger.info("Picked the first kernel found %s", name)
    return name, cutlass_op_def
def handle_batch_matmul(
    cutlass_profiler, op_type, arg0_shape, arg1_shape, out_dtype, profile_all, use_multiprocessing
):
    """Profile and select a kernel for batch_matmul op workload."""
    batch = arg0_shape[0]
    m_dim = arg0_shape[1]
    k_dim = arg0_shape[2]
    n_dim = arg1_shape[1]
    name, cutlass_op_def = select_gemm_kernel(
        cutlass_profiler, op_type, m_dim, k_dim, n_dim, out_dtype, True,
        profile_all, use_multiprocessing
    )
    # Leading dimensions follow the "TN" convention the generated kernels use.
    return {
        "batch": batch,
        "batch_stride_A": m_dim * k_dim,
        "batch_stride_B": n_dim * arg1_shape[2],
        "batch_stride_C": m_dim * n_dim,
        "cutlass_op_def": cutlass_op_def,
        "cutlass_op_name": name,
        "lda": "K",
        "ldb": "K",
        "ldc": "N",
    }
def handle_dense(
    cutlass_profiler, op_type, arg0_shape, arg1_shape, out_dtype, profile_all, use_multiprocessing
):
    """Profile and select a kernel for dense op workload."""
    m_dim = arg0_shape[0]
    k_dim = arg0_shape[1]
    n_dim = arg1_shape[0]
    name, cutlass_op_def = select_gemm_kernel(
        cutlass_profiler, op_type, m_dim, k_dim, n_dim, out_dtype, False,
        profile_all, use_multiprocessing
    )
    # Dense expects data row-major and weight col-major ("tn" kernels).
    assert "tn_align" in name, "Only supports (row_major, col_major) input layout for now."
    return {
        "cutlass_op_def": cutlass_op_def,
        "cutlass_op_name": name,
        "lda": "K",
        "ldb": "K",
        "ldc": "N",
    }
def handle_conv2d(
    cutlass_profiler,
    op_type,
    d_shape,
    w_shape,
    padding,
    strides,
    dilation,
    out_dtype,
    profile_all,
    use_multiprocessing,
):
    """Profile and select a kernel for conv2d op workload."""
    # Dynamic data shapes cannot be profiled; fall back to the default kernel.
    if any(isinstance(s, tvm.tir.Any) for s in d_shape):
        out = cutlass_profiler.get_default(op_type, out_dtype)
        name, cutlass_op_def = out["name"], out["opdef"]
        logger.info("Picked the default kernel %s", name)
    else:
        name, cutlass_op_def, _ = cutlass_profiler.profile(
            op_type,
            d_shape,
            w_shape,
            padding,
            strides,
            dilation,
            out_dtype,
            profile_all=profile_all,
            use_multiprocessing=use_multiprocessing,
        )
    if profile_all:
        logger.info("The best kernel is %s", name)
    else:
        logger.info("Picked the first kernel found %s", name)
    return {
        "cutlass_op_def": cutlass_op_def,
        "cutlass_op_name": name,
    }
def tune_cutlass_kernels(mod, sm, profile_all=True, use_multiprocessing=False, tmp_dir="./tmp"):
    """Given a module partitioned for CUTLASS offloading, profile each workload to select which
    kernels to emit.
    Parameters
    ----------
    mod : IRModule
        The Relay module with cutlass partitions.
    sm : int
        An integer specifying the compute capability. For example, 75 for Turing and
        80 or 86 for Ampere.
    profile_all : bool
        Whether or not profile all candidate kernels, or stop profiling after
        the first applicable kernel is found.
    use_multiprocessing : bool
        Whether or not compile profiler executables for different kernels in parallel.
    tmp_dir : string, optional
        A temporary directory where intermediate compiled artifacts will be stored.
    Returns
    -------
    mod : IRModule
        The updated module annotated with cutlass profiling information.
    num_cutlass_partition : int
        The number of partitioned functions created for CUTLASS.
    """
    gemm_profiler = CutlassGemmProfiler(sm, _get_cutlass_path(), tmp_dir)
    conv2d_profiler = CutlassConv2DProfiler(sm, _get_cutlass_path(), tmp_dir)
    num_cutlass_partition = 0
    for var in mod.get_global_vars():
        fun_name = var.name_hint
        func = mod[fun_name]
        annotator = OpAnnotator()
        # Only functions partitioned for cutlass (by name) are processed.
        if "cutlass" in fun_name:
            num_cutlass_partition += 1
            annotator.visit(func)
            # Merge the collected signature and the profiling result into the
            # function's attributes.
            out_dtype = annotator.signature["ret_dtype"]
            op_type = annotator.signature["op_type"]
            new_attrs = {"op_type": op_type}
            new_attrs.update(annotator.signature)
            new_attrs.update(func.attrs)
            arg0_shape = new_attrs["arg0_shape"]
            arg1_shape = new_attrs["arg1_shape"]
            if "conv2d" in op_type:
                new_attrs["padding"] = annotator.op_attrs.padding
                new_attrs["strides"] = annotator.op_attrs.strides
                new_attrs["dilation"] = annotator.op_attrs.dilation
                new_attrs.update(
                    handle_conv2d(
                        conv2d_profiler,
                        op_type,
                        arg0_shape,
                        arg1_shape,
                        annotator.op_attrs.padding,
                        annotator.op_attrs.strides,
                        annotator.op_attrs.dilation,
                        out_dtype,
                        profile_all,
                        use_multiprocessing,
                    )
                )
            elif "batch_matmul" in op_type:
                new_attrs.update(
                    handle_batch_matmul(
                        gemm_profiler,
                        op_type,
                        arg0_shape,
                        arg1_shape,
                        out_dtype,
                        profile_all,
                        use_multiprocessing,
                    )
                )
            elif "dense" in op_type:
                new_attrs.update(
                    handle_dense(
                        gemm_profiler,
                        op_type,
                        arg0_shape,
                        arg1_shape,
                        out_dtype,
                        profile_all,
                        use_multiprocessing,
                    )
                )
            else:
                raise ValueError("%s unsupported composite" % op_type)
            # Rebuild the function with the augmented attribute dict in place.
            new_attrs = tvm.ir.make_node("DictAttrs", **new_attrs)
            new_func = relay.Function(
                func.params,
                func.body,
                ret_type=func.ret_type,
                type_params=func.type_params,
                attrs=new_attrs,
            )
            mod.update_func(var, new_func)
    return mod, num_cutlass_partition
def build_cutlass_kernels(
    lib, sm, tmp_dir="./tmp", lib_path="compile.so", threads=-1, use_fast_math=False
):
    """Compile CUTLASS kernels in lib and return the runtime module ready to run.
    Parameters
    ----------
    lib : GraphExecutorFactoryModule
        The output from relay.build containing compiled host code and non-cutlass kernels.
    sm : int
        An integer specifying the compute capability. For example, 75 for Turing and
        80 or 86 for Ampere.
    tmp_dir : string, optional
        A temporary directory where intermediate compiled artifacts will be stored.
    lib_path : string, optional
        The path to a shared library which will be generated as the result of the build process.
    threads : int, optional
        The number of threads to use for compiling generated kernels. Only available for
        CUDA 11.2 or later. Use all physical cores by default.
    use_fast_math : bool, optional
        Whether or not to use faster but less accurate math intrinsics.
    Returns
    -------
    updated_lib : runtime.Module
        The updated module with compiled cutlass kernels.
    """
    kwargs = _get_cutlass_compile_options(sm, threads, use_fast_math)
    # export_library compiles the generated CUTLASS sources with nvcc and
    # links them with the host code into lib_path.
    lib.export_library(lib_path, workspace_dir=tmp_dir, **kwargs)
    return runtime.load_module(lib_path)
def build_cutlass_kernels_vm(
    vm_exec,
    sm,
    tmp_dir="./tmp",
    lib_path="compile.so",
    vmcode_path="vmcode.ro",
    threads=-1,
    use_fast_math=False,
):
    """Compile CUTLASS kernels in vm_exec and return a VM executable ready to run.
    Parameters
    ----------
    vm_exec : vm.Executable
        The output from relay.vm.compile containing compiled host code and non-cutlass kernels.
    sm : int
        An integer specifying the compute capability. For example, 75 for Turing and
        80 or 86 for Ampere.
    tmp_dir : string, optional
        A temporary directory where intermediate compiled artifacts will be stored.
    lib_path : string, optional
        The path to a shared library which will be generated as the result of the build process.
    vmcode_path : string, optional
        The path where the VM bytecode will be serialized to.
    threads : int, optional
        The number of threads to use for compiling generated kernels. Only available for
        CUDA 11.2 or later. Use all physical cores by default.
    use_fast_math : bool, optional
        Whether or not to use faster but less accurate math intrinsics.
    Returns
    -------
    updated_vm_exec: vm.Executable
        The updated executable with compiled cutlass kernels.
    """
    code, lib = vm_exec.save()
    kwargs = _get_cutlass_compile_options(sm, threads, use_fast_math)
    # Both artifacts are written inside tmp_dir.
    lib_path = os.path.join(tmp_dir, lib_path)
    vmcode_path = os.path.join(tmp_dir, vmcode_path)
    # Compile + link the generated CUTLASS sources with nvcc.
    lib.export_library(lib_path, workspace_dir=tmp_dir, **kwargs)
    with open(vmcode_path, "wb") as fo:
        fo.write(code)
    lib = tvm.runtime.load_module(lib_path)
    # Use a context manager so the bytecode file handle is closed
    # deterministically (the previous open(...).read() leaked it).
    with open(vmcode_path, "rb") as fi:
        code = bytearray(fi.read())
    return tvm.runtime.vm.Executable.load_exec(code, lib)
| |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ipmacbinding_setting
short_description: Configure IP to MAC binding settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall_ipmacbinding feature and setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
firewall_ipmacbinding_setting:
description:
- Configure IP to MAC binding settings.
default: null
type: dict
suboptions:
bindthroughfw:
description:
- Enable/disable use of IP/MAC binding to filter packets that would normally go through the firewall.
type: str
choices:
- enable
- disable
bindtofw:
description:
- Enable/disable use of IP/MAC binding to filter packets that would normally go to the firewall.
type: str
choices:
- enable
- disable
undefinedhost:
description:
- Select action to take on packets with IP/MAC addresses not in the binding list .
type: str
choices:
- allow
- block
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IP to MAC binding settings.
fortios_firewall_ipmacbinding_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_ipmacbinding_setting:
bindthroughfw: "enable"
bindtofw: "enable"
undefinedhost: "allow"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate the fortiosapi client *fos* against the device in *data*.

    Turns debugging on, selects HTTP/HTTPS per the 'https' option and then
    performs the actual login with optional certificate verification.
    """
    ssl_verify = data['ssl_verify']
    fos.debug('on')
    # HTTPS is the default; only an explicit https=False turns it off.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=ssl_verify)
def filter_firewall_ipmacbinding_setting_data(json):
    """Return a copy of *json* restricted to the supported option keys.

    Keys that are absent or mapped to None are dropped so that only
    explicitly-set options are sent to the device.
    """
    option_list = ['bindthroughfw', 'bindtofw', 'undefinedhost']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rename dict keys from snake_case to hyphen-case.

    FortiOS expects hyphenated attribute names while Ansible options use
    underscores.  Dicts are rebuilt with translated keys, lists are converted
    element by element, and scalars are returned unchanged.
    """
    if isinstance(data, list):
        # Bug fix: the original rebound the loop variable (`elem = ...`),
        # so converted elements were never written back into the list.
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        data = {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def firewall_ipmacbinding_setting(data, fos):
    """Push the firewall.ipmacbinding 'setting' payload to the device.

    The raw option dict is filtered down to supported keys and its key names
    converted to the hyphenated form FortiOS expects before the set call.
    """
    payload = filter_firewall_ipmacbinding_setting_data(
        data['firewall_ipmacbinding_setting'])
    return fos.set('firewall.ipmacbinding',
                   'setting',
                   data=underscore_to_hyphen(payload),
                   vdom=data['vdom'])
def is_successful_status(status):
    """Return True when the FOS response indicates success.

    A DELETE that came back 404 also counts as success: the object was
    already gone.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall_ipmacbinding(data, fos):
    """Dispatch to the setting handler and summarise the outcome.

    Returns a (is_error, has_changed, response) triple.
    NOTE(review): when 'firewall_ipmacbinding_setting' is absent/falsy,
    `resp` is never bound and a NameError follows -- behaviour preserved
    from the original; confirm whether a guard is wanted.
    """
    if data['firewall_ipmacbinding_setting']:
        resp = firewall_ipmacbinding_setting(data, fos)
    succeeded = is_successful_status(resp)
    return (not succeeded,
            resp['status'] == "success",
            resp)
def main():
    """Module entry point: parse arguments, reach the device over HTTPAPI or
    legacy fortiosapi, apply the configuration and report back to Ansible."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "firewall_ipmacbinding_setting": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "bindthroughfw": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
                "bindtofw": {"required": False, "type": "str",
                             "choices": ["enable", "disable"]},
                "undefinedhost": {"required": False, "type": "str",
                                  "choices": ["allow", "block"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
        'username' in module.params and module.params['username'] is not None and \
        'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI transport: reuse the persistent connection Ansible opened.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_firewall_ipmacbinding(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: log in directly with the fortiosapi client.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_firewall_ipmacbinding(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| |
import math
from ed2d import window
from ed2d import sysevents
from ed2d.events import Events
from ed2d import context
from ed2d import timing
from ed2d import files
from ed2d import shaders
from ed2d.opengl import gl
from ed2d.opengl import pgl
from gem import vector
from gem import matrix
from ed2d import mesh
from ed2d import text
from ed2d import camera
from ed2d.scenegraph import SceneGraph
from ed2d.assets import objloader
from ed2d import cursor
from ed2d import view
class Viewport(object):
    '''Rectangle + camera pair describing one on-screen view.'''
    def __init__(self, name, camera):
        self.name = name
        self.camera = camera
        # Rectangle in window coordinates; filled in by set_rect().
        self.width = 0
        self.height = 0
        self.x = 0
        self.y = 0
        # (width, height) of the whole window; assigned by the owner.
        self.screenSize = None
    def set_rect(self, x, y, width, height):
        '''Store this viewport's rectangle (top-left origin).'''
        self.x = x
        self.y = y
        self.width = width
        self.height = height
    def make_current(self):
        '''Apply the camera projection and the GL viewport rectangle.'''
        cam = self.camera
        if cam:
            if cam.get_mode() == camera.MODE_PERSPECTIVE:
                aspect = float(self.width) / float(self.height)
                cam.set_projection(75.0, aspect, 1e-6, 1e27)
            else:
                cam.set_projection(0.0, self.width, self.height, 0.0, -1.0, 1.0)
            cam.make_current()
        # GL's origin is bottom-left; flip our top-left y before handing over.
        flippedY = self.screenSize[1] - self.y - self.height
        gl.glViewport(self.x, flippedY, self.width, self.height)
class ViewportManager(object):
    '''Viewport Manager is for handling multiple split views '''
    def __init__(self):
        # Shared view object handed to every camera created here.
        self.view = view.View()
        self.viewports = []
        self.screenSize = (0, 0)
    def update_screen(self, width, height):
        '''Record the new window size and propagate it to every viewport.'''
        self.screenSize = (width, height)
        for viewport in self.viewports:
            viewport.screenSize = self.screenSize
    def create_viewport(self, name, camera):
        '''Build a Viewport wired to the shared view, track it and return it.'''
        camera.set_view(self.view)
        viewport = Viewport(name, camera)
        viewport.screenSize = self.screenSize
        self.viewports.append(viewport)
        return viewport
class GameManager(object):
    ''' Entry point into the game, and manages the game in general '''
    def __init__(self):
        # Window/context configuration.
        self.width = 1920
        self.height = 1080
        self.title = "ed2d"
        self.running = False
        # FPS bookkeeping; fpsEstimate is refreshed periodically in do_run().
        self.fpsTimer = timing.FpsCounter()
        self.fpsEstimate = 0
        self.sysEvents = sysevents.SystemEvents()
        self.window = window.Window(self.title, self.width, self.height, window.WindowedMode)
        self.context = context.Context(3, 3, 2)
        self.context.window = self.window
        # All engine events (input, window, quit) funnel into process_event().
        Events.add_listener(self.process_event)
        # Currently-held keys; appended on key_down, removed on key_up.
        self.keys = []
        # Mouse Information
        self.mousePos = [0.0, 0.0]
        self.mouseButtons = []
        self.mouseRelX = 0
        self.mouseRelY = 0
        self.mousePosX = 0
        self.mousePosY = 0
        cursor.set_relative_mode(False)
        cursor.show_cursor()
        gl.init()
        major = pgl.glGetInteger(gl.GL_MAJOR_VERSION)
        minor = pgl.glGetInteger(gl.GL_MINOR_VERSION)
        print('OpenGL Version: {}.{}'.format(major, minor))
        gl.glViewport(0, 0, self.width, self.height)
        # For CSG to work properly
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_CULL_FACE)
        gl.glEnable(gl.GL_MULTISAMPLE)
        gl.glClearColor(0.0, 0.0, 0.4, 0.0)
        # Main scene shader program (main2.vs / main2.fs).
        vsPath = files.resolve_path('data', 'shaders', 'main2.vs')
        fsPath = files.resolve_path('data', 'shaders', 'main2.fs')
        vertex = shaders.VertexShader(vsPath)
        fragment = shaders.FragmentShader(fsPath)
        self.program = shaders.ShaderProgram(vertex, fragment)
        self.program.use()
        #self.testID1 = self.program.new_uniform(b'perp')
        # Uniform handle for the per-viewport view matrix (set in render()).
        self.testID2 = self.program.new_uniform(b'view')
        self.vao = pgl.glGenVertexArrays(1)
        self.scenegraph = SceneGraph()
        # Creating a object steps:
        # Create a mesh object to render
        objFL = objloader.OBJ('buildings')
        self.meshTest = mesh.Mesh()
        self.meshTest.fromData(objFL)
        self.meshTest.addProgram(self.program)
        self.meshTestID = self.scenegraph.establish(self.meshTest)
        self.meshTest.translate(0.0, 0.0, 0.0)
        # Small box mesh that update() keeps in front of the active camera.
        objBox = objloader.OBJ('box')
        self.boxMesh = mesh.Mesh()
        self.boxMesh.fromData(objBox)
        self.boxMesh.addProgram(self.program)
        self.boxMesh.scale(0.25)
        self.boxMeshID = self.scenegraph.establish(self.boxMesh)
        self.loadText()
        # One full-screen orthographic viewport (for text/UI) plus a 2x2
        # split of perspective viewports managed by vpManager.
        self.vpManager = ViewportManager()
        self.vpManager.update_screen(self.width, self.height)
        self.cameraOrtho = camera.Camera(camera.MODE_ORTHOGRAPHIC)
        self.cameraOrtho.set_view(self.vpManager.view)
        self.cameraOrtho.set_program(self.textProgram)
        self.vpFull = Viewport('full', self.cameraOrtho)
        self.vpFull.screenSize = (self.width, self.height)
        for i in range(4):
            cam = camera.Camera(camera.MODE_PERSPECTIVE)
            vp = self.vpManager.create_viewport('sceneview{0}'.format(i), cam)
            cam.setPosition(vector.Vector(3, data=[0.5, -2.0, 10.0]))
            cam.set_program( self.program)
        halfWidth = int(self.width /2)
        halfHeight = int(self.height/2)
        vps = self.vpManager.viewports
        vps[0].set_rect(0, 0, halfWidth, halfHeight)
        vps[1].set_rect(0, halfHeight, halfWidth, halfHeight)
        vps[2].set_rect(halfWidth, halfHeight, halfWidth, halfHeight)
        vps[3].set_rect(halfWidth, 0, halfWidth, halfHeight)
        # The "active" camera; update() switches it to whichever viewport
        # the mouse is hovering over.
        self.camera = vps[0].camera
        self.vpFull.set_rect(0, 0, self.width, self.height)
        self.model = matrix.Matrix(4)
        #self.model = matrix.Matrix(4).translate(vector.Vector(3, data=[4.0, -2.0, -8]))
        glerr = gl.glGetError()
        if glerr != 0:
            print('GLError:', glerr)
    def loadText(self):
        '''Load the font shader program and font used for the FPS readout.'''
        vsPath = files.resolve_path('data', 'shaders', 'font.vs')
        fsPath = files.resolve_path('data', 'shaders', 'font.fs')
        vertex = shaders.VertexShader(vsPath)
        fragment = shaders.FragmentShader(fsPath)
        self.textProgram = shaders.ShaderProgram(vertex, fragment)
        fontPath = files.resolve_path('data', 'SourceCodePro-Regular.ttf')
        self.font = text.Font(12, fontPath)
        self.text = text.Text(self.textProgram, self.font)
    def resize(self, width, height):
        '''Recompute all viewport rectangles for a new window size.'''
        self.width = width
        self.height = height
        self.vpManager.update_screen(self.width, self.height)
        self.vpFull.screenSize = (self.width, self.height)
        halfWidth = int(self.width /2)
        halfHeight = int(self.height/2)
        vps = self.vpManager.viewports
        # NOTE(review): the quadrant assignment below differs from __init__
        # (vps[1] and vps[3] rects are swapped), so the 2x2 layout changes
        # after the first resize -- confirm whether that is intentional.
        vps[0].set_rect(0, 0, halfWidth, halfHeight)
        vps[1].set_rect(halfWidth, 0, halfWidth, halfHeight)
        vps[2].set_rect(halfWidth, halfHeight, halfWidth, halfHeight)
        vps[3].set_rect(0, halfHeight, halfWidth, halfHeight)
        self.vpFull.set_rect(0, 0, self.width, self.height)
    def process_event(self, event, data):
        '''Event-bus callback: track quit/resize/mouse/keyboard state.'''
        if event == 'quit' or event == 'window_close':
            self.running = False
        elif event == 'window_resized':
            winID, x, y = data
            self.resize(x, y)
        elif event == 'mouse_move':
            if cursor.is_relative():
                self.mouseRelX, self.mouseRelY = data
            else:
                self.mousePosX, self.mousePosY = data
        elif event == 'key_down':
            # 'c' captures the cursor; 'r' releases it back to its last spot.
            if data[0] == 'c':
                cursor.set_relative_mode(True)
            elif data[0] == 'r':
                cursor.set_relative_mode(False)
                cursor.move_cursor(self.mousePosX, self.mousePosY)
            self.keys.append(data[0])
            print(self.keys)
        elif event == 'key_up':
            self.keys.remove(data[0])
        elif event == 'mouse_button_down':
            self.mouseButtons.append(data[0])
            print(self.mouseButtons)
        elif event == 'mouse_button_up':
            self.mouseButtons.remove(data[0])
    def keyUpdate(self):
        '''Apply movement/rotation for every key currently held down.'''
        moveAmount = 0.5 * self.fpsTimer.tickDelta
        for key in self.keys:
            if key == 'w':
                self.camera.move(self.camera.vec_back, moveAmount)
            elif key == 's':
                self.camera.move(self.camera.vec_forward, moveAmount)
            elif key == 'a':
                self.camera.move(self.camera.vec_left, moveAmount)
            elif key == 'd':
                self.camera.move(self.camera.vec_right, moveAmount)
            elif key == 'q':
                self.camera.move(self.camera.vec_up, moveAmount)
            elif key == 'e':
                self.camera.move(self.camera.vec_down, moveAmount)
            elif key == 'UP':
                self.camera.rotate(self.camera.vec_right, moveAmount * 0.05)
            elif key == 'DOWN':
                self.camera.rotate(self.camera.vec_left, moveAmount * 0.05)
            elif key == 'LEFT':
                self.camera.rotate(self.camera.vec_up, moveAmount * 0.05)
            elif key == 'RIGHT':
                self.camera.rotate(self.camera.vec_down, moveAmount * 0.05)
    def mouseUpdate(self):
        '''Mouse-look while button 1 is held; otherwise restore the cursor.'''
        if 1 in self.mouseButtons:
            if not cursor.is_relative():
                cursor.set_relative_mode(True)
            tick = self.fpsTimer.tickDelta
            sensitivity = 0.5
            if self.mouseRelX != 0:
                self.camera.rotate(self.camera.yAxis, math.radians(-self.mouseRelX * sensitivity * tick))
            if self.mouseRelY != 0:
                self.camera.rotate(self.camera.vec_right, math.radians(-self.mouseRelY * sensitivity * tick))
            # Consume the accumulated relative motion.
            self.mouseRelX, self.mouseRelY = 0, 0
        else:
            if cursor.is_relative():
                cursor.set_relative_mode(False)
                cursor.move_cursor(self.mousePosX, self.mousePosY)
    def update(self):
        '''Per-frame logic: pick the active viewport, poll input, update scene.'''
        posVec = self.camera.position.vector
        self.boxMesh.translate(posVec[0], posVec[1], posVec[2]-2.0)
        if not cursor.is_relative():
            # Make the viewport under the mouse pointer the active one.
            for vp in self.vpManager.viewports:
                w = vp.width
                h = vp.height
                x = vp.x
                y = vp.y
                if (self.mousePosX >= x and self.mousePosX < x+w and
                    self.mousePosY >= y and self.mousePosY < y+h):
                    self.camera = vp.camera
                    break
        self.mouseUpdate()
        self.keyUpdate()
        self.scenegraph.update()
    def render(self):
        '''Draw the scene into each split viewport, then the text overlay.'''
        # We need this viewport clear the whole screen (I think)
        self.vpFull.make_current()
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glClearColor(0.3, 0.3, 0.3, 1.0)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        # Change view to perspective projection
        gl.glDisable(gl.GL_BLEND)
        for vp in self.vpManager.viewports:
            vp.make_current()
            self.program.use()
            view = vp.camera.getViewMatrix()
            self.program.set_uniform_matrix(self.testID2, view)
            # Draw 3D stuff
            gl.glBindVertexArray(self.vao)
            self.scenegraph.render()
            gl.glBindVertexArray(0)
        gl.glEnable(gl.GL_BLEND)
        gl.glDisable(gl.GL_DEPTH_TEST)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        # Change to orthographic projection to draw the text
        self.textProgram.use()
        self.vpFull.make_current()
        self.text.draw_text(str(self.fpsEstimate) + ' FPS', 0, 10)
        gl.glDisable(gl.GL_BLEND)
        gl.glEnable(gl.GL_DEPTH_TEST)
    def do_run(self):
        ''' Process a single loop '''
        self.sysEvents.process()
        self.update()
        self.render()
        self.window.flip()
        self.fpsTimer.tick()
        # Refresh the printed FPS estimate roughly every two seconds.
        if self.fpsTimer.fpsTime >= 2000:
            self.fpsEstimate = self.fpsTimer.get_fps()
            print("{:.2f} fps".format(self.fpsEstimate))
    def run(self):
        ''' Called from launcher doesnt exit until the game is quit '''
        self.running = True
        while self.running:
            self.do_run()
| |
"""Summary
"""
from PyQt5.QtWidgets import QGraphicsRectItem
from . import gridstyles as styles
from .gridextras import PreXoverItemGroup, WEDGE_RECT
from . import (
GridNucleicAcidPartItemT,
GridVirtualHelixItemT
)
_RADIUS = styles.GRID_HELIX_RADIUS
class PreXoverManager(QGraphicsRectItem):
    """Creates, owns and activates the grid view's pre-crossover items.

    Attributes:
        active_group: PreXoverItemGroup of the active virtual helix, or None
        active_neighbor_group: PreXoverItemGroup of the activated neighbor,
            or None
        groups (dict): neighbor id_num -> PreXoverItemGroup
        neighbor_pairs (tuple): (fwd, rev) pair-lookup dicts consulted by
            activateNeighbors()
        neighbor_prexover_items (dict): (id_num, is_fwd, idx) -> PreXoverItem;
            used to avoid creating duplicate neighbor items
        part_item (TYPE): parent GridNucleicAcidPartItem
        prexover_item_map (dict): (id_num, is_fwd, idx) ->
            (active PreXoverItem, neighbor PreXoverItemGroup,
            list of neighbor PreXoverItems)
        virtual_helix_item (cadnano.views.gridview.virtualhelixitem.VirtualHelixItem): the active helix item
    """
    def __init__(self, part_item: GridNucleicAcidPartItemT):
        """
        Args:
            part_item: owning part item; also used as the QGraphicsItem parent.
        """
        super(PreXoverManager, self).__init__(part_item)
        self.part_item = part_item
        self.virtual_helix_item = None
        self.active_group = None
        self.active_neighbor_group = None
        self.groups = {}
        # dictionary of tuple of a
        # (PreXoverItemGroup, PreXoverItemGroup, List[PreXoverItem])
        # tracks connections between prexovers
        self.prexover_item_map = {}
        self.neighbor_prexover_items = {}  # just a dictionary of neighbors
        self.neighbor_pairs = ()  # accounting for neighbor pairing
        self._active_items = []
    # end def
    def __repr__(self):
        return "<{}>".format(self.__class__.__name__)
    def partItem(self) -> GridNucleicAcidPartItemT:
        """
        Returns:
            The part item
        """
        return self.part_item
    # end def
    def destroyItem(self):
        """Deactivate and destroy all child items, drop references and remove
        this manager from the scene."""
        print("destroying Grid PreXoverManager")
        self.deactivateNeighbors()
        self.clearPreXoverItemGroups()
        self.neighbor_pairs = None
        self.part_item = None
        self.virtual_helix_item = None
        self.scene().removeItem(self)
    # end def
    def clearPreXoverItemGroups(self):
        """Destroy every neighbor group plus the active group, reset the
        per-activation dictionaries, and restore the active helix's z-value.
        """
        groups = self.groups
        while groups:
            k, item = groups.popitem()
            item.destroyItem()
        if self.active_group is not None:
            self.active_group.destroyItem()
            self.active_group = None
        self._active_items = []
        self.prexover_item_map = {}
        self.neighbor_prexover_items = {}
        if self.virtual_helix_item is not None:
            self.virtual_helix_item.setZValue(styles.ZGRIDHELIX)
    # end def
    def hideGroups(self):
        """Hide all prexover item groups.

        NOTE(review): clearPreXoverItemGroups() already destroys the groups
        and sets active_group to None, so the hide() calls below look like
        no-ops -- confirm the intended behaviour.
        """
        self.clearPreXoverItemGroups()
        if self.active_group is not None:
            self.active_group.hide()
        for group in self.groups.values():
            group.hide()
        self.virtual_helix_item = None
    # end def
    def activateVirtualHelix(self, virtual_helix_item: GridVirtualHelixItemT,
                             idx: int,
                             per_neighbor_hits,
                             pairs):
        """Create PreXoverItemGroups for the active virtual_helix_item and its
        neighbors and connect the neighboring bases
        Args:
            virtual_helix_item: Description
            idx: the base index within the virtual helix
            per_neighbor_hits: maps neighbor id_num -> (fwd_axis_hits,
                rev_axis_hits); each hit is (idx, fwd_idxs, rev_idxs)
            pairs: stored as-is into self.neighbor_pairs
        """
        self.clearPreXoverItemGroups()
        pxis = self.prexover_item_map
        neighbor_pxis_dict = self.neighbor_prexover_items  # for avoiding duplicates)
        self.neighbor_pairs = pairs
        self.virtual_helix_item = virtual_helix_item
        part_item = self.part_item
        groups = self.groups
        self.active_group = agroup = PreXoverItemGroup(_RADIUS, WEDGE_RECT,
                                                       virtual_helix_item, True)
        id_num = virtual_helix_item.idNum()
        # Raise the active helix above its siblings while activated.
        virtual_helix_item.setZValue(styles.ZGRIDHELIX + 10)
        fwd_st_type, rev_st_type = True, False  # for clarity in the call to constructors
        for neighbor_id, hits in per_neighbor_hits.items():
            nvhi = part_item.idToVirtualHelixItem(neighbor_id)
            ngroup = PreXoverItemGroup(_RADIUS, WEDGE_RECT, nvhi, False)
            groups[neighbor_id] = ngroup
            fwd_axis_hits, rev_axis_hits = hits
            # n_step_size = nvhi.getProperty('bases_per_repeat')
            for idx, fwd_idxs, rev_idxs in fwd_axis_hits:
                neighbor_pxis = []
                # print((id_num, fwd_st_type, idx))
                pxis[(id_num, fwd_st_type, idx)] = (agroup.getItemIdx(fwd_st_type, idx),
                                                    ngroup,
                                                    neighbor_pxis
                                                    )
                for j in fwd_idxs:
                    nkey = (neighbor_id, fwd_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        npxi = ngroup.getItemIdx(fwd_st_type, j)
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
                for j in rev_idxs:
                    nkey = (neighbor_id, rev_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        npxi = ngroup.getItemIdx(rev_st_type, j)
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
            for idx, fwd_idxs, rev_idxs in rev_axis_hits:
                neighbor_pxis = []
                # print((id_num, rev_st_type, idx))
                pxis[(id_num, rev_st_type, idx)] = (agroup.getItemIdx(rev_st_type, idx),
                                                    ngroup,
                                                    neighbor_pxis
                                                    )
                for j in fwd_idxs:
                    nkey = (neighbor_id, fwd_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        npxi = ngroup.getItemIdx(fwd_st_type, j)
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
                for j in rev_idxs:
                    nkey = (neighbor_id, rev_st_type, j)
                    npxi = neighbor_pxis_dict.get(nkey)
                    if npxi is None:
                        npxi = ngroup.getItemIdx(rev_st_type, j)
                        neighbor_pxis_dict[nkey] = npxi
                    neighbor_pxis.append(npxi)
        # end for per_neighbor_hits
    # end def
    def activateNeighbors(self, id_num: int, is_fwd: bool, idx: int):
        """Mark the prexover item at (id_num, is_fwd, idx) active and light up
        its connected neighbor items, if any.
        Args:
            id_num: VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
            is_fwd: ``True`` if ``fwd`` (top) strand, ``False`` if ``rev`` (bottom) strand
            idx: the base index within the virtual helix
        Raises:
            ValueError: when id_num does not match the active group
        """
        # print("ACTIVATING neighbors", id_num, idx)
        if self.active_group is None:
            return
        agroup = self.active_group
        if id_num != agroup.id_num:
            raise ValueError("not active id_num {} != {}".format(id_num,
                                                                 agroup.id_num))
        active_items = self._active_items
        item = self.prexover_item_map.get((id_num, is_fwd, idx))
        if item is None:
            # No mapped neighbors: just highlight the active item itself.
            apxi = agroup.getItemIdx(is_fwd, idx)
            apxi.setActive5p(True) if is_fwd else apxi.setActive3p(True)
            agroup.active_wedge_gizmo.pointToPreXoverItem(apxi)
            active_items.append(apxi)
        else:
            apxi, npxig, neighbor_list = item
            pairs = self.neighbor_pairs[0] if is_fwd else self.neighbor_pairs[1]
            check_5prime = pairs.get(idx)
            is_5prime_strand = None
            if check_5prime is not None:
                is_5prime_strand = check_5prime[0]
            else:
                # NOTE(review): is_5prime_strand is None here, so the
                # `not is_5prime_strand` in the elif is always True and the
                # branch hinges only on the length test -- confirm intent.
                if is_fwd and idx == 0:
                    is_5prime_strand = False
                elif not is_5prime_strand and self.virtual_helix_item.getProperty('length') == idx + 1:
                    is_5prime_strand = False
                else:
                    is_5prime_strand = True
            agroup.active_wedge_gizmo.pointToPreXoverItem(apxi)
            active_items.append(apxi)
            self.active_neighbor_group = npxig
            # print("Should have {} neighbors".format(len(neighbor_list)))
            # color = neighbor_list[0].color if neighbor_list else '#aaaaa'
            # angle = 0
            for npxi in neighbor_list:
                npxi.setActive3p(True, apxi) if is_5prime_strand else npxi.setActive5p(True, apxi)
                active_items.append(npxi)
            apxi.setActive5p(True, npxi) if is_5prime_strand else apxi.setActive3p(True, npxi)
    # end def
    def deactivateNeighbors(self):
        """Clear all active highlights and reset the active neighbor group."""
        while self._active_items:
            npxi = self._active_items.pop()
            npxi.setActive3p(False)
            npxi.setActive5p(False)
        if self.active_neighbor_group is None:
            return
        wg = self.active_neighbor_group.active_wedge_gizmo
        if wg is not None:
            wg.deactivate()
        self.active_neighbor_group = None
    # end def
# end class
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent layers backed by cuDNN.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.util.tf_export import keras_export
class _CuDNNRNN(RNN):
  """Private base class for CuDNNGRU and CuDNNLSTM layers.
  Arguments:
    return_sequences: Boolean. Whether to return the last output
        in the output sequence, or the full sequence.
    return_state: Boolean. Whether to return the last state
        in addition to the output.
    go_backwards: Boolean (default False).
        If True, process the input sequence backwards and return the
        reversed sequence.
    stateful: Boolean (default False). If True, the last state
        for each sample at index i in a batch will be used as initial
        state for the sample of index i in the following batch.
    time_major: Boolean (default False). If true, the inputs and outputs will be
        in shape `(timesteps, batch, ...)`, whereas in the False case, it will
        be `(batch, timesteps, ...)`.
  """
  def __init__(self,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               time_major=False,
               **kwargs):
    # We invoke the base layer's initializer directly here because we do not
    # want to create RNN cell instance.
    super(RNN, self).__init__(**kwargs)  # pylint: disable=bad-super-call
    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.time_major = time_major
    # Masking is rejected in call(); cuDNN kernels take dense batches only.
    self.supports_masking = False
    self.input_spec = [InputSpec(ndim=3)]
    # self.cell is supplied by the subclass (set before this initializer
    # runs) and only needs to expose state_size.
    if hasattr(self.cell.state_size, '__len__'):
      state_size = self.cell.state_size
    else:
      state_size = [self.cell.state_size]
    self.state_spec = [InputSpec(shape=(None, dim)) for dim in state_size]
    self.constants_spec = None
    self._states = None
    self._num_constants = 0
    # Shape constant used by subclasses to flatten cuDNN parameter weights.
    self._vector_shape = constant_op.constant([-1])
  def call(self, inputs, mask=None, training=None, initial_state=None):
    """Run the fused cuDNN kernel; masking is not supported."""
    if isinstance(mask, list):
      mask = mask[0]
    if mask is not None:
      raise ValueError('Masking is not supported for CuDNN RNNs.')
    # input shape: `(samples, time (padded with zeros), input_dim)`
    # note that the .build() method of subclasses MUST define
    # self.input_spec and self.state_spec with complete input shapes.
    if isinstance(inputs, list):
      # Initial states were passed alongside the inputs.
      initial_state = inputs[1:]
      inputs = inputs[0]
    elif initial_state is not None:
      pass
    elif self.stateful:
      initial_state = self.states
    else:
      initial_state = self.get_initial_state(inputs)
    if len(initial_state) != len(self.states):
      raise ValueError('Layer has ' + str(len(self.states)) +
                       ' states but was passed ' + str(len(initial_state)) +
                       ' initial states.')
    if self.go_backwards:
      # Reverse time axis.
      inputs = K.reverse(inputs, 1)
    # _process_batch is implemented by the subclass and invokes the cuDNN op.
    output, states = self._process_batch(inputs, initial_state)
    if self.stateful:
      # Persist the final states so the next batch continues from them.
      updates = [
          state_ops.assign(self_state, state)
          for self_state, state in zip(self.states, states)
      ]
      self.add_update(updates)
    if self.return_state:
      return [output] + states
    else:
      return output
  def get_config(self):
    config = {
        'return_sequences': self.return_sequences,
        'return_state': self.return_state,
        'go_backwards': self.go_backwards,
        'stateful': self.stateful,
        'time_major': self.time_major,
    }
    # Skip RNN.get_config(), which would try to serialize a cell.
    base_config = super(  # pylint: disable=bad-super-call
        RNN, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config):
    return cls(**config)
  @property
  def trainable_weights(self):
    # Weights are created in the subclass's build(); empty until then.
    if self.trainable and self.built:
      return [self.kernel, self.recurrent_kernel, self.bias]
    return []
  @property
  def non_trainable_weights(self):
    if not self.trainable and self.built:
      return [self.kernel, self.recurrent_kernel, self.bias]
    return []
  @property
  def losses(self):
    return super(RNN, self).losses
  def get_losses_for(self, inputs=None):
    return super(  # pylint: disable=bad-super-call
        RNN, self).get_losses_for(inputs=inputs)
@keras_export(v1=['keras.layers.CuDNNGRU'])
class CuDNNGRU(_CuDNNRNN):
"""Fast GRU implementation backed by cuDNN.
More information about cuDNN can be found on the [NVIDIA
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output in the output
sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to the
output.
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
"""
def __init__(self,
units,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
**kwargs):
self.units = units
cell_spec = collections.namedtuple('cell', 'state_size')
self._cell = cell_spec(state_size=self.units)
super(CuDNNGRU, self).__init__(
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
@property
def cell(self):
    # Expose the stand-in cell object (a namedtuple carrying `state_size`)
    # so base-class code that inspects `self.cell.state_size` keeps working.
    return self._cell
def build(self, input_shape):
    """Create the kernel, recurrent kernel and bias weights for the GRU."""
    super(CuDNNGRU, self).build(input_shape)
    if isinstance(input_shape, list):
        input_shape = input_shape[0]
    input_dim = int(input_shape[-1])

    # The three GRU gates are packed side by side along the last axis.
    gate_width = self.units * 3
    self.kernel = self.add_weight(
        name='kernel',
        shape=(input_dim, gate_width),
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint)
    self.recurrent_kernel = self.add_weight(
        name='recurrent_kernel',
        shape=(self.units, gate_width),
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)
    # Twice the gate width: separate input-side and recurrent-side biases.
    self.bias = self.add_weight(
        name='bias',
        shape=(gate_width * 2,),
        initializer=self.bias_initializer,
        regularizer=self.bias_regularizer,
        constraint=self.bias_constraint)

    self.built = True
def _process_batch(self, inputs, initial_state):
    # Run a single batch through the fused cuDNN GRU op.
    # cuDNN consumes time-major data: (time, batch, feature).
    if not self.time_major:
        inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
    input_h = initial_state[0]
    # cuDNN expects a leading num_layers axis on the state.
    input_h = array_ops.expand_dims(input_h, axis=0)

    # Convert the Keras-layout weights/biases into cuDNN's canonical
    # parameter buffer.  NOTE(review): the slice order below (second gate
    # block listed first) reorders the Keras gate layout into the order the
    # cuDNN kernel expects — do not "tidy" or reorder these slices.
    params = recurrent_v2._canonical_to_params(  # pylint: disable=protected-access
        weights=[
            self.kernel[:, self.units:self.units * 2],
            self.kernel[:, :self.units],
            self.kernel[:, self.units * 2:],
            self.recurrent_kernel[:, self.units:self.units * 2],
            self.recurrent_kernel[:, :self.units],
            self.recurrent_kernel[:, self.units * 2:],
        ],
        biases=[
            self.bias[self.units:self.units * 2],
            self.bias[:self.units],
            self.bias[self.units * 2:self.units * 3],
            self.bias[self.units * 4:self.units * 5],
            self.bias[self.units * 3:self.units * 4],
            self.bias[self.units * 5:],
        ],
        shape=self._vector_shape)

    args = {
        'input': inputs,
        'input_h': input_h,
        # GRU has no cell state; the op signature still requires input_c.
        'input_c': 0,
        'params': params,
        'is_training': True,
        'rnn_mode': 'gru',
    }

    outputs, h, _, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv2(**args)

    if self.stateful or self.return_state:
        # Drop the num_layers axis that was added above.
        h = h[0]
    if self.return_sequences:
        if self.time_major:
            output = outputs
        else:
            output = array_ops.transpose(outputs, perm=(1, 0, 2))
    else:
        output = outputs[-1]
    return output, [h]
def get_config(self):
    """Serialize constructor arguments, merged over the base-layer config."""
    layer_config = {
        'units': self.units,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
    }
    # Layer-specific keys win over base-class keys on collision.
    merged = dict(super(CuDNNGRU, self).get_config())
    merged.update(layer_config)
    return merged
@keras_export(v1=['keras.layers.CuDNNLSTM'])
class CuDNNLSTM(_CuDNNRNN):
    """Fast LSTM implementation backed by cuDNN.

    More information about cuDNN can be found on the [NVIDIA
    developer website](https://developer.nvidia.com/cudnn).
    Can only be run on GPU.

    Arguments:
        units: Positive integer, dimensionality of the output space.
        kernel_initializer: Initializer for the `kernel` weights matrix, used
            for the linear transformation of the inputs.
        unit_forget_bias: Boolean. If True, add 1 to the bias of the forget
            gate at initialization. Setting it to true will also force
            `bias_initializer="zeros"`. This is recommended in [Jozefowicz et
            al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        recurrent_initializer: Initializer for the `recurrent_kernel` weights
            matrix, used for the linear transformation of the recurrent state.
        bias_initializer: Initializer for the bias vector.
        kernel_regularizer: Regularizer function applied to the `kernel`
            weights matrix.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to the output of
            the layer (its "activation").
        kernel_constraint: Constraint function applied to the `kernel` weights
            matrix.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix.
        bias_constraint: Constraint function applied to the bias vector.
        return_sequences: Boolean. Whether to return the last output in the
            output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state in addition
            to the output.
        go_backwards: Boolean (default False). If True, process the input
            sequence backwards and return the reversed sequence.
        stateful: Boolean (default False). If True, the last state for each
            sample at index i in a batch will be used as initial state for the
            sample of index i in the following batch.
    """

    def __init__(self,
                 units,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 **kwargs):
        self.units = units
        # Minimal stand-in cell: base-class code only reads `state_size`,
        # which for an LSTM is the pair of (h, c) state sizes.
        cell_spec = collections.namedtuple('cell', 'state_size')
        self._cell = cell_spec(state_size=(self.units, self.units))
        super(CuDNNLSTM, self).__init__(
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            **kwargs)
        # Resolve string identifiers to instances via the Keras registries.
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

    @property
    def cell(self):
        # Expose the namedtuple stand-in so `self.cell.state_size` works.
        return self._cell

    def build(self, input_shape):
        # Create the packed 4-gate weight matrices and the 8-block bias.
        super(CuDNNLSTM, self).build(input_shape)
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        input_dim = int(input_shape[-1])

        # Four gates packed side by side along the last axis.
        self.kernel = self.add_weight(
            shape=(input_dim, self.units * 4),
            name='kernel',
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)

        if self.unit_forget_bias:

            def bias_initializer(_, *args, **kwargs):
                # The bias holds 8 unit-sized blocks (input-side and
                # recurrent-side biases for 4 gates); the single all-ones
                # block implements the +1 forget-gate bias.  NOTE(review):
                # the block offsets must match cuDNN's bias layout — confirm
                # against recurrent_v2 before changing.
                return array_ops.concat([
                    self.bias_initializer((self.units * 5,), *args, **kwargs),
                    initializers.Ones()((self.units,), *args, **kwargs),
                    self.bias_initializer((self.units * 2,), *args, **kwargs),
                ], axis=0)
        else:
            bias_initializer = self.bias_initializer
        self.bias = self.add_weight(
            shape=(self.units * 8,),
            name='bias',
            initializer=bias_initializer,
            regularizer=self.bias_regularizer,
            constraint=self.bias_constraint)

        self.built = True

    def _process_batch(self, inputs, initial_state):
        # Run a single batch through the fused cuDNN LSTM op.
        # cuDNN consumes time-major data: (time, batch, feature).
        if not self.time_major:
            inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
        input_h = initial_state[0]
        input_c = initial_state[1]
        # cuDNN expects a leading num_layers axis on both state tensors.
        input_h = array_ops.expand_dims(input_h, axis=0)
        input_c = array_ops.expand_dims(input_c, axis=0)

        # Split the packed variables into cuDNN's canonical 8-block layout
        # (4 input + 4 recurrent blocks, same order for weights and biases).
        # NOTE(review): this ordering must match recurrent_v2 — do not
        # reorder these slices.
        params = recurrent_v2._canonical_to_params(  # pylint: disable=protected-access
            weights=[
                self.kernel[:, :self.units],
                self.kernel[:, self.units:self.units * 2],
                self.kernel[:, self.units * 2:self.units * 3],
                self.kernel[:, self.units * 3:],
                self.recurrent_kernel[:, :self.units],
                self.recurrent_kernel[:, self.units:self.units * 2],
                self.recurrent_kernel[:, self.units * 2:self.units * 3],
                self.recurrent_kernel[:, self.units * 3:],
            ],
            biases=[
                self.bias[:self.units],
                self.bias[self.units:self.units * 2],
                self.bias[self.units * 2:self.units * 3],
                self.bias[self.units * 3:self.units * 4],
                self.bias[self.units * 4:self.units * 5],
                self.bias[self.units * 5:self.units * 6],
                self.bias[self.units * 6:self.units * 7],
                self.bias[self.units * 7:],
            ],
            shape=self._vector_shape)

        args = {
            'input': inputs,
            'input_h': input_h,
            'input_c': input_c,
            'params': params,
            'is_training': True,
        }

        outputs, h, c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv2(**args)

        if self.stateful or self.return_state:
            # Drop the num_layers axis that was added above.
            h = h[0]
            c = c[0]
        if self.return_sequences:
            if self.time_major:
                output = outputs
            else:
                output = array_ops.transpose(outputs, perm=(1, 0, 2))
        else:
            output = outputs[-1]
        return output, [h, c]

    def get_config(self):
        # Serialize constructor arguments on top of the base-layer config;
        # layer-specific keys win on collision.
        config = {
            'units': self.units,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'recurrent_initializer':
                initializers.serialize(self.recurrent_initializer),
            'bias_initializer': initializers.serialize(self.bias_initializer),
            'unit_forget_bias': self.unit_forget_bias,
            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
            'recurrent_regularizer':
                regularizers.serialize(self.recurrent_regularizer),
            'bias_regularizer': regularizers.serialize(self.bias_regularizer),
            'activity_regularizer':
                regularizers.serialize(self.activity_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
            'recurrent_constraint':
                constraints.serialize(self.recurrent_constraint),
            'bias_constraint': constraints.serialize(self.bias_constraint)
        }
        base_config = super(CuDNNLSTM, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| |
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HTTP wrapper for apitools.
This library wraps the underlying http library we use, which is
currently httplib2.
"""
import collections
import contextlib
import logging
import socket
import time
import httplib2
import oauth2client
import six
from six.moves import http_client
from six.moves.urllib import parse
from apitools.base.py import exceptions
from apitools.base.py import util
__all__ = [
    'CheckResponse',
    'GetHttp',
    'HandleExceptionsAndRebuildHttpConnections',
    'MakeRequest',
    'RebuildHttpConnections',
    'Request',
    'Response',
    'RethrowExceptionHandler',
]

# 308 and 429 don't have names in httplib.
RESUME_INCOMPLETE = 308
TOO_MANY_REQUESTS = 429

# Status codes treated as redirects, including the non-standard 308
# (resume incomplete) used by resumable uploads.
_REDIRECT_STATUS_CODES = (
    http_client.MOVED_PERMANENTLY,
    http_client.FOUND,
    http_client.SEE_OTHER,
    http_client.TEMPORARY_REDIRECT,
    RESUME_INCOMPLETE,
)

# Context handed to retry handler functions:
#   http: An httplib2.Http instance.
#   http_request: A http_wrapper.Request.
#   exc: Exception being raised.
#   num_retries: Number of retries consumed; used for exponential backoff.
#   max_retry_wait: Cap (in seconds) on the computed backoff wait.
#   total_wait_sec: Total seconds elapsed since the first attempt.
ExceptionRetryArgs = collections.namedtuple(
    'ExceptionRetryArgs', ['http', 'http_request', 'exc', 'num_retries',
                           'max_retry_wait', 'total_wait_sec'])
@contextlib.contextmanager
def _Httplib2Debuglevel(http_request, level, http=None):
    """Temporarily change the value of httplib2.debuglevel, if necessary.

    If http_request has a `loggable_body` distinct from `body`, we need to
    prevent httplib2 from logging the full body.  This sets
    httplib2.debuglevel for the duration of the `with` block and, when an
    httplib2.Http object is provided, also adjusts the level on any cached
    connections attached to it.

    Args:
      http_request: a Request we're logging.
      level: (int) the debuglevel for logging.
      http: (optional) an httplib2.Http whose connections we should
          set the debuglevel on.

    Yields:
      None.
    """
    if http_request.loggable_body is None:
        # Nothing sensitive to suppress; leave the global level untouched.
        yield
        return
    previous_level = httplib2.debuglevel
    saved_levels = {}
    httplib2.debuglevel = level
    if http is not None:
        for key, connection in http.connections.items():
            # httplib2 stores two kinds of values in this dict, connection
            # classes and instances. Since the connection types are all
            # old-style classes, we can't easily distinguish by connection
            # type -- so instead we use the key pattern.
            if ':' not in key:
                continue
            saved_levels[key] = connection.debuglevel
            connection.set_debuglevel(level)
    yield
    httplib2.debuglevel = previous_level
    if http is not None:
        for key, restored_level in saved_levels.items():
            if key in http.connections:
                http.connections[key].set_debuglevel(restored_level)
class Request(object):
    """Class encapsulating the data for an HTTP request."""

    def __init__(self, url='', http_method='GET', headers=None, body=''):
        self.url = url
        self.http_method = http_method
        self.headers = headers or {}
        self.__body = None
        self.__loggable_body = None
        # Route through the property so content-length is kept in sync.
        self.body = body

    @property
    def loggable_body(self):
        return self.__loggable_body

    @loggable_body.setter
    def loggable_body(self, value):
        # A loggable body without a real body makes no sense.
        if self.body is None:
            raise exceptions.RequestError(
                'Cannot set loggable body on request with no body')
        self.__loggable_body = value

    @property
    def body(self):
        return self.__body

    @body.setter
    def body(self, value):
        """Sets the request body; handles logging and length measurement."""
        self.__body = value
        if value is None:
            self.headers.pop('content-length', None)
        else:
            # Prefer a `length` attribute over len(): len() cannot exceed
            # 4GiB in 32-bit python.
            size = getattr(self.__body, 'length', None) or len(self.__body)
            self.headers['content-length'] = str(size)
        # Non-string bodies may be huge; log a placeholder instead.
        if not isinstance(value, (type(None), six.string_types)):
            self.loggable_body = '<media body>'
# Note: currently the order of fields here is important, since we want
# to be able to pass in the result from httplib2.request.
class Response(collections.namedtuple(
        'HttpResponse', ['info', 'content', 'request_url'])):
    """Class encapsulating data for an HTTP response.

    Field order matters: it matches the tuple returned by httplib2.request.
    """
    __slots__ = ()

    def __len__(self):
        return self.length

    @property
    def length(self):
        """Return the length of this response.

        We expose this as an attribute since using len() directly can fail
        for responses larger than sys.maxint.

        Returns:
          Response length (as int or long)
        """
        def _LengthFromContentRange(content_range):
            # e.g. 'bytes 0-4/10' -> 5
            _, _, range_spec = content_range.partition(' ')
            byte_range, _, _ = range_spec.partition('/')
            start, _, end = byte_range.partition('-')
            return int(end) - int(start) + 1

        info = self.info
        if '-content-encoding' in info and 'content-range' in info:
            # httplib2 rewrites content-length for compressed transfers; the
            # header can't be trusted then, but content-range can.
            return _LengthFromContentRange(info['content-range'])
        if 'content-length' in info:
            return int(info.get('content-length'))
        if 'content-range' in info:
            return _LengthFromContentRange(info['content-range'])
        return len(self.content)

    @property
    def status_code(self):
        return int(self.info['status'])

    @property
    def retry_after(self):
        if 'retry-after' in self.info:
            return int(self.info['retry-after'])
        return None

    @property
    def is_redirect(self):
        return ('location' in self.info and
                self.status_code in _REDIRECT_STATUS_CODES)
def CheckResponse(response):
    """Validate `response`, raising for conditions that warrant a retry.

    Args:
      response: a Response, or None.

    Raises:
      exceptions.RequestError: if response is None.
      exceptions.BadStatusCodeError: on 5XX or 429 status codes.
      exceptions.RetryAfterError: if the response carries a retry-after.
    """
    if response is None:
        # Caller shouldn't call us if the response is None, but handle anyway.
        # Bug fix: the old message formatted `response.request_url`, which
        # raised AttributeError on None instead of the intended RequestError.
        raise exceptions.RequestError(
            'Request did not return a response.')
    elif (response.status_code >= 500 or
          response.status_code == TOO_MANY_REQUESTS):
        raise exceptions.BadStatusCodeError.FromResponse(response)
    elif response.retry_after:
        raise exceptions.RetryAfterError.FromResponse(response)
def RebuildHttpConnections(http):
    """Rebuilds all http connections in the httplib2.Http instance.

    httplib2 overloads the map in http.connections to contain two different
    types of values:
      { scheme string: connection class } and
      { scheme + authority string : actual http connection }
    Here we remove all of the entries for actual connections so that on the
    next request httplib2 will rebuild them from the connection types.

    Args:
      http: An httplib2.Http instance.
    """
    connections = getattr(http, 'connections', None)
    if not connections:
        return
    # Keys containing ':' ("scheme:authority") hold live connection objects;
    # bare scheme keys hold connection classes and must be kept.
    stale_keys = [key for key in connections if ':' in key]
    for key in stale_keys:
        del connections[key]
def RethrowExceptionHandler(*unused_args):
    """Retry handler that simply re-raises the currently active exception."""
    # pylint: disable=misplaced-bare-raise
    raise
def HandleExceptionsAndRebuildHttpConnections(retry_args):
    """Exception handler for http failures.

    This catches known transient failures, rebuilds the underlying HTTP
    connections, and sleeps before the caller retries.  Unknown exceptions
    are re-raised.

    Args:
      retry_args: An ExceptionRetryArgs tuple.
    """
    # If the server indicates how long to wait, use that value.  Otherwise,
    # calculate the wait time on our own.
    retry_after = None

    # Transport failures
    if isinstance(retry_args.exc, (http_client.BadStatusLine,
                                   http_client.IncompleteRead,
                                   http_client.ResponseNotReady)):
        logging.debug('Caught HTTP error %s, retrying: %s',
                      type(retry_args.exc).__name__, retry_args.exc)
    # Bug fix: socket.gaierror and socket.timeout are subclasses of
    # socket.error, so they must be tested *before* the generic
    # socket.error branch or their messages are unreachable.
    elif isinstance(retry_args.exc, socket.gaierror):
        logging.debug(
            'Caught socket address error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, socket.timeout):
        logging.debug(
            'Caught socket timeout error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, socket.error):
        logging.debug('Caught socket error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, httplib2.ServerNotFoundError):
        logging.debug(
            'Caught server not found error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, ValueError):
        # oauth2client tries to JSON-decode the response, which can result
        # in a ValueError if the response was invalid. Until that is fixed in
        # oauth2client, need to handle it here.
        logging.debug('Response content was invalid (%s), retrying',
                      retry_args.exc)
    elif (isinstance(retry_args.exc,
                     oauth2client.client.HttpAccessTokenRefreshError) and
          (retry_args.exc.status == TOO_MANY_REQUESTS or
           retry_args.exc.status >= 500)):
        logging.debug(
            'Caught transient credential refresh error (%s), retrying',
            retry_args.exc)
    elif isinstance(retry_args.exc, exceptions.RequestError):
        logging.debug('Request returned no response, retrying')
    # API-level failures
    elif isinstance(retry_args.exc, exceptions.BadStatusCodeError):
        logging.debug('Response returned status %s, retrying',
                      retry_args.exc.status_code)
    elif isinstance(retry_args.exc, exceptions.RetryAfterError):
        logging.debug('Response returned a retry-after header, retrying')
        retry_after = retry_args.exc.retry_after
    else:
        raise  # pylint: disable=misplaced-bare-raise
    RebuildHttpConnections(retry_args.http)
    logging.debug('Retrying request to url %s after exception %s',
                  retry_args.http_request.url, retry_args.exc)
    time.sleep(
        retry_after or util.CalculateWaitForRetry(
            retry_args.num_retries, max_wait=retry_args.max_retry_wait))
def MakeRequest(http, http_request, retries=7, max_retry_wait=60,
                redirections=5,
                retry_func=HandleExceptionsAndRebuildHttpConnections,
                check_response_func=CheckResponse):
    """Send http_request via the given http, performing error/retry handling.

    Args:
      http: An httplib2.Http instance, or a http multiplexer that delegates
          to an underlying http, for example, HTTPMultiplexer.
      http_request: A Request to send.
      retries: (int, default 7) Number of retries to attempt on retryable
          replies (such as 429 or 5XX).
      max_retry_wait: (int, default 60) Maximum number of seconds to wait
          when retrying.
      redirections: (int, default 5) Number of redirects to follow.
      retry_func: Function to handle retries on exceptions. Argument is an
          ExceptionRetryArgs tuple.
      check_response_func: Function to validate the HTTP response.
          Arguments are (Response, response content, url).

    Raises:
      InvalidDataFromServerError: if there is no response after retries.

    Returns:
      A Response object.
    """
    attempt = 0
    first_req_time = time.time()
    while True:
        try:
            return _MakeRequestNoRetry(
                http, http_request, redirections=redirections,
                check_response_func=check_response_func)
        # retry_func consumes the exception types it handles and re-raises
        # anything else.
        # pylint: disable=broad-except
        except Exception as e:
            attempt += 1
            if attempt >= retries:
                raise
            elapsed = time.time() - first_req_time
            retry_func(ExceptionRetryArgs(http, http_request, e, attempt,
                                          max_retry_wait, elapsed))
def _MakeRequestNoRetry(http, http_request, redirections=5,
                        check_response_func=CheckResponse):
    """Send http_request via the given http, with no retry handling.

    This wrapper exists to handle translation between the plain httplib2
    request/response types and the Request and Response types above.

    Args:
      http: An httplib2.Http instance, or a http multiplexer that delegates
          to an underlying http, for example, HTTPMultiplexer.
      http_request: A Request to send.
      redirections: (int, default 5) Number of redirects to follow.
      check_response_func: Function to validate the HTTP response.
          Arguments are (Response, response content, url).

    Returns:
      A Response object.

    Raises:
      RequestError if no response could be parsed.
    """
    connection_type = None
    # Honor per-scheme connection-class overrides; callers use these to hook
    # the underlying connection for managing callbacks or hash digestion.
    if getattr(http, 'connections', None):
        scheme = parse.urlsplit(http_request.url).scheme
        if scheme and scheme in http.connections:
            connection_type = http.connections[scheme]

    # Custom printing only at debuglevel 4
    effective_level = 4 if httplib2.debuglevel == 4 else 0
    with _Httplib2Debuglevel(http_request, effective_level, http=http):
        info, content = http.request(
            str(http_request.url), method=str(http_request.http_method),
            body=http_request.body, headers=http_request.headers,
            redirections=redirections, connection_type=connection_type)

    if info is None:
        raise exceptions.RequestError()

    response = Response(info, content, http_request.url)
    check_response_func(response)
    return response
# Registered factory callables; GetHttp tries each in order before falling
# back to a plain httplib2.Http.
_HTTP_FACTORIES = []


def _RegisterHttpFactory(factory):
    """Register a callable that may construct the Http returned by GetHttp."""
    _HTTP_FACTORIES.append(factory)


def GetHttp(**kwds):
    """Return an Http instance from the first factory that provides one."""
    for factory in _HTTP_FACTORIES:
        http = factory(**kwds)
        if http is not None:
            return http
    return httplib2.Http(**kwds)
| |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Ensure new-style classes under Python 2 (paired with absolute_import above).
__metaclass__ = type

# Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_profile_group
short_description: Configure profile groups in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and profile_group category.
      Examples include all parameters, and values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_profile_group:
description:
- Configure profile groups.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
application_list:
description:
- Name of an existing Application list. Source application.list.name.
type: str
av_profile:
description:
- Name of an existing Antivirus profile. Source antivirus.profile.name.
type: str
dlp_sensor:
description:
- Name of an existing DLP sensor. Source dlp.sensor.name.
type: str
dnsfilter_profile:
description:
- Name of an existing DNS filter profile. Source dnsfilter.profile.name.
type: str
icap_profile:
description:
- Name of an existing ICAP profile. Source icap.profile.name.
type: str
ips_sensor:
description:
- Name of an existing IPS sensor. Source ips.sensor.name.
type: str
name:
description:
- Profile group name.
required: true
type: str
profile_protocol_options:
description:
- Name of an existing Protocol options profile. Source firewall.profile-protocol-options.name.
type: str
spamfilter_profile:
description:
- Name of an existing Spam filter profile. Source spamfilter.profile.name.
type: str
ssh_filter_profile:
description:
- Name of an existing SSH filter profile. Source ssh-filter.profile.name.
type: str
ssl_ssh_profile:
description:
- Name of an existing SSL SSH profile. Source firewall.ssl-ssh-profile.name.
type: str
voip_profile:
description:
- Name of an existing VoIP profile. Source voip.profile.name.
type: str
waf_profile:
description:
- Name of an existing Web application firewall profile. Source waf.profile.name.
type: str
webfilter_profile:
description:
- Name of an existing Web filter profile. Source webfilter.profile.name.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure profile groups.
fortios_firewall_profile_group:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_profile_group:
application_list: "<your_own_value> (source application.list.name)"
av_profile: "<your_own_value> (source antivirus.profile.name)"
dlp_sensor: "<your_own_value> (source dlp.sensor.name)"
dnsfilter_profile: "<your_own_value> (source dnsfilter.profile.name)"
icap_profile: "<your_own_value> (source icap.profile.name)"
ips_sensor: "<your_own_value> (source ips.sensor.name)"
name: "default_name_9"
profile_protocol_options: "<your_own_value> (source firewall.profile-protocol-options.name)"
spamfilter_profile: "<your_own_value> (source spamfilter.profile.name)"
ssh_filter_profile: "<your_own_value> (source ssh-filter.profile.name)"
ssl_ssh_profile: "<your_own_value> (source firewall.ssl-ssh-profile.name)"
voip_profile: "<your_own_value> (source voip.profile.name)"
waf_profile: "<your_own_value> (source waf.profile.name)"
webfilter_profile: "<your_own_value> (source webfilter.profile.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a FortiOS API session using the credentials in `data`.

    Enables API debug output, selects HTTP or HTTPS transport, then logs in
    with the supplied host, username and password.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS is the default; it is disabled only when explicitly set falsy.
    if data.get('https', True):
        fos.https('on')
    else:
        fos.https('off')
    fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_profile_group_data(json):
    """Project `json` onto the keys the profile-group API accepts.

    Unknown keys and keys whose value is None are dropped; everything else
    is copied into a fresh dict.
    """
    option_list = ['application_list', 'av_profile', 'dlp_sensor',
                   'dnsfilter_profile', 'icap_profile', 'ips_sensor',
                   'name', 'profile_protocol_options', 'spamfilter_profile',
                   'ssh_filter_profile', 'ssl_ssh_profile', 'voip_profile',
                   'waf_profile', 'webfilter_profile']

    return {key: json[key]
            for key in option_list
            if key in json and json[key] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    FortiOS expects hyphenated parameter names while Ansible argument specs
    use underscores.  Lists and nested dicts are converted recursively;
    scalars are returned unchanged.

    Args:
      data: Any value; typically a dict, list, or scalar.

    Returns:
      The converted structure (new list/dict objects; scalars unchanged).
    """
    if isinstance(data, list):
        # Bug fix: the old loop rebound its loop variable
        # (`elem = underscore_to_hyphen(elem)`), which never stored the
        # converted value back, so dicts nested in lists kept their
        # underscore keys.  Build a converted list instead.
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def firewall_profile_group(data, fos):
    """Create, update or delete a firewall profile-group on the device.

    The desired state is taken from the top-level 'state' parameter when
    present, falling back to the deprecated per-object 'state' key.

    Args:
      data: Module parameters (expects 'vdom' and 'firewall_profile_group').
      fos: A FortiOSHandler-like API object providing set()/delete().

    Returns:
      The raw API response dict from fos.set() or fos.delete().
    """
    vdom = data['vdom']
    group_data = data['firewall_profile_group']
    if 'state' in data and data['state']:
        state = data['state']
    # Bug fix: the truthiness check must come before the membership test;
    # the old order (`'state' in group_data and group_data`) raised
    # TypeError when the group dict was None.
    elif group_data and 'state' in group_data:
        state = group_data['state']
    else:
        state = True
    filtered_data = underscore_to_hyphen(
        filter_firewall_profile_group_data(group_data))

    if state == "present":
        return fos.set('firewall',
                       'profile-group',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        return fos.delete('firewall',
                          'profile-group',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """Return True for a successful API result.

    A DELETE that came back 404 also counts as success: the object was
    already gone.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
    """Dispatch to the profile-group handler and summarize its result.

    Args:
      data: Module parameters; 'firewall_profile_group' must be present.
      fos: A FortiOSHandler-like API object.

    Returns:
      (is_error, has_changed, raw_response) tuple for module.exit_json.

    Raises:
      ValueError: if no firewall_profile_group data was supplied.
    """
    if not data['firewall_profile_group']:
        # Bug fix: previously this fell through to `resp`, which was never
        # assigned, producing a confusing NameError instead of a clear error.
        raise ValueError('firewall_profile_group data is required')
    resp = firewall_profile_group(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Ansible module entry point.

    Declares the argument spec, connects to the FortiGate either through
    Ansible's HTTPAPI persistent connection or through a legacy
    fortiosapi login, applies the firewall profile-group configuration
    and reports the result back to Ansible.
    """
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_profile_group": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "application_list": {"required": False, "type": "str"},
                "av_profile": {"required": False, "type": "str"},
                "dlp_sensor": {"required": False, "type": "str"},
                "dnsfilter_profile": {"required": False, "type": "str"},
                "icap_profile": {"required": False, "type": "str"},
                "ips_sensor": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"},
                "profile_protocol_options": {"required": False, "type": "str"},
                "spamfilter_profile": {"required": False, "type": "str"},
                "ssh_filter_profile": {"required": False, "type": "str"},
                "ssl_ssh_profile": {"required": False, "type": "str"},
                "voip_profile": {"required": False, "type": "str"},
                "waf_profile": {"required": False, "type": "str"},
                "webfilter_profile": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI path: reuse Ansible's persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_firewall(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: log in directly with the optional fortiosapi package.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_firewall(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
# Run the module only when executed directly (standard Ansible entry point).
if __name__ == '__main__':
    main()
| |
import errno
from contextlib import contextmanager
from plumbum.path.base import Path, FSUser
from plumbum.lib import _setdoc, six
from plumbum.commands import shquote, ProcessExecutionError
import sys
try: # Py3
import urllib.request as urllib
except ImportError:
import urllib # type: ignore
class StatRes(object):
    """POSIX-like stat result.

    Wraps a 10-tuple laid out like ``os.stat_result`` and exposes every
    slot both under its ``st_``-prefixed name and its bare alias.
    """
    def __init__(self, tup):
        self._tup = tuple(tup)
    def __getitem__(self, index):
        return self._tup[index]
    def _field(position):
        # Build a read-only property returning tuple slot *position*.
        return property(lambda self: self[position])
    st_mode = mode = _field(0)
    st_ino = ino = _field(1)
    st_dev = dev = _field(2)
    st_nlink = nlink = _field(3)
    st_uid = uid = _field(4)
    st_gid = gid = _field(5)
    st_size = size = _field(6)
    st_atime = atime = _field(7)
    st_mtime = mtime = _field(8)
    st_ctime = ctime = _field(9)
    del _field  # keep the helper out of the public class namespace
class RemotePath(Path):
    """The class implementing remote-machine paths.

    Instances are bound to the ``remote`` machine they were created from;
    all filesystem operations delegate to that machine's ``_path_*``
    helpers, which execute on the remote side.
    """
    def __new__(cls, remote, *parts):
        if not parts:
            raise TypeError("At least one path part is required (none given)")
        windows = (remote.uname.lower() == "windows")
        normed = []
        parts = tuple(map(str, parts)) # force the paths into string, so subscription works properly
        # Simple skip if path is absolute
        if parts[0] and parts[0][0] not in ("/", "\\"):
            # Relative first part: anchor at the remote's current working
            # directory (use the cached _cwd when the machine has one).
            cwd = (remote._cwd if hasattr(remote, '_cwd') else
                   remote._session.run("pwd")[1].strip())
            parts = (cwd, ) + parts
        for p in parts:
            if windows:
                plist = str(p).replace("\\", "/").split("/")
            else:
                plist = str(p).split("/")
            if not plist[0]:
                # An empty first component means this part is absolute: it
                # resets everything accumulated so far.
                plist.pop(0)
                del normed[:]
            for item in plist:
                # Normalisation: drop empty and '.' components, let '..'
                # consume the previous component.
                if item == "" or item == ".":
                    continue
                if item == "..":
                    if normed:
                        normed.pop(-1)
                else:
                    normed.append(item)
        if windows:
            self = super(RemotePath, cls).__new__(cls, "\\".join(normed))
            self.CASE_SENSITIVE = False # On this object only
        else:
            self = super(RemotePath, cls).__new__(cls, "/" + "/".join(normed))
            self.CASE_SENSITIVE = True
        self.remote = remote
        return self
    def _form(self, *parts):
        # Factory used by the base class to build new paths on the same remote.
        return RemotePath(self.remote, *parts)
    @property
    def _path(self):
        # The textual path (this object IS a string subclass via Path).
        return str(self)
    @property # type: ignore
    @_setdoc(Path)
    def name(self):
        if not "/" in str(self):
            return str(self)
        return str(self).rsplit("/", 1)[1]
    @property # type: ignore
    @_setdoc(Path)
    def dirname(self):
        if not "/" in str(self):
            return str(self)
        return self.__class__(self.remote, str(self).rsplit("/", 1)[0])
    @property # type: ignore
    @_setdoc(Path)
    def suffix(self):
        # NOTE(review): raises IndexError for names without a '.' — confirm
        # callers guard against that.
        return '.' + self.name.rsplit('.', 1)[1]
    @property # type: ignore
    @_setdoc(Path)
    def suffixes(self):
        name = self.name
        exts = []
        while '.' in name:
            name, ext = name.rsplit('.', 1)
            exts.append('.' + ext)
        return list(reversed(exts))
    @property # type: ignore
    @_setdoc(Path)
    def uid(self):
        uid, name = self.remote._path_getuid(self)
        return FSUser(int(uid), name)
    @property # type: ignore
    @_setdoc(Path)
    def gid(self):
        gid, name = self.remote._path_getgid(self)
        return FSUser(int(gid), name)
    def _get_info(self):
        # Identity tuple used by the base class for equality/hashing.
        return (self.remote, self._path)
    @_setdoc(Path)
    def join(self, *parts):
        return RemotePath(self.remote, self, *parts)
    @_setdoc(Path)
    def list(self):
        if not self.is_dir():
            return []
        return [self.join(fn) for fn in self.remote._path_listdir(self)]
    @_setdoc(Path)
    def iterdir(self):
        if not self.is_dir():
            return ()
        return (self.join(fn) for fn in self.remote._path_listdir(self))
    @_setdoc(Path)
    def is_dir(self):
        # text_mode is the human-readable file type reported by the remote
        # stat helper.
        res = self.remote._path_stat(self)
        if not res:
            return False
        return res.text_mode == "directory"
    @_setdoc(Path)
    def is_file(self):
        res = self.remote._path_stat(self)
        if not res:
            return False
        return res.text_mode in ("regular file", "regular empty file")
    @_setdoc(Path)
    def is_symlink(self):
        res = self.remote._path_stat(self)
        if not res:
            return False
        return res.text_mode == "symbolic link"
    @_setdoc(Path)
    def exists(self):
        return self.remote._path_stat(self) is not None
    @_setdoc(Path)
    def stat(self):
        res = self.remote._path_stat(self)
        if res is None:
            raise OSError(errno.ENOENT)
        return res
    @_setdoc(Path)
    def with_name(self, name):
        return self.__class__(self.remote, self.dirname) / name
    @_setdoc(Path)
    def with_suffix(self, suffix, depth=1):
        # depth=None strips all suffixes; otherwise at most `depth` of them.
        if (suffix and not suffix.startswith('.') or suffix == '.'):
            raise ValueError("Invalid suffix %r" % (suffix))
        name = self.name
        depth = len(self.suffixes) if depth is None else min(
            depth, len(self.suffixes))
        for i in range(depth):
            name, ext = name.rsplit('.', 1)
        return self.__class__(self.remote, self.dirname) / (name + suffix)
    @_setdoc(Path)
    def glob(self, pattern):
        fn = lambda pat: [RemotePath(self.remote, m) for m in self.remote._path_glob(self, pat)]
        return self._glob(pattern, fn)
    @_setdoc(Path)
    def delete(self):
        if not self.exists():
            return
        self.remote._path_delete(self)
    unlink = delete
    @_setdoc(Path)
    def move(self, dst):
        if isinstance(dst, RemotePath):
            if dst.remote is not self.remote:
                raise TypeError("dst points to a different remote machine")
        elif not isinstance(dst, six.string_types):
            raise TypeError(
                "dst must be a string or a RemotePath (to the same remote machine), "
                "got %r" % (dst, ))
        self.remote._path_move(self, dst)
    @_setdoc(Path)
    def copy(self, dst, override=False):
        if isinstance(dst, RemotePath):
            if dst.remote is not self.remote:
                raise TypeError("dst points to a different remote machine")
        elif not isinstance(dst, six.string_types):
            raise TypeError(
                "dst must be a string or a RemotePath (to the same remote machine), "
                "got %r" % (dst, ))
        if override:
            # Remove the destination first so the copy cannot collide.
            if isinstance(dst, six.string_types):
                dst = RemotePath(self.remote, dst)
            dst.remove()
        else:
            if isinstance(dst, six.string_types):
                dst = RemotePath(self.remote, dst)
            if dst.exists():
                raise TypeError("Override not specified and dst exists")
        self.remote._path_copy(self, dst)
    @_setdoc(Path)
    def mkdir(self, mode=None, parents=True, exist_ok=True):
        if parents and exist_ok:
            # minus_p presumably maps to `mkdir -p` semantics — creates
            # parents and tolerates an existing directory.
            self.remote._path_mkdir(self, mode=mode, minus_p=True)
        else:
            if parents and len(self.parts) > 1:
                self.remote._path_mkdir(self.parent, mode=mode, minus_p=True)
            try:
                self.remote._path_mkdir(self, mode=mode, minus_p=False)
            except ProcessExecutionError:
                _, ex, _ = sys.exc_info()
                if "File exists" in ex.stderr:
                    if not exist_ok:
                        raise OSError(
                            errno.EEXIST, "File exists (on remote end)",
                            str(self))
                else:
                    raise
    @_setdoc(Path)
    def read(self, encoding=None):
        data = self.remote._path_read(self)
        if encoding:
            data = data.decode(encoding)
        return data
    @_setdoc(Path)
    def write(self, data, encoding=None):
        if encoding:
            data = data.encode(encoding)
        self.remote._path_write(self, data)
    @_setdoc(Path)
    def touch(self):
        self.remote._path_touch(str(self))
    @_setdoc(Path)
    def chown(self, owner=None, group=None, recursive=None):
        self.remote._path_chown(
            self, owner, group,
            self.is_dir() if recursive is None else recursive)
    @_setdoc(Path)
    def chmod(self, mode):
        self.remote._path_chmod(mode, self)
    @_setdoc(Path)
    def access(self, mode=0):
        mode = self._access_mode_to_flags(mode)
        res = self.remote._path_stat(self)
        if res is None:
            return False
        # Permission bits only (rwxrwxrwx); checks the user and group
        # triplets against the requested mode.
        mask = res.st_mode & 0x1ff
        return ((mask >> 6) & mode) or ((mask >> 3) & mode)
    @_setdoc(Path)
    def link(self, dst):
        if isinstance(dst, RemotePath):
            if dst.remote is not self.remote:
                raise TypeError("dst points to a different remote machine")
        elif not isinstance(dst, six.string_types):
            raise TypeError(
                "dst must be a string or a RemotePath (to the same remote machine), "
                "got %r" % (dst, ))
        self.remote._path_link(self, dst, False)
    @_setdoc(Path)
    def symlink(self, dst):
        if isinstance(dst, RemotePath):
            if dst.remote is not self.remote:
                raise TypeError("dst points to a different remote machine")
        elif not isinstance(dst, six.string_types):
            raise TypeError(
                "dst must be a string or a RemotePath (to the same remote machine), "
                "got %r" % (dst, ))
        # Final boolean selects symbolic (True) vs hard (False) linking.
        self.remote._path_link(self, dst, True)
    def open(self, mode="r", bufsize=-1):
        """
        Opens this path as a file.
        Only works for ParamikoMachine-associated paths for now.
        """
        if hasattr(self.remote, "sftp") and hasattr(self.remote.sftp, "open"):
            return self.remote.sftp.open(self, mode, bufsize)
        else:
            raise NotImplementedError(
                "RemotePath.open only works for ParamikoMachine-associated "
                "paths for now")
    @_setdoc(Path)
    def as_uri(self, scheme='ssh'):
        return '{0}://{1}{2}'.format(scheme, self.remote._fqhost,
                                     urllib.pathname2url(str(self)))
    @property # type: ignore
    @_setdoc(Path)
    def stem(self):
        return self.name.rsplit('.')[0]
    @property # type: ignore
    @_setdoc(Path)
    def root(self):
        return '/'
    @property # type: ignore
    @_setdoc(Path)
    def drive(self):
        return ''
class RemoteWorkdir(RemotePath):
    """Remote working directory manipulator"""
    def __new__(cls, remote):
        # Seed the path from the remote shell's current directory.
        current = remote._session.run("pwd")[1].strip()
        return super(RemoteWorkdir, cls).__new__(cls, remote, current)
    def __hash__(self):
        raise TypeError("unhashable type")
    def chdir(self, newdir):
        """Changes the current working directory to the given one"""
        self.remote._session.run("cd %s" % (shquote(newdir), ))
        # Invalidate the machine's cached cwd, if it keeps one.
        if hasattr(self.remote, '_cwd'):
            del self.remote._cwd
        return self.__class__(self.remote)
    def getpath(self):
        """Returns the current working directory as a
        `remote path <plumbum.path.remote.RemotePath>` object"""
        return RemotePath(self.remote, self)
    @contextmanager
    def __call__(self, newdir):
        """A context manager used to ``chdir`` into a directory and then ``chdir`` back to
        the previous location; much like ``pushd``/``popd``.
        :param newdir: The destination director (a string or a
                       :class:`RemotePath <plumbum.path.remote.RemotePath>`)
        """
        previous = self._path
        inside = self.chdir(newdir)
        try:
            yield inside
        finally:
            self.chdir(previous)
| |
import re
from fixture.variables import Profinity
from tests_contract.contact_helper import Contact
class ContactBase():
    """Page-object helper wrapping contact (address book) operations
    performed through the selenium webdriver held by ``app.wd``."""
    def __init__(self, app):
        # app: application fixture exposing the webdriver as app.wd
        self.app = app
    def count(self):
        # Number of contact rows; each row exposes a "selected[]" checkbox.
        wd = self.app.wd
        self.open_contract_page()
        return len(wd.find_elements_by_name("selected[]"))
    def open_contract_page(self):
        # Navigate to the address book home page unless we are already there.
        wd = self.app.wd
        if not (wd.current_url.endswith('addressbook/')): #and len(wd.find_elements_by_name('new')) > 0):
            wd.find_element_by_link_text("home").click()
    def validation_of_contact_exist(self):
        # Make sure at least one contact exists, creating a fixture one if not.
        if self.count() == 0:
            self.create((Contact(first_name=Profinity.correct_data, last_name=Profinity.correct_data,
                                 middle_name=Profinity.correct_data, nickname=Profinity.correct_data,
                                 phone=Profinity.correct_phone_number, work=Profinity.correct_phone_number,
                                 home=Profinity.correct_phone_number,mobile=Profinity.correct_phone_number)))
    def edit_contract(self, Contract):
        # Open the first contact's edit form, fill it and submit.
        wd = self.app.wd
        self.open_contract_page()
        wd.find_element_by_css_selector("img[alt=\"Edit\"]").click()
        self.contract_field(Contract)
        wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
        self.open_contract_page()
    # NOTE(review): these class-level "contract_cache = None" statements look
    # intended to invalidate a cache after mutating methods, but they merely
    # (re)define a class attribute; see the note in get_contact_list.
    contract_cache = None
    def contract_field(self, Contact):
        # Fill every provided field of the contact form.
        # NOTE(review): the parameter shadows the imported Contact class.
        wd = self.app.wd
        a = dir(Contact)  # NOTE(review): unused leftover of the loop below
        '''for k in dir(Contact)[5:-1]:
            if k != 'add_year':
                wd.find_element_by_name("%s" % k).click()
                wd.find_element_by_name("%s" % k).clear()
                wd.find_element_by_name("%s" % k).send_keys("%s" % Contact.key(k))'''
        if Contact.first_name:
            wd.find_element_by_name("firstname").click()
            wd.find_element_by_name("firstname").clear()
            wd.find_element_by_name("firstname").send_keys("%s" % Contact.first_name)
        if Contact.middle_name:
            wd.find_element_by_name("middlename").click()
            wd.find_element_by_name("middlename").clear()
            wd.find_element_by_name("middlename").send_keys("%s" % Contact.middle_name)
        if Contact.last_name:
            wd.find_element_by_name("lastname").click()
            wd.find_element_by_name("lastname").clear()
            wd.find_element_by_name("lastname").send_keys("%s" % Contact.last_name)
        if Contact.nickname:
            wd.find_element_by_name("nickname").click()
            wd.find_element_by_name("nickname").clear()
            wd.find_element_by_name("nickname").send_keys("%s" % Contact.nickname)
        if Contact.title:
            wd.find_element_by_name("title").click()
            wd.find_element_by_name("title").clear()
            wd.find_element_by_name("title").send_keys("%s" % Contact.title)
        if Contact.company_name:
            wd.find_element_by_name("company").click()
            wd.find_element_by_name("company").clear()
            wd.find_element_by_name("company").send_keys("%s" % Contact.company_name)
        if Contact.address_name:
            wd.find_element_by_name("address").click()
            wd.find_element_by_name("address").clear()
            wd.find_element_by_name("address").send_keys("%s" % Contact.address_name)
        if Contact.home:
            wd.find_element_by_name("home").click()
            wd.find_element_by_name("home").clear()
            wd.find_element_by_name("home").send_keys("%s" % Contact.home)
        if Contact.mobile:
            wd.find_element_by_name("mobile").click()
            wd.find_element_by_name("mobile").clear()
            wd.find_element_by_name("mobile").send_keys("%s" % Contact.mobile)
        if Contact.work:
            wd.find_element_by_name("work").click()
            wd.find_element_by_name("work").clear()
            wd.find_element_by_name("work").send_keys("%s" % Contact.work)
        if Contact.fax:
            wd.find_element_by_name("fax").click()
            wd.find_element_by_name("fax").clear()
            wd.find_element_by_name("fax").send_keys("%s" % Contact.fax)
        if Contact.email1:
            # NOTE(review): duplicated click — probably unintentional.
            wd.find_element_by_name("email").click()
            wd.find_element_by_name("email").click()
            wd.find_element_by_name("email").clear()
            wd.find_element_by_name("email").send_keys("%s" % Contact.email1)
        if Contact.email2:
            wd.find_element_by_name("email2").click()
            wd.find_element_by_name("email2").clear()
            wd.find_element_by_name("email2").send_keys("%s" % Contact.email2)
        if Contact.email3:
            wd.find_element_by_name("email3").click()
            wd.find_element_by_name("email3").clear()
            wd.find_element_by_name("email3").send_keys("%s" % Contact.email3)
        if Contact.homepage:
            wd.find_element_by_name("homepage").click()
            wd.find_element_by_name("homepage").clear()
            wd.find_element_by_name("homepage").send_keys("%s" % Contact.homepage)
        if Contact.add_year:
            # in futures we can made function where we will sent date and it choose it with similar way as previous
            if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[3]").is_selected():
                wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[3]").click()
            if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").is_selected():
                wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").click()
            wd.find_element_by_name("byear").click()
            wd.find_element_by_name("byear").clear()
            wd.find_element_by_name("byear").send_keys("1999")
            if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[3]").is_selected():
                wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[3]").click()
            if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[2]").is_selected():
                wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[2]").click()
            wd.find_element_by_name("ayear").click()
            wd.find_element_by_name("ayear").clear()
            wd.find_element_by_name("ayear").send_keys("1999")
        if Contact.address:
            wd.find_element_by_name("address2").click()
            wd.find_element_by_name("address2").clear()
            wd.find_element_by_name("address2").send_keys("%s" % Contact.address)
        if Contact.phone:
            wd.find_element_by_name("phone2").click()
            wd.find_element_by_name("phone2").clear()
            wd.find_element_by_name("phone2").send_keys("%s" % Contact.phone)
        if Contact.notes:
            wd.find_element_by_name("notes").click()
            wd.find_element_by_name("notes").clear()
            wd.find_element_by_name("notes").send_keys("%s" % Contact.notes)
    def create(self, Contact):
        # Open the "add new" form, fill it and submit.
        wd = self.app.wd
        wd.find_element_by_link_text("add new").click()
        self.contract_field(Contact)
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
    contract_cache = None
    def delete_contact(self):
        # Convenience wrapper: delete the first contact in the list.
        self.delete_contact_by_index(0)
    contract_cache = None
    def get_contact_list(self):
        # Scrape the contact table into Contact objects (None for blanks).
        # NOTE(review): the caching is broken — contract_cache below is a
        # local, self.contract_cache is never populated, and a non-None
        # cache would make the final return raise UnboundLocalError.
        if self.contract_cache is None:
            wd = self.app.wd
            self.open_contract_page()
            contract_cache = []
            for row in wd.find_elements_by_name('entry'):
                cells = row.find_elements_by_tag_name('td')
                my_id = cells[0].find_element_by_tag_name('input').get_attribute('value')
                all_phones = cells[5].text.splitlines()
                all_email = cells[4].text.splitlines()
                adress = cells[3].text.splitlines()
                # a..d: home/mobile/work/secondary phones; e..g: emails; h: address
                a = None
                b = None
                c = None
                d = None
                e = None
                f = None
                g = None
                h = None
                if all_phones:
                    if len(all_phones)>0 and all_phones[0]:
                        a=all_phones[0]
                    if len(all_phones)>1 and all_phones[1]:
                        b=all_phones[1]
                    if len(all_phones)>2 and all_phones[2]:
                        c = all_phones[2]
                    if len(all_phones)>3 and all_phones[3]:
                        d = all_phones[3]
                if all_email:
                    if len(all_email)>0 and all_email[0]:
                        e = all_email[0]
                    if len(all_email)>1 and all_email[1]:
                        f = all_email[1]
                    if len(all_email)>2 and all_email[2]:
                        g = all_email[2]
                if adress:
                    h = adress[0]
                contract_cache.append(Contact(first_name=cells[1].text, last_name=cells[2].text, id=my_id, home=a,
                                              mobile = b, work = c, phone = d, email1 = e, email2= f,
                                              email3 = g , address = h))
        return contract_cache
    def get_contact_list_without_none(self):
        # Same as get_contact_list but blanks become '' instead of None.
        # NOTE(review): shares the broken caching pattern described above.
        if self.contract_cache is None:
            wd = self.app.wd
            self.open_contract_page()
            contract_cache = []
            for row in wd.find_elements_by_name('entry'):
                cells = row.find_elements_by_tag_name('td')
                my_id = cells[0].find_element_by_tag_name('input').get_attribute('value')
                all_phones = cells[5].text.splitlines()
                all_email = cells[4].text.splitlines()
                adress = cells[3].text.splitlines()
                a = ''
                b = ''
                c = ''
                d = ''
                e = ''
                f = ''
                g = ''
                h = ''
                if all_phones:
                    if len(all_phones)>0 and all_phones[0]:
                        a=all_phones[0]
                    if len(all_phones)>1 and all_phones[1]:
                        b=all_phones[1]
                    if len(all_phones)>2 and all_phones[2]:
                        c = all_phones[2]
                    if len(all_phones)>3 and all_phones[3]:
                        d = all_phones[3]
                if all_email:
                    if len(all_email)>0 and all_email[0]:
                        e = all_email[0]
                    if len(all_email)>1 and all_email[1]:
                        f = all_email[1]
                    if len(all_email)>2 and all_email[2]:
                        g = all_email[2]
                if adress:
                    h = adress[0]
                contract_cache.append(Contact(first_name=cells[1].text, last_name=cells[2].text, id=my_id, home=a,
                                              mobile = b, work = c, phone = d, email1 = e, email2= f,
                                              email3 = g , address = h))
        return contract_cache
    def delete_contact_by_index(self, index):
        # Tick the row's checkbox, press Delete and accept the js alert.
        wd = self.app.wd
        self.open_contract_page()
        wd.find_elements_by_name("selected[]")[index].click()
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        wd.switch_to_alert().accept()
    contract_cache = None
    def edit_contract_by_index(self, Contract, index):
        # Open the edit form of the index-th contact, fill it and submit.
        wd = self.app.wd
        self.open_contract_page()
        wd.find_elements_by_css_selector('img[alt="Edit"]')[index].click()
        self.contract_field(Contract)
        wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
        self.open_contract_page()
    contract_cache = None
    def open_contact_to_edit_by_index(self, index):
        # Column 7 of a row holds the edit (pencil) link.
        wd = self.app.wd
        self.open_contract_page()
        row = wd.find_elements_by_name('entry')[index]
        cell = row.find_elements_by_tag_name('td')[7]
        cell.find_element_by_tag_name('a').click()
    def open_contact_view_by_index(self, index):
        # Column 6 of a row holds the view (details) link.
        wd = self.app.wd
        self.open_contract_page()
        row = wd.find_elements_by_name('entry')[index]
        cell = row.find_elements_by_tag_name('td')[6]
        cell.find_element_by_tag_name('a').click()
    def get_contact_info_from_edit_page(self, index):
        # Read the contact's fields straight out of the edit form inputs.
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        first_name = wd.find_element_by_name('firstname').get_attribute('value')
        last_name = wd.find_element_by_name('lastname').get_attribute('value')
        my_id = wd.find_element_by_name('id').get_attribute('value')
        home_phone = wd.find_element_by_name('home').get_attribute('value')
        work_phone = wd.find_element_by_name('work').get_attribute('value')
        mobile_phone = wd.find_element_by_name('mobile').get_attribute('value')
        second_phone = wd.find_element_by_name('phone2').get_attribute('value')
        email1 = wd.find_element_by_name('email').get_attribute('value')
        email2 = wd.find_element_by_name('email2').get_attribute('value')
        email3 = wd.find_element_by_name('email3').get_attribute('value')
        address = wd.find_element_by_name('address').get_attribute('value')
        fax = wd.find_element_by_name('fax').get_attribute('value')
        return(Contact(first_name=first_name, last_name=last_name, id=my_id, home=home_phone,
                       work=work_phone,mobile=mobile_phone, phone=second_phone, email1 = email1, email2 = email2,
                       email3 = email3, address=address, fax = fax))
    def get_contact_info_from_view_page(self, index):
        # Parse the phone numbers off the free-text view page ("H: ...", etc.).
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id('content').text
        home_phone = ''
        mobile_phone = ''
        work_phone = ''
        second_phone = ''
        fax = ''
        if re.search('H: (.*)', text):
            home_phone = re.search('H: (.*)', text).group(1)
        if re.search('M: (.*)', text):
            mobile_phone = re.search('M: (.*)', text).group(1)
        if re.search('W: (.*)', text):
            work_phone = re.search('W: (.*)', text).group(1)
        if re.search('P: (.*)', text):
            second_phone = re.search('P: (.*)', text).group(1)
        if re.search('F: (.*)', text):
            fax = re.search('F: (.*)', text).group(1)
        return Contact(home=home_phone,work=work_phone,mobile=mobile_phone, phone=second_phone, fax = fax)
    # all data old
    '''
    Contact(first_name=Profinity.correct_data, last_name=Profinity.correct_data,
    middle_name=Profinity.correct_data, nickname=Profinity.correct_data,
    title=Profinity.correct_data, company_name=Profinity.correct_data,
    address_name=Profinity.correct_data, work=Profinity.correct_phone_number,
    fax=Profinity.correct_phone_number, home=Profinity.correct_phone_number,
    mobile=Profinity.correct_phone_number, email1=Profinity.correct_email,
    email2=Profinity.correct_email, email3=Profinity.correct_email, homepage=Profinity.correct_data,
    add_year=True, address=Profinity.correct_data, phone=Profinity.correct_data,
    notes=Profinity.correct_data)
    '''
| |
"""Common functions for RFLink component tests and generic platform tests."""
from unittest.mock import Mock
import pytest
from voluptuous.error import MultipleInvalid
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.rflink import (
CONF_RECONNECT_INTERVAL,
DATA_ENTITY_LOOKUP,
EVENT_KEY_COMMAND,
EVENT_KEY_SENSOR,
SERVICE_SEND_COMMAND,
TMP_ENTITY,
RflinkCommand,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_STOP_COVER, SERVICE_TURN_OFF
async def mock_rflink(
    hass, config, domain, monkeypatch, failures=None, failcommand=False
):
    """Create mock RFLink asyncio protocol, test component setup.

    Returns (event_callback, mock_create, protocol, disconnect_callback)
    so tests can inject gateway events, inspect connection attempts,
    verify sent commands and simulate disconnects.  When *failcommand* is
    True, both send variants report failure.
    """
    transport, protocol = (Mock(), Mock())
    async def send_command_ack(*command):
        # Acked variant of the send API; result mirrors failcommand.
        return not failcommand
    protocol.send_command_ack = Mock(wraps=send_command_ack)
    def send_command(*command):
        # Fire-and-forget variant of the send API.
        return not failcommand
    protocol.send_command = Mock(wraps=send_command)
    async def create_rflink_connection(*args, **kwargs):
        """Return mocked transport and protocol."""
        # failures can be a list of booleans indicating in which sequence
        # creating a connection should succeed or fail (popped from the end)
        if failures:
            fail = failures.pop()
        else:
            fail = False
        if fail:
            raise ConnectionRefusedError
        else:
            return transport, protocol
    mock_create = Mock(wraps=create_rflink_connection)
    monkeypatch.setattr(
        "homeassistant.components.rflink.create_rflink_connection", mock_create
    )
    await async_setup_component(hass, "rflink", config)
    await async_setup_component(hass, domain, config)
    await hass.async_block_till_done()
    # hook into mock config for injecting events
    event_callback = mock_create.call_args_list[0][1]["event_callback"]
    assert event_callback
    disconnect_callback = mock_create.call_args_list[0][1]["disconnect_callback"]
    return event_callback, mock_create, protocol, disconnect_callback
async def test_version_banner(hass, monkeypatch):
    """A version banner event from the gateway is accepted without errors."""
    # use sensor domain during testing main platform
    domain = "sensor"
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        domain: {
            "platform": "rflink",
            "devices": {"test": {"name": "test", "sensor_type": "temperature"}},
        },
    }
    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(hass, config, domain, monkeypatch)
    banner = {
        "hardware": "Nodo RadioFrequencyLink",
        "firmware": "RFLink Gateway",
        "version": "1.1",
        "revision": "45",
    }
    event_callback(banner)
async def test_send_no_wait(hass, monkeypatch):
    """With wait_for_ack disabled, the non-ack send_command API is used."""
    domain = "switch"
    config = {
        "rflink": {"port": "/dev/ttyABC0", "wait_for_ack": False},
        domain: {
            "platform": "rflink",
            "devices": {
                "protocol_0_0": {"name": "test", "aliases": ["test_alias_0_0"]}
            },
        },
    }
    # setup mocking rflink module
    _, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
    await hass.services.async_call(
        domain, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "switch.test"}
    )
    await hass.async_block_till_done()
    # the protocol must have been handed the device id and the command
    sent = protocol.send_command.call_args_list[0][0]
    assert sent[0] == "protocol_0_0"
    assert sent[1] == "off"
async def test_cover_send_no_wait(hass, monkeypatch):
    """Stopping a cover with wait_for_ack disabled uses plain send_command."""
    domain = "cover"
    config = {
        "rflink": {"port": "/dev/ttyABC0", "wait_for_ack": False},
        domain: {
            "platform": "rflink",
            "devices": {
                "RTS_0100F2_0": {"name": "test", "aliases": ["test_alias_0_0"]}
            },
        },
    }
    # setup mocking rflink module
    _, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
    await hass.services.async_call(
        domain, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: "cover.test"}
    )
    await hass.async_block_till_done()
    # the protocol must have been handed the device id and the STOP command
    sent = protocol.send_command.call_args_list[0][0]
    assert sent[0] == "RTS_0100F2_0"
    assert sent[1] == "STOP"
async def test_send_command(hass, monkeypatch):
    """The rflink.send_command service forwards device id and command."""
    domain = "rflink"
    config = {"rflink": {"port": "/dev/ttyABC0"}}
    # setup mocking rflink module
    _, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
    await hass.services.async_call(
        domain,
        SERVICE_SEND_COMMAND,
        {"device_id": "newkaku_0000c6c2_1", "command": "on"},
    )
    await hass.async_block_till_done()
    # the acked variant must have been used with the service arguments
    sent = protocol.send_command_ack.call_args_list[0][0]
    assert sent[0] == "newkaku_0000c6c2_1"
    assert sent[1] == "on"
async def test_send_command_invalid_arguments(hass, monkeypatch):
    """send_command service rejects missing or unknown arguments."""
    domain = "rflink"
    config = {"rflink": {"port": "/dev/ttyABC0"}}
    # setup mocking rflink module
    _, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
    # every incomplete payload must fail schema validation
    with pytest.raises(MultipleInvalid):
        await hass.services.async_call(domain, SERVICE_SEND_COMMAND, {"command": "on"})
    with pytest.raises(MultipleInvalid):
        await hass.services.async_call(
            domain, SERVICE_SEND_COMMAND, {"device_id": "newkaku_0000c6c2_1"}
        )
    with pytest.raises(MultipleInvalid):
        await hass.services.async_call(domain, SERVICE_SEND_COMMAND, {})
    await hass.async_block_till_done()
    # nothing may have reached the protocol layer
    assert protocol.send_command_ack.call_args_list == []
    # an unknown command is rejected by the service handler itself
    success = await hass.services.async_call(
        domain,
        SERVICE_SEND_COMMAND,
        {"device_id": "newkaku_0000c6c2_1", "command": "no_command"},
    )
    assert not success, "send command should not succeed for unknown command"
async def test_send_command_event_propagation(hass, monkeypatch):
    """Test event propagation for send_command service."""
    domain = "light"
    config = {
        "rflink": {"port": "/dev/ttyABC0"},
        domain: {
            "platform": "rflink",
            "devices": {
                "protocol_0_1": {"name": "test1"},
            },
        },
    }
    # setup mocking rflink module
    _, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
    # default value = 'off'
    assert hass.states.get(f"{domain}.test1").state == "off"
    await hass.services.async_call(
        "rflink",
        SERVICE_SEND_COMMAND,
        {"device_id": "protocol_0_1", "command": "on"},
        blocking=True,
    )
    await hass.async_block_till_done()
    # the acked variant is used and the entity state follows the sent command
    assert protocol.send_command_ack.call_args_list[0][0][0] == "protocol_0_1"
    assert protocol.send_command_ack.call_args_list[0][0][1] == "on"
    assert hass.states.get(f"{domain}.test1").state == "on"
    await hass.services.async_call(
        "rflink",
        SERVICE_SEND_COMMAND,
        {"device_id": "protocol_0_1", "command": "alloff"},
        blocking=True,
    )
    await hass.async_block_till_done()
    # the group command 'alloff' also turns the configured light off
    assert protocol.send_command_ack.call_args_list[1][0][0] == "protocol_0_1"
    assert protocol.send_command_ack.call_args_list[1][0][1] == "alloff"
    assert hass.states.get(f"{domain}.test1").state == "off"
async def test_reconnecting_after_disconnect(hass, monkeypatch):
    """An unexpected disconnect should cause a reconnect."""
    domain = "sensor"
    config = {
        "rflink": {"port": "/dev/ttyABC0", CONF_RECONNECT_INTERVAL: 0},
        domain: {"platform": "rflink"},
    }
    # setup mocking rflink module
    _, mock_create, _, disconnect_callback = await mock_rflink(
        hass, config, domain, monkeypatch
    )
    assert disconnect_callback, "disconnect callback not passed to rflink"
    # rflink initiated disconnect
    disconnect_callback(None)
    await hass.async_block_till_done()
    # we expect 2 calls: the initial connection and the reconnect
    assert mock_create.call_count == 2
async def test_reconnecting_after_failure(hass, monkeypatch):
    """A failure to reconnect should be retried."""
    domain = "sensor"
    config = {
        "rflink": {"port": "/dev/ttyABC0", CONF_RECONNECT_INTERVAL: 0},
        domain: {"platform": "rflink"},
    }
    # success first time but fail second (the mock pops from the end of
    # this list on each connection attempt)
    failures = [False, True, False]
    # setup mocking rflink module
    _, mock_create, _, disconnect_callback = await mock_rflink(
        hass, config, domain, monkeypatch, failures=failures
    )
    # rflink initiated disconnect
    disconnect_callback(None)
    # wait for reconnects to have happened
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    # we expect 3 calls, the initial and 2 reconnects
    assert mock_create.call_count == 3
async def test_error_when_not_connected(hass, monkeypatch):
    """Sending command should error when not connected."""
    domain = "switch"
    config = {
        "rflink": {"port": "/dev/ttyABC0", CONF_RECONNECT_INTERVAL: 0},
        domain: {
            "platform": "rflink",
            "devices": {
                "protocol_0_0": {"name": "test", "aliases": ["test_alias_0_0"]}
            },
        },
    }
    # success first time but fail second (popped from the end of the list)
    failures = [False, True, False]
    # setup mocking rflink module
    _, _, _, disconnect_callback = await mock_rflink(
        hass, config, domain, monkeypatch, failures=failures
    )
    # rflink initiated disconnect; the immediate reconnect attempt fails
    disconnect_callback(None)
    success = await hass.services.async_call(
        domain, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "switch.test"}
    )
    assert not success, "changing state should not succeed when disconnected"
async def test_async_send_command_error(hass, monkeypatch):
    """Sending command should error when protocol fails."""
    component = "rflink"
    hass_config = {"rflink": {"port": "/dev/ttyABC0"}}

    # Set up mocking of the rflink module with a protocol whose command
    # acknowledgement fails.
    _, _, protocol, _ = await mock_rflink(
        hass, hass_config, component, monkeypatch, failcommand=True
    )

    success = await hass.services.async_call(
        component,
        SERVICE_SEND_COMMAND,
        {"device_id": "newkaku_0000c6c2_1", "command": SERVICE_TURN_OFF},
    )
    await hass.async_block_till_done()
    assert not success, "send command should not succeed if failcommand=True"

    # Verify the exact device id and command handed to the protocol.
    first_call_args = protocol.send_command_ack.call_args_list[0][0]
    assert first_call_args[0] == "newkaku_0000c6c2_1"
    assert first_call_args[1] == SERVICE_TURN_OFF
async def test_race_condition(hass, monkeypatch):
    """Test race condition for unknown components."""
    component = "light"
    hass_config = {"rflink": {"port": "/dev/ttyABC0"}, component: {"platform": "rflink"}}
    placeholder = TMP_ENTITY.format("test3")

    # Set up mocking of the rflink module.
    event_callback, _, _, _ = await mock_rflink(hass, hass_config, component, monkeypatch)

    # Fire two events for a sensor that has not been configured yet.
    event_callback({"id": "test3", "command": "off"})
    event_callback({"id": "test3", "command": "on"})

    # A placeholder entity must be registered under EVENT_KEY_COMMAND ...
    assert placeholder in hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND]["test3"]
    # ... but never under EVENT_KEY_SENSOR.
    assert placeholder not in hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR]["test3"]

    await hass.async_block_till_done()

    # The new entity exists and holds the state of the first event.
    new_sensor = hass.states.get(f"{component}.test3")
    assert new_sensor
    assert new_sensor.state == "off"

    event_callback({"id": "test3", "command": "on"})
    await hass.async_block_till_done()

    # The placeholder must have been removed from EVENT_KEY_COMMAND ...
    assert placeholder not in hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND]["test3"]
    # ... and the real entity now follows subsequent events.
    new_sensor = hass.states.get(f"{component}.test3")
    assert new_sensor
    assert new_sensor.state == "on"
async def test_not_connected(hass, monkeypatch):
    """Test Error when sending commands to a disconnected device."""
    import pytest
    from homeassistant.core import HomeAssistantError
    # With no protocol registered on the class, any command attempt on a
    # device must raise rather than silently fail.
    test_device = RflinkCommand("DUMMY_DEVICE")
    RflinkCommand.set_rflink_protocol(None)
    with pytest.raises(HomeAssistantError):
        await test_device._async_handle_command("turn_on")
| |
from __future__ import absolute_import
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging, os, socket, time, types
from heapq import heappush, heappop, nsmallest
from proton import Collector, Connection, ConnectionException, Delivery, Described, dispatch
from proton import Endpoint, Event, EventBase, EventType, generate_uuid, Handler, Link, Message
from proton import ProtonException, PN_ACCEPTED, PN_PYREF, SASL, Session, SSL, SSLDomain, SSLUnavailable, symbol
from proton import Terminus, Timeout, Transport, TransportException, ulong, Url
from select import select
from proton.handlers import OutgoingMessageHandler
from proton import unicode2utf8, utf82unicode
import traceback
from proton import WrappedHandler, _chandler, secs2millis, millis2secs, timeout2millis, millis2timeout, Selectable
from .wrapper import Wrapper, PYCTX
from cproton import *
from . import _compat
try:
import Queue
except ImportError:
import queue as Queue
class Task(Wrapper):
    """Python wrapper around a proton C task (a scheduled callback)."""

    @staticmethod
    def wrap(impl):
        """Return a Task wrapping *impl*, or None when *impl* is None."""
        return None if impl is None else Task(impl)

    def __init__(self, impl):
        Wrapper.__init__(self, impl, pn_task_attachments)

    def _init(self):
        pass

    def cancel(self):
        """Cancel this task so its callback will not run."""
        pn_task_cancel(self._impl)
class Acceptor(Wrapper):
    """Python wrapper around a proton C acceptor (a listening socket)."""

    def __init__(self, impl):
        Wrapper.__init__(self, impl)

    def set_ssl_domain(self, ssl_domain):
        """Serve incoming connections over SSL using *ssl_domain*."""
        pn_acceptor_set_ssl_domain(self._impl, ssl_domain._domain)

    def close(self):
        """Stop accepting new incoming connections."""
        pn_acceptor_close(self._impl)
class Reactor(Wrapper):
    """Python wrapper around the proton C reactor: an event loop driving
    handlers, connections, acceptors, selectables and scheduled tasks."""
    @staticmethod
    def wrap(impl):
        """Wrap a C reactor pointer, re-creating a registered Python
        subclass when one was recorded in the reactor's attachments."""
        if impl is None:
            return None
        else:
            record = pn_reactor_attachments(impl)
            attrs = pn_void2py(pn_record_get(record, PYCTX))
            if attrs and 'subclass' in attrs:
                return attrs['subclass'](impl=impl)
            else:
                return Reactor(impl=impl)
    def __init__(self, *handlers, **kwargs):
        Wrapper.__init__(self, kwargs.get("impl", pn_reactor), pn_reactor_attachments)
        for h in handlers:
            self.handler.add(h)
    def _init(self):
        # Collected (exc, value, traceback) triples from handler callbacks.
        self.errors = []
    def on_error(self, info):
        # Record the error and yield so process() can surface it promptly.
        self.errors.append(info)
        self.yield_()
    def _get_global(self):
        return WrappedHandler.wrap(pn_reactor_get_global_handler(self._impl), self.on_error)
    def _set_global(self, handler):
        # _chandler returns a new C handler; drop our reference after the
        # reactor has taken its own.
        impl = _chandler(handler, self.on_error)
        pn_reactor_set_global_handler(self._impl, impl)
        pn_decref(impl)
    global_handler = property(_get_global, _set_global)
    def _get_timeout(self):
        return millis2timeout(pn_reactor_get_timeout(self._impl))
    def _set_timeout(self, secs):
        return pn_reactor_set_timeout(self._impl, timeout2millis(secs))
    timeout = property(_get_timeout, _set_timeout)
    def yield_(self):
        pn_reactor_yield(self._impl)
    def mark(self):
        # Delegates to pn_reactor_mark; presumably refreshes and returns
        # the reactor's timestamp — confirm against the C API.
        return pn_reactor_mark(self._impl)
    def _get_handler(self):
        return WrappedHandler.wrap(pn_reactor_get_handler(self._impl), self.on_error)
    def _set_handler(self, handler):
        impl = _chandler(handler, self.on_error)
        pn_reactor_set_handler(self._impl, impl)
        pn_decref(impl)
    handler = property(_get_handler, _set_handler)
    def run(self):
        """Start the reactor and process events until no work remains."""
        # NOTE(review): arbitrary non-zero timeout (~pi seconds) keeps
        # process() ticking periodically; confirm before changing.
        self.timeout = 3.14159265359
        self.start()
        while self.process(): pass
        self.stop()
    def wakeup(self):
        # Interrupt a blocking process() call; raises on I/O error.
        n = pn_reactor_wakeup(self._impl)
        if n: raise IOError(pn_error_text(pn_io_error(pn_reactor_io(self._impl))))
    def start(self):
        pn_reactor_start(self._impl)
    @property
    def quiesced(self):
        return pn_reactor_quiesced(self._impl)
    def _check_errors(self):
        # Print all but the most recent recorded error, then re-raise the
        # most recent one with its original traceback.
        if self.errors:
            for exc, value, tb in self.errors[:-1]:
                traceback.print_exception(exc, value, tb)
            exc, value, tb = self.errors[-1]
            _compat.raise_(exc, value, tb)
    def process(self):
        """Run one iteration of the loop; handler errors are re-raised
        here. Returns whether more work remains."""
        result = pn_reactor_process(self._impl)
        self._check_errors()
        return result
    def stop(self):
        pn_reactor_stop(self._impl)
        self._check_errors()
    def schedule(self, delay, task):
        """Schedule handler *task* to fire after *delay* seconds; returns
        a cancellable Task (or None)."""
        impl = _chandler(task, self.on_error)
        task = Task.wrap(pn_reactor_schedule(self._impl, secs2millis(delay), impl))
        pn_decref(impl)
        return task
    def acceptor(self, host, port, handler=None):
        """Listen on host:port; returns an Acceptor or raises IOError when
        the listening socket cannot be created."""
        impl = _chandler(handler, self.on_error)
        aimpl = pn_reactor_acceptor(self._impl, unicode2utf8(host), str(port), impl)
        pn_decref(impl)
        if aimpl:
            return Acceptor(aimpl)
        else:
            raise IOError("%s (%s:%s)" % (pn_error_text(pn_io_error(pn_reactor_io(self._impl))), host, port))
    def connection(self, handler=None):
        """Create a new Connection whose events are routed to *handler*."""
        impl = _chandler(handler, self.on_error)
        result = Connection.wrap(pn_reactor_connection(self._impl, impl))
        pn_decref(impl)
        return result
    def selectable(self, handler=None):
        """Create a new Selectable, optionally attaching *handler* to it."""
        impl = _chandler(handler, self.on_error)
        result = Selectable.wrap(pn_reactor_selectable(self._impl))
        if impl:
            record = pn_selectable_attachments(result._impl)
            pn_record_set_handler(record, impl)
            pn_decref(impl)
        return result
    def update(self, sel):
        pn_reactor_update(self._impl, sel._impl)
    def push_event(self, obj, etype):
        # Inject a Python-object event of the given EventType into the
        # reactor's collector.
        pn_collector_put(pn_reactor_collector(self._impl), PN_PYREF, pn_py2void(obj), etype.number)
from proton import wrappers as _wrappers
# Register factories so C objects handed back by the proton engine are
# re-wrapped into their Python counterparts.
_wrappers["pn_reactor"] = lambda x: Reactor.wrap(pn_cast_pn_reactor(x))
_wrappers["pn_task"] = lambda x: Task.wrap(pn_cast_pn_task(x))
class EventInjector(object):
    """
    Can be added to a reactor to allow events to be triggered by an
    external thread but handled on the event thread associated with
    the reactor. An instance of this class can be passed to the
    Reactor.selectable() method of the reactor in order to activate
    it. The close() method should be called when it is no longer
    needed, to allow the event loop to end if needed.
    """
    def __init__(self):
        # Thread-safe queue of pending events, paired with a pipe used to
        # wake the reactor's selector from another thread.
        self.queue = Queue.Queue()
        self.pipe = os.pipe()
        self._closed = False
    def trigger(self, event):
        """
        Request that the given event be dispatched on the event thread
        of the reactor to which this EventInjector was added.
        """
        self.queue.put(event)
        # A single byte written to the pipe makes the selectable readable.
        os.write(self.pipe[1], _compat.str2bin("!"))
    def close(self):
        """
        Request that this EventInjector be closed. Existing events
        will be dispatched on the reactor's event dispatch thread,
        then this will be removed from the set of interest.
        """
        self._closed = True
        os.write(self.pipe[1], _compat.str2bin("!"))
    def fileno(self):
        # Read end of the pipe; this is what the reactor's selector watches.
        return self.pipe[0]
    def on_selectable_init(self, event):
        # Wire this injector's pipe into the reactor's selectable and
        # register interest in readability.
        sel = event.context
        sel.fileno(self.fileno())
        sel.reading = True
        event.reactor.update(sel)
    def on_selectable_readable(self, event):
        # Drain the wakeup bytes, then forward every queued event into
        # the reactor on its own thread.
        os.read(self.pipe[0], 512)
        while not self.queue.empty():
            requested = self.queue.get()
            event.reactor.push_event(requested.context, requested.type)
        if self._closed:
            s = event.context
            s.terminate()
            event.reactor.update(s)
class ApplicationEvent(EventBase):
    """
    Application defined event, which can optionally be associated with
    an engine object and or an arbitrary subject
    """
    def __init__(self, typename, connection=None, session=None, link=None, delivery=None, subject=None):
        super(ApplicationEvent, self).__init__(PN_PYREF, self, EventType(typename))
        self.connection = connection
        self.session = session
        self.link = link
        self.delivery = delivery
        # Derive the coarser-grained engine objects from the finest one
        # supplied, so passing only e.g. a delivery still populates the
        # link, session and connection attributes.
        if self.delivery:
            self.link = self.delivery.link
        if self.link:
            self.session = self.link.session
        if self.session:
            self.connection = self.session.connection
        self.subject = subject
    def __repr__(self):
        # BUG FIX: the original referenced an undefined name `typename`,
        # raising NameError whenever an ApplicationEvent was printed.
        # The event type stored by EventBase carries the same information.
        objects = [self.connection, self.session, self.link, self.delivery, self.subject]
        return "%s(%s)" % (self.type, ", ".join([str(o) for o in objects if o is not None]))
class Transaction(object):
    """
    Class to track state of an AMQP 1.0 transaction.

    The transaction is declared on construction by sending a control
    message over the txn_ctrl sender link; outcomes of the declare and
    discharge control messages are fed back via handle_outcome().
    """
    def __init__(self, txn_ctrl, handler, settle_before_discharge=False):
        # txn_ctrl: sender link attached to the remote transaction coordinator
        # handler: object notified via on_transaction_* callbacks
        self.txn_ctrl = txn_ctrl
        self.handler = handler
        # Transaction id, assigned by the remote once the declare is accepted.
        self.id = None
        # Deliveries of the declare/discharge control messages; used to
        # recognise their outcomes in handle_outcome().
        self._declare = None
        self._discharge = None
        self.failed = False
        # Deliveries accepted within this transaction but not yet settled.
        self._pending = []
        self.settle_before_discharge = settle_before_discharge
        self.declare()
    def commit(self):
        """Commit the transaction (discharge with failed=False)."""
        self.discharge(False)
    def abort(self):
        """Roll back the transaction (discharge with failed=True)."""
        self.discharge(True)
    def declare(self):
        self._declare = self._send_ctrl(symbol(u'amqp:declare:list'), [None])
    def discharge(self, failed):
        self.failed = failed
        self._discharge = self._send_ctrl(symbol(u'amqp:discharge:list'), [self.id, failed])
    def _send_ctrl(self, descriptor, value):
        # Send a described control message to the coordinator and tag the
        # resulting delivery with this transaction for outcome routing.
        delivery = self.txn_ctrl.send(Message(body=Described(descriptor, value)))
        delivery.transaction = self
        return delivery
    def send(self, sender, msg, tag=None):
        """Send *msg* over *sender* as part of this transaction."""
        dlv = sender.send(msg, tag=tag)
        # 0x34 — presumably the AMQP transactional-state descriptor code;
        # confirm against the AMQP 1.0 transactions spec.
        dlv.local.data = [self.id]
        dlv.update(0x34)
        return dlv
    def accept(self, delivery):
        """Accept *delivery* under this transaction; settlement is deferred
        until discharge unless settle_before_discharge is set."""
        self.update(delivery, PN_ACCEPTED)
        if self.settle_before_discharge:
            delivery.settle()
        else:
            self._pending.append(delivery)
    def update(self, delivery, state=None):
        if state:
            delivery.local.data = [self.id, Described(ulong(state), [])]
            delivery.update(0x34)
    def _release_pending(self):
        # On a failed commit or an abort, release all unsettled deliveries.
        for d in self._pending:
            d.update(Delivery.RELEASED)
            d.settle()
        self._clear_pending()
    def _clear_pending(self):
        self._pending = []
    def handle_outcome(self, event):
        """Dispatch the outcome of a declare or discharge delivery to the
        appropriate handler callback."""
        if event.delivery == self._declare:
            if event.delivery.remote.data:
                # Remote supplied the transaction id in the disposition.
                self.id = event.delivery.remote.data[0]
                self.handler.on_transaction_declared(event)
            elif event.delivery.remote_state == Delivery.REJECTED:
                self.handler.on_transaction_declare_failed(event)
            else:
                logging.warning("Unexpected outcome for declare: %s" % event.delivery.remote_state)
                self.handler.on_transaction_declare_failed(event)
        elif event.delivery == self._discharge:
            if event.delivery.remote_state == Delivery.REJECTED:
                if not self.failed:
                    self.handler.on_transaction_commit_failed(event)
                    self._release_pending() # make this optional?
            else:
                if self.failed:
                    self.handler.on_transaction_aborted(event)
                    self._release_pending()
                else:
                    self.handler.on_transaction_committed(event)
            self._clear_pending()
class LinkOption(object):
    """
    Abstract interface for link configuration options.

    Subclasses override apply() with their configuration logic and may
    override test() to restrict which links the option applies to.
    """

    def apply(self, link):
        """Configure *link*; the base implementation does nothing."""
        pass

    def test(self, link):
        """Return whether this option should be applied to *link*.

        The base implementation accepts every link.
        """
        return True
class AtMostOnce(LinkOption):
    """Configure a link for at-most-once (sender pre-settled) delivery."""

    def apply(self, link):
        link.snd_settle_mode = Link.SND_SETTLED
class AtLeastOnce(LinkOption):
    """Configure a link for at-least-once (unsettled, receiver-first) delivery."""

    def apply(self, link):
        link.snd_settle_mode = Link.SND_UNSETTLED
        link.rcv_settle_mode = Link.RCV_FIRST
class SenderOption(LinkOption):
    """Base class for options that apply only to sending links."""

    def apply(self, sender):
        pass

    def test(self, link):
        return link.is_sender
class ReceiverOption(LinkOption):
    """Base class for options that apply only to receiving links."""

    def apply(self, receiver):
        pass

    def test(self, link):
        return link.is_receiver
class DynamicNodeProperties(LinkOption):
    """
    Sets the given properties on the dynamically created remote node.

    Keys may be plain strings or ``symbol`` instances; plain strings are
    converted to symbols.
    """
    def __init__(self, props=None):
        # BUG FIX: the original used a mutable default argument
        # (props={}), a shared-state hazard; None is the sentinel now.
        # Passing no argument behaves exactly as before (no properties).
        self.properties = {}
        if props:
            for k in props:
                if isinstance(k, symbol):
                    self.properties[k] = props[k]
                else:
                    self.properties[symbol(k)] = props[k]
    def apply(self, link):
        # Receivers configure the source node, senders the target node.
        if link.is_receiver:
            link.source.properties.put_dict(self.properties)
        else:
            link.target.properties.put_dict(self.properties)
class Filter(ReceiverOption):
    """Configure a receiving link with a filter for incoming messages."""
    def __init__(self, filter_set=None):
        # BUG FIX: the original used a mutable default argument
        # (filter_set={}) and stored it directly, so every Filter created
        # without arguments shared one dict. None is the sentinel now;
        # behavior for all existing callers is unchanged.
        self.filter_set = filter_set if filter_set is not None else {}
    def apply(self, receiver):
        receiver.source.filter.put_dict(self.filter_set)
class Selector(Filter):
    """Configures a link with a message selector filter."""

    def __init__(self, value, name='selector'):
        descriptor = symbol('apache.org:selector-filter:string')
        super(Selector, self).__init__({symbol(name): Described(descriptor, value)})
class DurableSubscription(ReceiverOption):
    """Configure a receiving link as a durable, never-expiring subscription."""

    def apply(self, receiver):
        receiver.source.durability = Terminus.DELIVERIES
        receiver.source.expiry_policy = Terminus.EXPIRE_NEVER
class Move(ReceiverOption):
    """Set the source distribution mode to MOVE (consume messages)."""

    def apply(self, receiver):
        receiver.source.distribution_mode = Terminus.DIST_MODE_MOVE
class Copy(ReceiverOption):
    """Set the source distribution mode to COPY (browse messages)."""

    def apply(self, receiver):
        receiver.source.distribution_mode = Terminus.DIST_MODE_COPY
def _apply_link_options(options, link):
if options:
if isinstance(options, list):
for o in options:
if o.test(link): o.apply(link)
else:
if options.test(link): options.apply(link)
def _create_session(connection, handler=None):
session = connection.session()
session.open()
return session
def _get_attr(target, name):
if hasattr(target, name):
return getattr(target, name)
else:
return None
class SessionPerConnection(object):
    """Session policy that lazily creates and reuses one default session
    per connection."""

    def __init__(self):
        self._default_session = None

    def session(self, connection):
        """Return the connection's default session, creating it on first use."""
        if not self._default_session:
            self._default_session = _create_session(connection)
            self._default_session.context = self
        return self._default_session

    def on_session_remote_close(self, event):
        """On remote session close, close the whole connection and drop
        the cached session."""
        event.connection.close()
        self._default_session = None
class GlobalOverrides(object):
    """
    Internal handler that lets a connection-specific handler (stored as
    ``conn._overrides``, e.g. a Connector) intercept events before they
    reach the wrapped base global handler.
    """
    def __init__(self, base):
        # base: the original global handler; receives any event not
        # claimed by a per-connection override.
        self.base = base
    def on_unhandled(self, name, event):
        if not self._override(event):
            event.dispatch(self.base)
    def _override(self, event):
        # An event is overridden when its connection carries an
        # `_overrides` handler and dispatching to it reports handled.
        conn = event.connection
        return conn and hasattr(conn, '_overrides') and event.dispatch(conn._overrides)
class Connector(Handler):
    """
    Internal handler that triggers the necessary socket connect for an
    opened connection.
    """
    def __init__(self, connection):
        self.connection = connection
        self.address = None    # Urls instance (or compatible) cycled on each (re)connect
        self.heartbeat = None  # idle timeout applied to each new transport
        self.reconnect = None  # reconnect strategy (e.g. Backoff); falsy disables reconnects
        self.ssl_domain = None # SSL domain used for amqps URLs
    def _connect(self, connection):
        url = self.address.next()
        # IoHandler uses the hostname to determine where to try to connect to
        connection.hostname = "%s:%s" % (url.host, url.port)
        logging.info("connecting to %s..." % connection.hostname)
        # Propagate credentials embedded in the URL, if any.
        if url.username:
            connection.user = url.username
        if url.password:
            connection.password = url.password
        transport = Transport()
        transport.bind(connection)
        if self.heartbeat:
            transport.idle_timeout = self.heartbeat
        if url.scheme == 'amqps' and self.ssl_domain:
            self.ssl = SSL(transport, self.ssl_domain)
            self.ssl.peer_hostname = url.host
    def on_connection_local_open(self, event):
        self._connect(event.connection)
    def on_connection_remote_open(self, event):
        logging.info("connected to %s" % event.connection.hostname)
        if self.reconnect:
            # Successful connect: restart the backoff schedule.
            self.reconnect.reset()
            # NOTE(review): self.transport is never read in this class;
            # this assignment looks vestigial — confirm before removing.
            self.transport = None
    def on_transport_tail_closed(self, event):
        self.on_transport_closed(event)
    def on_transport_closed(self, event):
        # Reconnect only while the local side still wants the connection open.
        if self.connection and self.connection.state & Endpoint.LOCAL_ACTIVE:
            if self.reconnect:
                event.transport.unbind()
                delay = self.reconnect.next()
                if delay == 0:
                    logging.info("Disconnected, reconnecting...")
                    self._connect(self.connection)
                else:
                    logging.info("Disconnected will try to reconnect after %s seconds" % delay)
                    # Fire on_timer_task after the delay to retry.
                    event.reactor.schedule(delay, self)
            else:
                logging.info("Disconnected")
                self.connection = None
    def on_timer_task(self, event):
        self._connect(self.connection)
    def on_connection_remote_close(self, event):
        # Clean close by the peer: stop tracking, no reconnect attempts.
        self.connection = None
class Backoff(object):
    """
    A reconnect strategy with an exponentially increasing delay between
    retries, capped at a maximum of 10 seconds.
    """

    def __init__(self):
        self.delay = 0

    def reset(self):
        """Return to the initial (no-delay) state."""
        self.delay = 0

    def next(self):
        """Return the delay to use now, then double the stored delay
        (starting at 0.1s, capped at 10s)."""
        current = self.delay
        self.delay = 0.1 if current == 0 else min(10, 2 * current)
        return current
class Urls(object):
    """An endlessly cycling iterator over a list of Url values: when the
    end of the list is reached, iteration restarts from the beginning."""
    def __init__(self, values):
        self.values = [Url(v) for v in values]
        self.i = iter(self.values)
    def __iter__(self):
        return self
    def next(self):
        """Return the next Url, wrapping around at the end of the list."""
        try:
            return next(self.i)
        except StopIteration:
            self.i = iter(self.values)
            return next(self.i)
    # BUG FIX: Python 3's iterator protocol calls __next__, not next();
    # without this alias, `next(urls)` (and `for u in urls`) raised
    # TypeError on Python 3 even though __iter__ returned self.
    __next__ = next
class SSLConfig(object):
    """Paired client and server SSL domains, so credentials and trust
    settings can be applied to both at once."""

    def __init__(self):
        self.client = SSLDomain(SSLDomain.MODE_CLIENT)
        self.server = SSLDomain(SSLDomain.MODE_SERVER)

    def set_credentials(self, cert_file, key_file, password):
        """Set the certificate/key credentials on both domains."""
        for domain in (self.client, self.server):
            domain.set_credentials(cert_file, key_file, password)

    def set_trusted_ca_db(self, certificate_db):
        """Set the trusted CA database on both domains."""
        for domain in (self.client, self.server):
            domain.set_trusted_ca_db(certificate_db)
class Container(Reactor):
    """A representation of the AMQP concept of a 'container', which
    loosely speaking is something that establishes links to or from
    another container, over which messages are transferred. This is
    an extension to the Reactor class that adds convenience methods
    for creating connections and sender- or receiver- links.
    """
    def __init__(self, *handlers, **kwargs):
        super(Container, self).__init__(*handlers, **kwargs)
        if "impl" not in kwargs:
            try:
                self.ssl = SSLConfig()
            except SSLUnavailable:
                # Proton was built without SSL support.
                self.ssl = None
            self.global_handler = GlobalOverrides(kwargs.get('global_handler', self.global_handler))
            self.trigger = None
            self.container_id = str(generate_uuid())
            # Record the subclass so Reactor.wrap can re-create it later.
            Wrapper.__setattr__(self, 'subclass', self.__class__)
    def connect(self, url=None, urls=None, address=None, handler=None, reconnect=None, heartbeat=None, ssl_domain=None):
        """
        Initiates the establishment of an AMQP connection. Returns an
        instance of proton.Connection.

        One of url, urls or address must be supplied. reconnect may be a
        strategy object (e.g. Backoff), a false value to disable
        reconnection, or None for the default Backoff strategy.
        """
        conn = self.connection(handler)
        conn.container = self.container_id or str(generate_uuid())
        connector = Connector(conn)
        # The connector intercepts this connection's events via the
        # GlobalOverrides handler installed in __init__.
        conn._overrides = connector
        if url: connector.address = Urls([url])
        elif urls: connector.address = Urls(urls)
        elif address: connector.address = address
        else: raise ValueError("One of url, urls or address required")
        if heartbeat:
            connector.heartbeat = heartbeat
        if reconnect:
            connector.reconnect = reconnect
        elif reconnect is None:
            connector.reconnect = Backoff()
        connector.ssl_domain = ssl_domain or (self.ssl and self.ssl.client)
        conn._session_policy = SessionPerConnection() #todo: make configurable
        conn.open()
        return conn
    def _get_id(self, container, remote, local):
        """Generate a link name from the container id and the remote and/or
        local addresses (falling back to a fresh uuid)."""
        # BUG FIX: the original omitted `return` on the first branch, so a
        # link with both remote and local addresses got the name None.
        if local and remote: return "%s-%s-%s" % (container, remote, local)
        elif local: return "%s-%s" % (container, local)
        elif remote: return "%s-%s" % (container, remote)
        else: return "%s-%s" % (container, str(generate_uuid()))
    def _get_session(self, context):
        # Resolve a Url/Connection/Session (or session-bearing object)
        # into a Session to attach links to.
        if isinstance(context, Url):
            return self._get_session(self.connect(url=context))
        elif isinstance(context, Session):
            return context
        elif isinstance(context, Connection):
            if hasattr(context, '_session_policy'):
                return context._session_policy.session(context)
            else:
                return _create_session(context)
        else:
            return context.session()
    def create_sender(self, context, target=None, source=None, name=None, handler=None, tags=None, options=None):
        """
        Initiates the establishment of a link over which messages can
        be sent. Returns an instance of proton.Sender.

        There are two patterns of use. (1) A connection can be passed
        as the first argument, in which case the link is established
        on that connection. In this case the target address can be
        specified as the second argument (or as a keyword
        argument). The source address can also be specified if
        desired. (2) Alternatively a URL can be passed as the first
        argument. In this case a new connection will be established on
        which the link will be attached. If a path is specified and
        the target is not, then the path of the URL is used as the
        target address.

        The name of the link may be specified if desired, otherwise a
        unique name will be generated.

        Various LinkOptions can be specified to further control the
        attachment.
        """
        if isinstance(context, _compat.STRING_TYPES):
            context = Url(context)
        if isinstance(context, Url) and not target:
            target = context.path
        session = self._get_session(context)
        snd = session.sender(name or self._get_id(session.connection.container, target, source))
        if source:
            snd.source.address = source
        if target:
            snd.target.address = target
        if handler:
            snd.handler = handler
        if tags:
            snd.tag_generator = tags
        _apply_link_options(options, snd)
        snd.open()
        return snd
    def create_receiver(self, context, source=None, target=None, name=None, dynamic=False, handler=None, options=None):
        """
        Initiates the establishment of a link over which messages can
        be received (aka a subscription). Returns an instance of
        proton.Receiver.

        There are two patterns of use. (1) A connection can be passed
        as the first argument, in which case the link is established
        on that connection. In this case the source address can be
        specified as the second argument (or as a keyword
        argument). The target address can also be specified if
        desired. (2) Alternatively a URL can be passed as the first
        argument. In this case a new connection will be established on
        which the link will be attached. If a path is specified and
        the source is not, then the path of the URL is used as the
        source address.

        The name of the link may be specified if desired, otherwise a
        unique name will be generated.

        Various LinkOptions can be specified to further control the
        attachment.
        """
        if isinstance(context, _compat.STRING_TYPES):
            context = Url(context)
        if isinstance(context, Url) and not source:
            source = context.path
        session = self._get_session(context)
        rcv = session.receiver(name or self._get_id(session.connection.container, source, target))
        if source:
            rcv.source.address = source
        if dynamic:
            rcv.source.dynamic = True
        if target:
            rcv.target.address = target
        if handler:
            rcv.handler = handler
        _apply_link_options(options, rcv)
        rcv.open()
        return rcv
    def declare_transaction(self, context, handler=None, settle_before_discharge=False):
        """Declare a new Transaction, lazily creating (and caching on the
        context) a sender link to the remote transaction coordinator."""
        if not _get_attr(context, '_txn_ctrl'):
            class InternalTransactionHandler(OutgoingMessageHandler):
                def __init__(self):
                    super(InternalTransactionHandler, self).__init__(auto_settle=True)
                def on_settled(self, event):
                    if hasattr(event.delivery, "transaction"):
                        event.transaction = event.delivery.transaction
                        event.delivery.transaction.handle_outcome(event)
            context._txn_ctrl = self.create_sender(context, None, name='txn-ctrl', handler=InternalTransactionHandler())
            context._txn_ctrl.target.type = Terminus.COORDINATOR
            context._txn_ctrl.target.capabilities.put_object(symbol(u'amqp:local-transactions'))
        return Transaction(context._txn_ctrl, handler, settle_before_discharge)
    def listen(self, url, ssl_domain=None):
        """
        Initiates a server socket, accepting incoming AMQP connections
        on the interface and port specified.
        """
        url = Url(url)
        acceptor = self.acceptor(url.host, url.port)
        ssl_config = ssl_domain
        if not ssl_config and url.scheme == 'amqps' and self.ssl:
            ssl_config = self.ssl.server
        if ssl_config:
            acceptor.set_ssl_domain(ssl_config)
        return acceptor
    def do_work(self, timeout=None):
        """Run one iteration of the event loop, optionally setting the
        reactor timeout first; returns whether more work remains."""
        if timeout:
            self.timeout = timeout
        return self.process()
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions useful for dealing with hexagonal tilings.
For more information on the concepts employed here, see this informative page
https://www.redblobgames.com/grids/hexagons/
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import numpy as np
# Bokeh imports
from .dependencies import import_required
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'axial_to_cartesian',
'cartesian_to_axial',
'hexbin',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def axial_to_cartesian(q, r, size, orientation, aspect_scale=1):
    ''' Map axial *(q,r)* hex-tile coordinates to the cartesian *(x,y)*
    coordinates of the tile centers.

    Useful for positioning other Bokeh glyphs relative to a hex tiling.
    Adapted from https://www.redblobgames.com/grids/hexagons/#hex-to-pixel

    Args:
        q (array[float]) :
            A NumPy array of axial q-coordinates

        r (array[float]) :
            A NumPy array of axial r-coordinates

        size (float) :
            The size of the hexagonal tiling: the distance from a hexagon's
            center to its top corner for "pointytop" orientation, or to a
            side corner for "flattop" orientation.

        orientation (str) :
            Whether the hex tile orientation should be "pointytop" or
            "flattop".

        aspect_scale (float, optional) :
            Scale the hexagons in the "cross" dimension (horizontal for
            "pointytop", vertical for "flattop"), to match a plot with
            ``aspect_scale != 1``.

    Returns:
        (array[float], array[float])

    '''
    root3 = np.sqrt(3)
    if orientation == "pointytop":
        return (size * root3 * (q + r / 2.0) / aspect_scale, -size * 1.5 * r)
    return (size * 1.5 * q, -size * root3 * (r + q / 2.0) * aspect_scale)
def cartesian_to_axial(x, y, size, orientation, aspect_scale=1):
    ''' Map Cartesian *(x,y)* points to the axial *(q,r)* coordinates of
    their enclosing hexagonal tiles.

    Adapted from https://www.redblobgames.com/grids/hexagons/#pixel-to-hex

    Args:
        x (array[float]) :
            A NumPy array of x-coordinates to convert

        y (array[float]) :
            A NumPy array of y-coordinates to convert

        size (float) :
            The size of the hexagonal tiling: the distance from a hexagon's
            center to its top corner for "pointytop" orientation, or to a
            side corner for "flattop" orientation.

        orientation (str) :
            Whether the hex tile orientation should be "pointytop" or
            "flattop".

        aspect_scale (float, optional) :
            Scale the hexagons in the "cross" dimension (horizontal for
            "pointytop", vertical for "flattop"), to match a plot with
            ``aspect_scale != 1``.

    Returns:
        (array[int], array[int])

    '''
    # Rows of the inverse "pixel-to-hex" matrix for each orientation.
    coeff = ([2.0/3.0, 0.0, -1.0/3.0, np.sqrt(3.0)/3.0]
             if orientation == 'flattop'
             else [np.sqrt(3.0)/3.0, -1.0/3.0, 0.0, 2.0/3.0])
    sx = x / size * (aspect_scale if orientation == "pointytop" else 1)
    sy = -y / size / (aspect_scale if orientation == "flattop" else 1)
    return _round_hex(coeff[0] * sx + coeff[1] * sy,
                      coeff[2] * sx + coeff[3] * sy)
def hexbin(x, y, size, orientation="pointytop", aspect_scale=1):
    ''' Perform an equal-weight binning of data points into hexagonal tiles.

    For more sophisticated use cases, e.g. weighted binning or scaling
    individual tiles proportional to some other quantity, consider using
    HoloViews.

    Args:
        x (array[float]) :
            A NumPy array of x-coordinates for binning

        y (array[float]) :
            A NumPy array of y-coordinates for binning

        size (float) :
            The size of the hexagonal tiling: the distance from a hexagon's
            center to its top corner for "pointytop" orientation, or to a
            side corner for "flattop" orientation.

        orientation (str, optional) :
            Whether the hex tile orientation should be "pointytop" or
            "flattop". (default: "pointytop")

        aspect_scale (float, optional) :
            Match a plot's aspect ratio scaling, so that regular hexagons
            are drawn (instead of "stretched" ones) when
            ``aspect_scale != 1``. This is roughly equivalent to binning in
            "screen space"; axis-aligned rectangular bins may be preferable
            when plot aspect scales are not one.

    Returns:
        DataFrame

        The result has columns *q* and *r* giving tile locations in axial
        coordinates, and a column *counts* with the count for each tile.

    .. warning::
        Hex binning only functions on linear scales, i.e. not on log plots.

    '''
    pd = import_required('pandas','hexbin requires pandas to be installed')

    axial_q, axial_r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale)
    frame = pd.DataFrame(dict(r=axial_r, q=axial_q))
    return frame.groupby(['q', 'r']).size().reset_index(name='counts')
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _round_hex(q, r):
''' Round floating point axial hex coordinates to integer *(q,r)*
coordinates.
This code was adapted from:
https://www.redblobgames.com/grids/hexagons/#rounding
Args:
q (array[float]) :
NumPy array of Floating point axial *q* coordinates to round
r (array[float]) :
NumPy array of Floating point axial *q* coordinates to round
Returns:
(array[int], array[int])
'''
x = q
z = r
y = -x-z
rx = np.round(x)
ry = np.round(y)
rz = np.round(z)
dx = np.abs(rx - x)
dy = np.abs(ry - y)
dz = np.abs(rz - z)
cond = (dx > dy) & (dx > dz)
q = np.where(cond , -(ry + rz), rx)
r = np.where(~cond & ~(dy > dz), -(rx + ry), rz)
return q.astype(int), r.astype(int)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from glob import glob
from io import BytesIO
from numbers import Number
import numpy as np
from .. import backends, conventions, Dataset
from .common import ArrayWriter, GLOBAL_LOCK
from ..core import indexing
from ..core.combine import auto_combine
from ..core.utils import close_on_error, is_remote_uri
from ..core.pycompat import basestring, path_type
DATAARRAY_NAME = '__xarray_dataarray_name__'
DATAARRAY_VARIABLE = '__xarray_dataarray_variable__'
def _get_default_engine(path, allow_remote=False):
    """Pick the preferred available IO engine for *path*.

    netCDF4 is preferred when importable; otherwise fall back to pydap for
    remote (OPeNDAP) paths or scipy for local files.  Raises ValueError when
    no suitable backend library is installed.
    """
    if allow_remote and is_remote_uri(path):  # pragma: no cover
        try:
            import netCDF4
            return 'netcdf4'
        except ImportError:
            pass
        try:
            import pydap
            return 'pydap'
        except ImportError:
            raise ValueError('netCDF4 or pydap is required for accessing '
                             'remote datasets via OPeNDAP')
    try:
        import netCDF4
        return 'netcdf4'
    except ImportError:  # pragma: no cover
        pass
    try:
        import scipy.io.netcdf
        return 'scipy'
    except ImportError:
        raise ValueError('cannot read or write netCDF files without '
                         'netCDF4-python or scipy installed')
def _normalize_path(path):
    """Expand ``~`` and absolutize *path*; remote URIs pass through as-is."""
    if is_remote_uri(path):
        return path
    return os.path.abspath(os.path.expanduser(path))
def _default_lock(filename, engine):
    """Choose the default lock for reading *filename* with *engine*.

    A process-wide lock is returned for local reads with the netcdf4,
    h5netcdf and pynio engines (their underlying C libraries are not
    thread-safe); all other cases need no locking.
    """
    if filename.endswith('.gz'):
        return False
    if engine is None:
        engine = _get_default_engine(filename, allow_remote=True)
    if engine == 'netcdf4':
        # remote (OPeNDAP) reads take a different code path and skip the lock
        if is_remote_uri(filename):
            return False
        # TODO: identify netcdf3 files and don't use the global lock
        # for them
        return GLOBAL_LOCK
    if engine in ('h5netcdf', 'pynio'):
        return GLOBAL_LOCK
    return False
def _validate_dataset_names(dataset):
    """DataArray.name and Dataset keys must be a string or None"""
    def _assert_valid(name):
        # None is always acceptable (unnamed arrays)
        if name is None:
            return
        if not isinstance(name, basestring):
            raise TypeError('DataArray.name or Dataset key must be either a '
                            'string or None for serialization to netCDF files')
        if not name:
            raise ValueError('Invalid name for DataArray or Dataset key: '
                             'string must be length 1 or greater for '
                             'serialization to netCDF files')

    for key in dataset.variables:
        _assert_valid(key)
def _validate_attrs(dataset):
    """`attrs` must have a string key and a value which is either: a number
    a string, an ndarray or a list/tuple of numbers/strings.
    """
    _serializable = (basestring, Number, np.ndarray, np.number, list, tuple)

    def _check(name, value):
        # the key must be a non-empty string
        if not isinstance(name, basestring):
            raise TypeError("Invalid name for attr: {} must be a string for "
                            "serialization to netCDF files".format(name))
        if not name:
            raise ValueError('Invalid name for attr: string must be '
                             'length 1 or greater for serialization to '
                             'netCDF files')
        # the value must be of a netCDF-serializable type
        if not isinstance(value, _serializable):
            raise TypeError('Invalid value for attr: {} must be a number '
                            'string, ndarray or a list/tuple of '
                            'numbers/strings for serialization to netCDF '
                            'files'.format(value))

    # Check attrs on the dataset itself
    for key, value in dataset.attrs.items():
        _check(key, value)
    # Check attrs on each variable within the dataset
    for variable in dataset.variables.values():
        for key, value in variable.attrs.items():
            _check(key, value)
def _protect_dataset_variables_inplace(dataset, cache):
    """Wrap each non-index variable's data in a copy-on-write (and optionally
    memory-cached) array so on-disk values cannot be mutated in place."""
    for name, variable in dataset.variables.items():
        if name in variable.dims:
            # no need to protect IndexVariable objects
            continue
        wrapped = indexing.CopyOnWriteArray(variable._data)
        if cache:
            wrapped = indexing.MemoryCachedArray(wrapped)
        variable.data = wrapped
def open_dataset(filename_or_obj, group=None, decode_cf=True,
                 mask_and_scale=True, decode_times=True, autoclose=False,
                 concat_characters=True, decode_coords=True, engine=None,
                 chunks=None, lock=None, cache=None, drop_variables=None):
    """Load and decode a dataset from a file or file-like object.
    Parameters
    ----------
    filename_or_obj : str, Path, file or xarray.backends.*DataStore
        Strings and Path objects are interpreted as a path to a netCDF file
        or an OpenDAP URL and opened with python-netCDF4, unless the filename
        ends with .gz, in which case the file is gunzipped and opened with
        scipy.io.netcdf (only netCDF3 supported). File-like objects are opened
        with scipy.io.netcdf (only netCDF3 supported).
    group : str, optional
        Path to the netCDF4 group in the given file to open (only works for
        netCDF4 files).
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist). If the `_FillValue` or
        `missing_value` attribute contains multiple values a warning will be
        issued and all array values matching one of the multiple values will
        be replaced by NA.
    decode_times : bool, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
    autoclose : bool, optional
        If True, automatically close files to avoid OS Error of too many files
        being open. However, this option doesn't work with streams, e.g.,
        BytesIO.
    concat_characters : bool, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
    decode_coords : bool, optional
        If True, decode the 'coordinates' attribute to identify coordinates in
        the resulting dataset.
    engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio'}, optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        'netcdf4'.
    chunks : int or dict, optional
        If chunks is provided, it used to load the new dataset into dask
        arrays. ``chunks={}`` loads the dataset with dask using a single
        chunk for all arrays.
    lock : False, True or threading.Lock, optional
        If chunks is provided, this argument is passed on to
        :py:func:`dask.array.from_array`. By default, a global lock is
        used when reading data from netCDF files with the netcdf4 and h5netcdf
        engines to avoid issues with concurrent access when using dask's
        multithreaded backend.
    cache : bool, optional
        If True, cache data loaded from the underlying datastore in memory as
        NumPy arrays when accessed to avoid reading from the underlying data-
        store multiple times. Defaults to True unless you specify the `chunks`
        argument to use dask, in which case it defaults to False. Does not
        change the behavior of coordinates corresponding to dimensions, which
        always load their data from disk into a ``pandas.Index``.
    drop_variables: string or iterable, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    Returns
    -------
    dataset : Dataset
        The newly created dataset.
    See Also
    --------
    open_mfdataset
    """
    # decode_cf=False disables every individual decoding step at once
    if not decode_cf:
        mask_and_scale = False
        decode_times = False
        concat_characters = False
        decode_coords = False
    # cache eagerly-loaded (non-dask) data by default; dask handles its own
    if cache is None:
        cache = chunks is None

    def maybe_decode_store(store, lock=False):
        # apply CF decoding and copy-on-write/caching protection, then
        # optionally re-chunk the result with dask
        ds = conventions.decode_cf(
            store, mask_and_scale=mask_and_scale, decode_times=decode_times,
            concat_characters=concat_characters, decode_coords=decode_coords,
            drop_variables=drop_variables)
        _protect_dataset_variables_inplace(ds, cache)
        if chunks is not None:
            from dask.base import tokenize
            # if passed an actual file path, augment the token with
            # the file modification time
            if (isinstance(filename_or_obj, basestring) and
                    not is_remote_uri(filename_or_obj)):
                mtime = os.path.getmtime(filename_or_obj)
            else:
                mtime = None
            # the token makes dask graph keys unique per file/options combo
            token = tokenize(filename_or_obj, mtime, group, decode_cf,
                             mask_and_scale, decode_times, concat_characters,
                             decode_coords, engine, chunks, drop_variables)
            name_prefix = 'open_dataset-%s' % token
            ds2 = ds.chunk(chunks, name_prefix=name_prefix, token=token,
                           lock=lock)
            # keep the underlying file handle alive on the chunked dataset
            ds2._file_obj = ds._file_obj
        else:
            ds2 = ds
        # protect so that dataset store isn't necessarily closed, e.g.,
        # streams like BytesIO can't be reopened
        # datastore backend is responsible for determining this capability
        if store._autoclose:
            store.close()
        return ds2

    # normalize pathlib.Path objects to plain strings
    if isinstance(filename_or_obj, path_type):
        filename_or_obj = str(filename_or_obj)
    if isinstance(filename_or_obj, backends.AbstractDataStore):
        # caller already constructed a data store; use it directly
        store = filename_or_obj
    elif isinstance(filename_or_obj, basestring):
        # NOTE: on Python 2, str is bytes, so these prefix checks detect
        # in-memory netCDF4/HDF5 and netCDF3 file images
        if (isinstance(filename_or_obj, bytes) and
                filename_or_obj.startswith(b'\x89HDF')):
            raise ValueError('cannot read netCDF4/HDF5 file images')
        elif (isinstance(filename_or_obj, bytes) and
                filename_or_obj.startswith(b'CDF')):
            # netCDF3 file images are handled by scipy
            pass
        elif isinstance(filename_or_obj, basestring):
            filename_or_obj = _normalize_path(filename_or_obj)
            # gzipped files can only be read by the scipy engine
            if filename_or_obj.endswith('.gz'):
                if engine is not None and engine != 'scipy':
                    raise ValueError('can only read gzipped netCDF files with '
                                     "default engine or engine='scipy'")
                else:
                    engine = 'scipy'
        if engine is None:
            engine = _get_default_engine(filename_or_obj,
                                         allow_remote=True)
        # dispatch to the backend store matching the selected engine
        if engine == 'netcdf4':
            store = backends.NetCDF4DataStore.open(filename_or_obj,
                                                   group=group,
                                                   autoclose=autoclose)
        elif engine == 'scipy':
            store = backends.ScipyDataStore(filename_or_obj,
                                            autoclose=autoclose)
        elif engine == 'pydap':
            store = backends.PydapDataStore.open(filename_or_obj)
        elif engine == 'h5netcdf':
            store = backends.H5NetCDFStore(filename_or_obj, group=group,
                                           autoclose=autoclose)
        elif engine == 'pynio':
            store = backends.NioDataStore(filename_or_obj,
                                          autoclose=autoclose)
        else:
            raise ValueError('unrecognized engine for open_dataset: %r'
                             % engine)
        if lock is None:
            lock = _default_lock(filename_or_obj, engine)
        # make sure the store is closed if decoding fails partway through
        with close_on_error(store):
            return maybe_decode_store(store, lock)
    else:
        if engine is not None and engine != 'scipy':
            raise ValueError('can only read file-like objects with '
                             "default engine or engine='scipy'")
        # assume filename_or_obj is a file-like object
        store = backends.ScipyDataStore(filename_or_obj)
    return maybe_decode_store(store)
def open_dataarray(*args, **kwargs):
    """Open a DataArray from a netCDF file containing a single data variable.
    This is designed to read netCDF files with only one data variable. If
    multiple variables are present then a ValueError is raised.
    Parameters
    ----------
    filename_or_obj : str, Path, file or xarray.backends.*DataStore
        Strings and Paths are interpreted as a path to a netCDF file or an
        OpenDAP URL and opened with python-netCDF4, unless the filename ends
        with .gz, in which case the file is gunzipped and opened with
        scipy.io.netcdf (only netCDF3 supported). File-like objects are opened
        with scipy.io.netcdf (only netCDF3 supported).
    group : str, optional
        Path to the netCDF4 group in the given file to open (only works for
        netCDF4 files).
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist). If the `_FillValue` or
        `missing_value` attribute contains multiple values a warning will be
        issued and all array values matching one of the multiple values will
        be replaced by NA.
    decode_times : bool, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
    autoclose : bool, optional
        If True, automatically close files to avoid OS Error of too many files
        being open. However, this option doesn't work with streams, e.g.,
        BytesIO.
    concat_characters : bool, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
    decode_coords : bool, optional
        If True, decode the 'coordinates' attribute to identify coordinates in
        the resulting dataset.
    engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio'}, optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        'netcdf4'.
    chunks : int or dict, optional
        If chunks is provided, it used to load the new dataset into dask
        arrays.
    lock : False, True or threading.Lock, optional
        If chunks is provided, this argument is passed on to
        :py:func:`dask.array.from_array`. By default, a global lock is
        used when reading data from netCDF files with the netcdf4 and h5netcdf
        engines to avoid issues with concurrent access when using dask's
        multithreaded backend.
    cache : bool, optional
        If True, cache data loaded from the underlying datastore in memory as
        NumPy arrays when accessed to avoid reading from the underlying data-
        store multiple times. Defaults to True unless you specify the `chunks`
        argument to use dask, in which case it defaults to False. Does not
        change the behavior of coordinates corresponding to dimensions, which
        always load their data from disk into a ``pandas.Index``.
    drop_variables: string or iterable, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    Notes
    -----
    This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
    using `DataArray.to_netcdf` and then loading with this function will
    produce an identical result.
    All parameters are passed directly to `xarray.open_dataset`. See that
    documentation for further details.
    See also
    --------
    open_dataset
    """
    dataset = open_dataset(*args, **kwargs)
    if len(dataset.data_vars) != 1:
        raise ValueError('Given file dataset contains more than one data '
                         'variable. Please read with xarray.open_dataset and '
                         'then select the variable you want.')
    else:
        data_array, = dataset.data_vars.values()
    # keep the dataset's file handle alive via the extracted array
    data_array._file_obj = dataset._file_obj
    # Reset names if they were changed during saving
    # to ensure that we can 'roundtrip' perfectly
    if DATAARRAY_NAME in dataset.attrs:
        data_array.name = dataset.attrs[DATAARRAY_NAME]
        del dataset.attrs[DATAARRAY_NAME]
    if data_array.name == DATAARRAY_VARIABLE:
        data_array.name = None
    return data_array
class _MultiFileCloser(object):
def __init__(self, file_objs):
self.file_objs = file_objs
def close(self):
for f in self.file_objs:
f.close()
# sentinel meaning "infer the concatenation dimension from the inputs";
# lets open_mfdataset distinguish "not given" from an explicit concat_dim=None
_CONCAT_DIM_DEFAULT = '__infer_concat_dim__'
def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT,
                   compat='no_conflicts', preprocess=None, engine=None,
                   lock=None, data_vars='all', coords='different', **kwargs):
    """Open multiple files as a single dataset.
    Requires dask to be installed. Attributes from the first dataset file
    are used for the combined dataset.
    Parameters
    ----------
    paths : str or sequence
        Either a string glob in the form "path/to/my/files/*.nc" or an explicit
        list of files to open. Paths can be given as strings or as pathlib
        Paths.
    chunks : int or dict, optional
        Dictionary with keys given by dimension names and values given by chunk
        sizes. In general, these should divide the dimensions of each dataset.
        If int, chunk each dimension by ``chunks``.
        By default, chunks will be chosen to load entire input files into
        memory at once. This has a major impact on performance: please see the
        full documentation for more details.
    concat_dim : None, str, DataArray or Index, optional
        Dimension to concatenate files along. This argument is passed on to
        :py:func:`xarray.auto_combine` along with the dataset objects. You only
        need to provide this argument if the dimension along which you want to
        concatenate is not a dimension in the original datasets, e.g., if you
        want to stack a collection of 2D arrays along a third dimension.
        By default, xarray attempts to infer this argument by examining
        component files. Set ``concat_dim=None`` explicitly to disable
        concatenation.
    compat : {'identical', 'equals', 'broadcast_equals',
              'no_conflicts'}, optional
        String indicating how to compare variables of the same name for
        potential conflicts when merging:
        - 'broadcast_equals': all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - 'equals': all values and dimensions must be the same.
        - 'identical': all values, dimensions and attributes must be the
          same.
        - 'no_conflicts': only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
    preprocess : callable, optional
        If provided, call this function on each dataset prior to concatenation.
    engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio'}, optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        'netcdf4'.
    autoclose : bool, optional
        If True, automatically close files to avoid OS Error of too many files
        being open. However, this option doesn't work with streams, e.g.,
        BytesIO.
    lock : False, True or threading.Lock, optional
        This argument is passed on to :py:func:`dask.array.from_array`. By
        default, a per-variable lock is used when reading data from netCDF
        files with the netcdf4 and h5netcdf engines to avoid issues with
        concurrent access when using dask's multithreaded backend.
    data_vars : {'minimal', 'different', 'all' or list of str}, optional
        These data variables will be concatenated together:
          * 'minimal': Only data variables in which the dimension already
            appears are included.
          * 'different': Data variables which are not equal (ignoring
            attributes) across all datasets are also concatenated (as well as
            all for which dimension already appears). Beware: this option may
            load the data payload of data variables into memory if they are not
            already loaded.
          * 'all': All data variables will be concatenated.
          * list of str: The listed data variables will be concatenated, in
            addition to the 'minimal' data variables.
    coords : {'minimal', 'different', 'all' or list of str}, optional
        These coordinate variables will be concatenated together:
          * 'minimal': Only coordinates in which the dimension already appears
            are included.
          * 'different': Coordinates which are not equal (ignoring attributes)
            across all datasets are also concatenated (as well as all for which
            dimension already appears). Beware: this option may load the data
            payload of coordinate variables into memory if they are not already
            loaded.
          * 'all': All coordinate variables will be concatenated, except
            those corresponding to other dimensions.
          * list of str: The listed coordinate variables will be concatenated,
            in addition the 'minimal' coordinates.
    **kwargs : optional
        Additional arguments passed on to :py:func:`xarray.open_dataset`.
    Returns
    -------
    xarray.Dataset
    See Also
    --------
    auto_combine
    open_dataset
    """
    # expand a glob pattern; otherwise normalize pathlib.Path entries to str
    if isinstance(paths, basestring):
        paths = sorted(glob(paths))
    else:
        paths = [str(p) if isinstance(p, path_type) else p for p in paths]
    if not paths:
        raise IOError('no files to open')
    if lock is None:
        lock = _default_lock(paths[0], engine)
    datasets = [open_dataset(p, engine=engine, chunks=chunks or {}, lock=lock,
                             **kwargs) for p in paths]
    file_objs = [ds._file_obj for ds in datasets]
    if preprocess is not None:
        datasets = [preprocess(ds) for ds in datasets]
    # close datasets in case of a ValueError
    try:
        if concat_dim is _CONCAT_DIM_DEFAULT:
            combined = auto_combine(datasets, compat=compat,
                                    data_vars=data_vars, coords=coords)
        else:
            combined = auto_combine(datasets, concat_dim=concat_dim,
                                    compat=compat,
                                    data_vars=data_vars, coords=coords)
    except ValueError:
        for ds in datasets:
            ds.close()
        raise
    # the combined dataset closes every underlying file when it is closed
    combined._file_obj = _MultiFileCloser(file_objs)
    combined.attrs = datasets[0].attrs
    return combined
# engine name -> data-store constructor used for writing; these keys are the
# only valid ``engine`` values accepted by to_netcdf
WRITEABLE_STORES = {'netcdf4': backends.NetCDF4DataStore.open,
                    'scipy': backends.ScipyDataStore,
                    'h5netcdf': backends.H5NetCDFStore}
def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
              engine=None, writer=None, encoding=None, unlimited_dims=None):
    """This function creates an appropriate datastore for writing a dataset to
    disk as a netCDF file
    See `Dataset.to_netcdf` for full API docs.
    The ``writer`` argument is only for the private use of save_mfdataset.
    """
    # normalize pathlib.Path targets to plain strings
    if isinstance(path_or_file, path_type):
        path_or_file = str(path_or_file)
    if encoding is None:
        encoding = {}
    if path_or_file is None:
        # writing to an in-memory bytes buffer is only supported by scipy
        if engine is None:
            engine = 'scipy'
        elif engine != 'scipy':
            raise ValueError('invalid engine for creating bytes with '
                             'to_netcdf: %r. Only the default engine '
                             "or engine='scipy' is supported" % engine)
    elif isinstance(path_or_file, basestring):
        if engine is None:
            engine = _get_default_engine(path_or_file)
        path_or_file = _normalize_path(path_or_file)
    else:  # file-like object
        engine = 'scipy'
    # validate Dataset keys, DataArray names, and attr keys/values
    _validate_dataset_names(dataset)
    _validate_attrs(dataset)
    try:
        store_open = WRITEABLE_STORES[engine]
    except KeyError:
        raise ValueError('unrecognized engine for to_netcdf: %r' % engine)
    if format is not None:
        format = format.upper()
    # if a writer is provided, store asynchronously
    sync = writer is None
    target = path_or_file if path_or_file is not None else BytesIO()
    store = store_open(target, mode, format, group, writer)
    if unlimited_dims is None:
        unlimited_dims = dataset.encoding.get('unlimited_dims', None)
    try:
        dataset.dump_to_store(store, sync=sync, encoding=encoding,
                              unlimited_dims=unlimited_dims)
        if path_or_file is None:
            # in-memory target: return the serialized netCDF bytes
            return target.getvalue()
    finally:
        # only close synchronous writes to a real path; file-like targets and
        # async stores are left open for the caller
        if sync and isinstance(path_or_file, basestring):
            store.close()
    if not sync:
        # asynchronous write: save_mfdataset syncs/closes the returned store
        return store
def save_mfdataset(datasets, paths, mode='w', format=None, groups=None,
                   engine=None):
    """Write multiple datasets to disk as netCDF files simultaneously.
    This function is intended for use with datasets consisting of dask.array
    objects, in which case it can write the multiple datasets to disk
    simultaneously using a shared thread pool.
    When not using dask, it is no different than calling ``to_netcdf``
    repeatedly.
    Parameters
    ----------
    datasets : list of xarray.Dataset
        List of datasets to save.
    paths : list of str or list of Paths
        List of paths to which to save each corresponding dataset.
    mode : {'w', 'a'}, optional
        Write ('w') or append ('a') mode. If mode='w', any existing file at
        these locations will be overwritten.
    format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
              'NETCDF3_CLASSIC'}, optional
        File format for the resulting netCDF file:
        * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
          features.
        * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
          netCDF 3 compatible API features.
        * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
          which fully supports 2+ GB files, but is only compatible with
          clients linked against netCDF version 3.6.0 or later.
        * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
          handle 2+ GB files very well.
        All formats are supported by the netCDF4-python library.
        scipy.io.netcdf only supports the last two formats.
        The default format is NETCDF4 if you are saving a file to disk and
        have the netCDF4-python library available. Otherwise, xarray falls
        back to using scipy to write netCDF files and defaults to the
        NETCDF3_64BIT format (scipy does not support netCDF4).
    groups : list of str, optional
        Paths to the netCDF4 group in each corresponding file to which to save
        datasets (only works for format='NETCDF4'). The groups will be created
        if necessary.
    engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
        Engine to use when writing netCDF files. If not provided, the
        default engine is chosen based on available dependencies, with a
        preference for 'netcdf4' if writing to a file on disk.
    Examples
    --------
    Save a dataset into one netCDF per year of data:
    >>> years, datasets = zip(*ds.groupby('time.year'))
    >>> paths = ['%s.nc' % y for y in years]
    >>> xr.save_mfdataset(datasets, paths)
    """
    # writing two datasets to the same path in 'w' mode would clobber data
    if mode == 'w' and len(set(paths)) < len(paths):
        raise ValueError("cannot use mode='w' when writing multiple "
                         'datasets to the same path')
    for obj in datasets:
        if not isinstance(obj, Dataset):
            raise TypeError('save_mfdataset only supports writing Dataset '
                            'objects, received type %s' % type(obj))
    if groups is None:
        groups = [None] * len(datasets)
    # all three lists must be the same length to zip them together
    if len({len(datasets), len(paths), len(groups)}) > 1:
        raise ValueError('must supply lists of the same length for the '
                         'datasets, paths and groups arguments to '
                         'save_mfdataset')
    # a shared writer lets dask flush all pending arrays in one sync
    writer = ArrayWriter()
    stores = [to_netcdf(ds, path, mode, format, group, engine, writer)
              for ds, path, group in zip(datasets, paths, groups)]
    try:
        writer.sync()
        for store in stores:
            store.sync()
    finally:
        for store in stores:
            store.close()
| |
import sys, os, imp, platform, shutil, subprocess, argparse
# command-line interface for the CnC kit build script
argParser = argparse.ArgumentParser(prog="make_kit.py", description="Build CnC runtime and API kit")
argParser.add_argument('-a', '--arch', default='intel64', help="Processor architecture(s) to build for")
argParser.add_argument('-r', '--release', default="current", help="release number")
argParser.add_argument('-t', '--travis', default=False, action='store_true', help="Run in Travis mode (implies --nodebug --itac='' --mpi=/usr)")
argParser.add_argument('-p', '--product', default=False, action='store_true', help="Build a release/product package (implies -i --mic --sdl and not -d -t --nodebug)")
argParser.add_argument('-d', '--devbuild', default=False, action='store_true', help="Build from an unclean development branch (implies -r=current)")
argParser.add_argument('-k', '--keep', default=False, action='store_true', help="Keep existing (partial) builds")
argParser.add_argument('-v', '--verbose', default=False, action='store_true', help="Verbose builds")
argParser.add_argument('-i', '--installer', default=False, action='store_true', help="Build an installer")
argParser.add_argument('--phi', default=False, action='store_true', help="Build libs for Xeon(R) Phi")
argParser.add_argument('--nodebug', default=False, action='store_true', help="Don't build debug libs")
argParser.add_argument('--tbb', default=os.getenv('TBBROOT', '/usr'), help="TBB root directory")
argParser.add_argument('--mpi', default=os.getenv('I_MPI_ROOT', '/usr'), help="MPI root directory")
argParser.add_argument('--itac', default=os.getenv('VT_ROOT', 'NONE'), help="ITAC root directory")
argParser.add_argument('--msvs', default='12', help="Version(s) of MS Visual Studio to use/build for (MS Windows only)")
argParser.add_argument('--zip', default=False, action='store_true', help="also create password protected zip archive (requires -i)")
argParser.add_argument('--sdl', default=False, action='store_true', help="Add SDL flags (security).")
args = argParser.parse_args()
# unpack the parsed options into module-level configuration variables
release = args.release
travis = args.travis
product = args.product
devbuild = args.devbuild
keepbuild = args.keep
verbose = args.verbose
installer = args.installer
nodbg = args.nodebug
tbbroot = args.tbb
mpiroot = args.mpi
itacroot = args.itac
vs = args.msvs
phi = args.phi
PARCHS = args.arch.split()
sdl = args.sdl
# Travis CI mode: fixed release label, release-only builds, no ITAC tracing
if travis == True:
    release = "current"
    nodbg = True
    mpiroot = '/usr'
    itacroot = 'NONE'
pf = platform.system()
# product mode overrides most flags: full debug+release, installer, Phi libs,
# pinned release number and platform-specific TBB locations
if product == True:
    nodbg = False
    installer = True
    devbuild = False
    travis = False
    phi = True
    PARCHS = ['intel64']
    release = '1.0.100' # hm, need to update this automatically?
    if pf == 'Windows':
        tbbroot = "C:\\tbb42_20140122oss" #C:\\tbb41_20121003oss"
        vs = '12 11'
    else:
        tbbroot = "/nfs/hd/disks/tpi0/vssad3/proj/CnC/intel/tbb42_20140122oss"
        PARCHS += ['mic']
    # a product build must link against MPI and ITAC
    if itacroot == 'NONE' or mpiroot == 'NONE':
        print('Need itacroot and mpiroot for product build')
        sys.exit(44)
# dir where the kit gets installed into
kitdir = 'kit.pkg'
# the specific release gets installed into here
reldir = os.path.join(kitdir, 'cnc', release)
##############################################################
##############################################################
# End of config section
##############################################################
##############################################################
# on Windows build once per requested Visual Studio version; elsewhere the
# empty string stands for "no VS dimension"
if pf == 'Windows':
    VSS = vs.split()
else:
    VSS = ['']
# build configurations: Release always, Debug unless --nodebug
BUILDS = ['Release']
if nodbg == False:
    BUILDS += ['Debug']
def exe_cmd(cmd):
    """Echo and execute *cmd*, aborting the whole build on failure.

    A string is run through the shell via os.system; a sequence is run
    directly via subprocess.call. On a non-zero exit status the status is
    printed and the script terminates with exit code 444.

    Returns the (zero) exit status on success.
    """
    print(cmd)
    if isinstance(cmd, str):
        retv = os.system(cmd)
    else:
        retv = subprocess.call(cmd)
    if retv != 0:
        print("error code: " + str(retv))
        # sys.exit instead of the site-provided exit() builtin, so this also
        # works when the interpreter runs without site initialization (-S)
        sys.exit(444)
    return retv
##############################################################
# check if our source tree is clean
output = subprocess.check_output(["git", "status", "-uall", "--porcelain", "cnc", "samples"])
output += subprocess.check_output(["git", "status", "-uno", "--porcelain", "src"])
if output:
    print('\ngit status not clean')
    # only dev builds (-d) may proceed from an unclean tree
    if devbuild == False:
        print(output)
        sys.exit(43)
##############################################################
# clean existing builds if requested
if keepbuild == False:
    shutil.rmtree(kitdir, True)
##############################################################
# cmake command line: the args which are shared on all platforms
cmake_args_core = ['-DTBBROOT=' + tbbroot, '-DCMAKE_INSTALL_PREFIX=' + os.path.join('..', reldir)]
# MPI/ITAC support is switched on only when a root directory was supplied
if not mpiroot == 'NONE':
    cmake_args_core += ['-DBUILD_LIBS_FOR_MPI=TRUE', '-DMPIROOT='+mpiroot]
else:
    cmake_args_core += ['-DBUILD_LIBS_FOR_MPI=FALSE']
if not itacroot == 'NONE':
    cmake_args_core += ['-DBUILD_LIBS_FOR_ITAC=TRUE', '-DITACROOT='+itacroot]
else:
    cmake_args_core += ['-DBUILD_LIBS_FOR_ITAC=FALSE']
# CI/dev builds relax the minimum TBB version requirement
if travis == True or devbuild == True:
    cmake_args_core += ['-DCMAKE_CXX_FLAGS=-DCNC_REQUIRED_TBB_VERSION=6101']
if product == True:
    cmake_args_core += ['-DCNC_PRODUCT_BUILD=TRUE']
else:
    cmake_args_core += ['-DCNC_PRODUCT_BUILD=FALSE']
if verbose == True:
    cmake_args_core += ['-DCMAKE_VERBOSE_MAKEFILE=TRUE']
else:
    cmake_args_core += ['-DCMAKE_VERBOSE_MAKEFILE=FALSE']
if sdl == True:
    cmake_args_core += ['-DSDL=TRUE']
else:
    cmake_args_core += ['-DSDL=FALSE']
# source dir, relative to the per-configuration build directory
cmake_args_core += ['..']
##############################################################
# build all libs and install headers and examples etc into reldir
# one out-of-source build directory per (VS version, arch, build type)
for vs in VSS:
    for arch in PARCHS:
        # Xeon Phi (mic) builds require the Intel compilers
        if arch == 'mic':
            cmake_args_arch = ['-DCMAKE_CXX_COMPILER=icpc', '-DCMAKE_C_COMPILER=icc', '-DPARCH=' + arch]
        else:
            cmake_args_arch = ['-DPARCH=' + arch]
        for rel in BUILDS:
            print('Building ' + vs + ' ' + arch + ' ' + rel)
            builddir = 'kit.' + rel + '.' + arch
            if not vs == '':
                builddir += '.' + vs
            if keepbuild == False:
                shutil.rmtree(builddir, True )
            if os.path.isdir(builddir) == False:
                os.mkdir(builddir)
            cmake_args = ['-DCMAKE_BUILD_TYPE=' + rel] + cmake_args_arch + cmake_args_core
            os.chdir(builddir)
            if pf == 'Windows':
                # set up the MSVC environment, then configure and build with nmake
                exe_cmd( ['c:/Program Files (x86)/Microsoft Visual Studio ' + vs + '.0/VC/vcvarsall.bat', 'x64',
                          '&&', 'cmake', '-G', 'NMake Makefiles'] + cmake_args + ['&&', 'nmake', 'install'] )
            else:
                exe_cmd(['cmake'] + cmake_args)
                exe_cmd(['make', '-j', '16', 'install'])
            os.chdir('..')
##############################################################
# now copy license and docu
tbbver = os.path.basename( tbbroot )
pwd = os.getcwd()
docdir = os.path.join(reldir, 'doc')
pagesdir = 'icnc.github.io'
# clone (or update) the GitHub pages repo next to this checkout; it holds
# the LICENSE, README and HTML documentation shipped with the kit
os.chdir('..')
if os.path.isdir(pagesdir) == False:
    exe_cmd(("git clone --depth=1 https://github.com/icnc/"+pagesdir).split())
else:
    os.chdir(pagesdir)
    exe_cmd(['git', 'pull'])
    os.chdir('..')
os.chdir(pwd)
orgdir = os.path.join('..', pagesdir)
shutil.copy(os.path.join(orgdir, 'LICENSE'), reldir)
shutil.copy(os.path.join(orgdir, 'README.md'), os.path.join(reldir, 'README'))
if os.path.isdir(docdir) == False:
    os.mkdir(docdir)
for doc in ['FAQ.html', 'Release_Notes.html', 'Getting_Started.html', 'CnC_eight_patterns.pdf']:
    shutil.copy(os.path.join(orgdir, doc), docdir)
##############################################################
# finally sanitize files and create installer
if installer == True:
    if pf == 'Windows':
        # build two MSIs via Advanced Installer: one bundling TBB, one without
        # NOTE(review): 'arch' is reused from the last iteration of the build
        # loop above — confirm this is intentional for multi-arch builds
        for withTBB in [True, False]:
            aip = 'cnc_installer_' + arch + '.aip'
            shutil.copy(os.path.join('pkg', aip), kitdir)
            shutil.copy(os.path.join('pkg', 'LICENSE.rtf'), kitdir)
            pkgstub = 'cnc_b_' + release + '_' + arch + ('_'+tbbver if withTBB == True else '')
            tbbenv = ''
            if withTBB == True:
                # Advanced Installer script fragment that packages the TBB
                # headers/libs/binaries and sets TBBROOT for the installee
                tbbenv = ( 'AddFolder ' + os.path.join('APPDIR', tbbver) + ' ' + os.path.join(tbbroot, 'include') + '\n'
                           'AddFolder ' + os.path.join('APPDIR', tbbver, 'lib') + ' ' + os.path.join(tbbroot, 'lib', arch ) + '\n'
                           'AddFolder ' + os.path.join('APPDIR', tbbver, 'bin') + ' ' + os.path.join(tbbroot, 'bin', arch ) + '\n'
                           'AddFile ' + os.path.join('APPDIR', tbbver, 'bin') + ' ' + os.path.join(tbbroot, 'bin', 'tbbvars.bat') + '\n'
                           'AddFile ' + os.path.join('APPDIR', tbbver) + ' ' + os.path.join(tbbroot, 'README') + '\n'
                           'AddFile ' + os.path.join('APPDIR', tbbver) + ' ' + os.path.join(tbbroot, 'COPYING') + '\n'
                           'AddFile ' + os.path.join('APPDIR', tbbver) + ' ' + os.path.join(tbbroot, 'CHANGES') + '\n'
                           'NewEnvironment -name TBBROOT -value [APPDIR]\\' + tbbver + (' -install_operation CreateUpdate -behavior Replace\n' if withTBB == True else '\n') )
            # instantiate the .aic command script from its template
            aic = os.path.join(kitdir, pkgstub + '.aic')
            inf = open( os.path.join('pkg', 'edit_aip.aic.tmpl'))
            l = inf.read();
            inf.close()
            outf = open( aic, 'w' )
            outf.write( l.format( KITDIR=kitdir, CNCVER=release, ARCH=arch, PKGNAME=os.path.abspath(os.path.join(kitdir, 'w_'+pkgstub+'.msi')), TBBENV=tbbenv ) )
            outf.close()
            exe_cmd( os.path.normpath( r'"C:/Program Files (x86)/Caphyon/Advanced Installer 11.2/bin/x86/AdvancedInstaller.com"')
                     + ' /execute ' + os.path.join(kitdir, aip) + ' ' + aic )
    else:
        # Linux: normalize permissions/line-endings, then pack a
        # gpg-protected self-extracting payload plus a TBB tarball
        bindir = os.path.join(reldir, 'bin')
        if os.path.isdir(bindir) == False:
            os.mkdir(bindir)
        shutil.copy(os.path.join('pkg', 'cncvars.csh'), bindir)
        shutil.copy(os.path.join('pkg', 'cncvars.sh'), bindir)
        exe_cmd('chmod 644 `find ' + reldir + ' -type f`')
        exe_cmd('chmod 755 `find ' + reldir + ' -name \*sh`')
        exe_cmd('dos2unix -q `find ' + reldir + ' -name \*.h`')
        exe_cmd('dos2unix -q `find ' + reldir + ' -name \*sh` `find ' + reldir + ' -name \*txt`')
        exe_cmd('dos2unix -q `find ' + reldir + ' -name \*cpp`')
        os.chdir(reldir)
        # encrypt the release tree; the EULA phrase doubles as the passphrase
        exe_cmd('tar cfj - * | gpg --batch -c --passphrase "I accept the EULA" > ../cnc_b_' + release + '_install.files')
        os.chdir(pwd)
        cncdir = os.path.join(kitdir, 'cnc/')
        shutil.copy(os.path.join('pkg', 'install.sh'), cncdir)
        # bake the TBB and CnC version numbers into the install script
        exe_cmd(['sed', '-i', 's/$TBBVER/' + tbbver + '/g ; s/$CNCVER/' + release + '/g', os.path.join(cncdir, 'install.sh')])
        shutil.copy('LICENSE', cncdir)
        shutil.copy(os.path.join('pkg', 'INSTALL.txt'), cncdir)
        os.chdir(cncdir)
        # assemble a redistributable TBB subset (headers, libs, vars scripts)
        if keepbuild == False:
            shutil.rmtree(tbbver, True )
        if os.path.isdir(tbbver) == False:
            os.mkdir(tbbver)
        if os.path.isdir(os.path.join(tbbver, 'bin')) == False:
            os.mkdir(os.path.join(tbbver, 'bin'))
        if os.path.isdir(os.path.join(tbbver, 'lib')) == False:
            os.mkdir(os.path.join(tbbver, 'lib'))
        exe_cmd(['cp', '-r', os.path.join(tbbroot, 'include'), tbbver + '/'])
        exe_cmd(['cp', '-r', os.path.join(tbbroot, 'lib/intel64'), os.path.join(tbbver, 'lib/')])
        if phi:
            exe_cmd(['cp', '-r', os.path.join(tbbroot, 'lib/mic'), os.path.join(tbbver, 'lib/')])
        exe_cmd('cp ' + os.path.join(tbbroot, 'bin/tbbvars.*h') + ' ' + os.path.join(tbbver, 'bin/'))
        exe_cmd(['cp', os.path.join(tbbroot, 'README'), os.path.join(tbbroot, 'CHANGES'), os.path.join(tbbroot, 'COPYING'), tbbver + '/'])
        exe_cmd(['tar', 'cfj', tbbver + '_cnc_files.tbz', tbbver + '/'])
        os.chdir('..')
        # final end-user tarball (and optional password-protected zip)
        exe_cmd(['tar', 'cfvz', 'l_cnc_b_' + release + '.tgz',
                 os.path.join('cnc', 'install.sh'),
                 os.path.join('cnc', 'INSTALL.txt'),
                 os.path.join('cnc', 'LICENSE'),
                 os.path.join('cnc', 'cnc_b_' + release + '_install.files'),
                 os.path.join('cnc', tbbver + '_cnc_files.tbz')])
        if args.zip:
            exe_cmd(['zip', '-rP', 'cnc', 'l_cnc_b_' + release + '.zip',
                     os.path.join('cnc', 'install.sh'),
                     os.path.join('cnc', 'INSTALL.txt'),
                     os.path.join('cnc', 'LICENSE'),
                     os.path.join('cnc', 'cnc_b_' + release + '_install.files'),
                     os.path.join('cnc', tbbver + '_cnc_files.tbz')])
| |
"""
Title: MelGAN-based spectrogram inversion using feature matching
Author: [Darshan Deshpande](https://twitter.com/getdarshan)
Date created: 02/09/2021
Last modified: 15/09/2021
Description: Inversion of audio from mel-spectrograms using the MelGAN architecture and feature matching.
"""
"""
## Introduction
Autoregressive vocoders have been ubiquitous for a majority of the history of speech processing,
but for most of their existence they have lacked parallelism.
[MelGAN](https://arxiv.org/pdf/1910.06711v3.pdf) is a
non-autoregressive, fully convolutional vocoder architecture used for purposes ranging
from spectral inversion and speech enhancement to present-day state-of-the-art
speech synthesis when used as a decoder
with models like Tacotron2 or FastSpeech that convert text to mel spectrograms.
In this tutorial, we will have a look at the MelGAN architecture and how it can achieve
fast spectral inversion, i.e. conversion of spectrograms to audio waves. The MelGAN
implemented in this tutorial is similar to the original implementation with only the
difference of method of padding for convolutions where we will use 'same' instead of
reflect padding.
"""
"""
## Importing and Defining Hyperparameters
"""
"""shell
pip install -qqq tensorflow_addons
pip install -qqq tensorflow-io
"""
import tensorflow as tf
import tensorflow_io as tfio
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow_addons import layers as addon_layers
# Setting logger level to avoid input shape warnings
tf.get_logger().setLevel("ERROR")
# Defining hyperparameters
DESIRED_SAMPLES = 8192
LEARNING_RATE_GEN = 1e-5
LEARNING_RATE_DISC = 1e-6
BATCH_SIZE = 16
mse = keras.losses.MeanSquaredError()
mae = keras.losses.MeanAbsoluteError()
"""
## Loading the Dataset
This example uses the [LJSpeech dataset](https://keithito.com/LJ-Speech-Dataset/).
The LJSpeech dataset is primarily used for text-to-speech and consists of 13,100 discrete
speech samples taken from 7 non-fiction books, having a total length of approximately 24
hours. The MelGAN training is only concerned with the audio waves so we process only the
WAV files and ignore the audio annotations.
"""
"""shell
wget https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
tar -xf /content/LJSpeech-1.1.tar.bz2
"""
"""
We create a `tf.data.Dataset` to load and process the audio files on the fly.
The `preprocess()` function takes the file path as input and returns two instances of the
wave, one for input and one as the ground truth for comparison. The input wave will be
mapped to a spectrogram using the custom `MelSpec` layer as shown later in this example.
"""
# Splitting the dataset into training and testing splits
wavs = tf.io.gfile.glob("LJSpeech-1.1/wavs/*.wav")
print(f"Number of audio files: {len(wavs)}")
# Mapper function for loading the audio. This function returns two instances of the wave
def preprocess(filename):
    """Decode one WAV file and return the wave twice: (input, target)."""
    wave = tf.audio.decode_wav(tf.io.read_file(filename), 1, DESIRED_SAMPLES).audio
    return wave, wave
# Create tf.data.Dataset objects and apply preprocessing
train_dataset = tf.data.Dataset.from_tensor_slices((wavs,))
train_dataset = train_dataset.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
"""
## Defining custom layers for MelGAN
The MelGAN architecture consists of 3 main modules:
1. The residual block
2. Dilated convolutional block
3. Discriminator block

"""
"""
Since the network takes a mel-spectrogram as input, we will create an additional custom
layer
which can convert the raw audio wave to a spectrogram on-the-fly. We use the raw audio
tensor from `train_dataset` and map it to a mel-spectrogram using the `MelSpec` layer
below.
"""
# Custom keras layer for on-the-fly audio to spectrogram conversion
class MelSpec(layers.Layer):
    """Keras layer that converts raw audio to a log-mel spectrogram on the fly.

    During training the waveform batch is mapped to a db-scaled mel
    spectrogram; outside of training the input is returned untouched so
    that pre-computed spectrograms can be fed to the model directly.
    """

    def __init__(
        self,
        frame_length=1024,
        frame_step=256,
        fft_length=None,
        sampling_rate=22050,
        num_mel_channels=80,
        freq_min=125,
        freq_max=7600,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.frame_length = frame_length
        self.frame_step = frame_step
        self.fft_length = fft_length
        self.sampling_rate = sampling_rate
        self.num_mel_channels = num_mel_channels
        self.freq_min = freq_min
        self.freq_max = freq_max
        # Mel filterbank that the (squared) STFT magnitudes are projected onto.
        self.mel_filterbank = tf.signal.linear_to_mel_weight_matrix(
            num_mel_bins=self.num_mel_channels,
            num_spectrogram_bins=self.frame_length // 2 + 1,
            sample_rate=self.sampling_rate,
            lower_edge_hertz=self.freq_min,
            upper_edge_hertz=self.freq_max,
        )

    def call(self, audio, training=True):
        # Outside of training the input is passed through unchanged
        # (inference feeds spectrograms, not raw audio).
        if not training:
            return audio
        # Short Time Fourier Transform with end-padding. (The paper pads
        # with the 'REFLECT' strategy; this implementation uses pad_end.)
        stft_out = tf.signal.stft(
            tf.squeeze(audio, -1),
            self.frame_length,
            self.frame_step,
            self.fft_length,
            pad_end=True,
        )
        # Power spectrogram projected onto the mel filterbank, then db-scaled.
        magnitude = tf.abs(stft_out)
        mel_spec = tf.matmul(tf.square(magnitude), self.mel_filterbank)
        return tfio.audio.dbscale(mel_spec, top_db=80)

    def get_config(self):
        config = super(MelSpec, self).get_config()
        for attr in (
            "frame_length",
            "frame_step",
            "fft_length",
            "sampling_rate",
            "num_mel_channels",
            "freq_min",
            "freq_max",
        ):
            config[attr] = getattr(self, attr)
        return config
"""
The residual convolutional block extensively uses dilations and has a total receptive
field of 27 timesteps per block. The dilations must grow as a power of the `kernel_size`
to ensure reduction of hissing noise in the output. The network proposed by the paper is
as follows:

"""
# Creating the residual stack block
def residual_stack(input, filters):
    """Convolutional residual stack with weight normalization.

    Three residual units are chained. The first convolution of each unit
    uses a growing dilation rate (1, 3, 9) and is followed by an undilated
    convolution before the skip connection is added.

    Args:
        input: Keras tensor, input of the stack.
        filters: int, determines filter size for the residual stack.

    Returns:
        Residual stack output.
    """

    def weight_norm_conv(x, dilation):
        # Weight-normalized 3-wide convolution, 'same' padding.
        wn_conv = addon_layers.WeightNormalization(
            layers.Conv1D(filters, 3, dilation_rate=dilation, padding="same"),
            data_init=False,
        )
        return wn_conv(x)

    skip = input
    x = input
    for i, dilation in enumerate((1, 3, 9)):
        if i:
            # Units after the first are pre-activated from the running skip.
            x = layers.LeakyReLU()(skip)
        hidden = layers.LeakyReLU()(weight_norm_conv(x, dilation))
        skip = layers.Add()([weight_norm_conv(hidden, 1), skip])
    return skip
"""
Each convolutional block uses the dilations offered by the residual stack
and upsamples the input data by the `upsampling_factor`.
"""
# Dilated convolutional block consisting of the Residual stack
def conv_block(input, conv_dim, upsampling_factor):
    """Dilated convolutional block with weight normalization.

    Upsamples the input with a transposed convolution and refines it with
    a residual stack of dilated convolutions.

    Args:
        input: Keras tensor, input of the block.
        conv_dim: int, determines filter size for the block.
        upsampling_factor: int, scale for upsampling.

    Returns:
        Dilated convolution block output.
    """
    upsampled = addon_layers.WeightNormalization(
        layers.Conv1DTranspose(conv_dim, 16, upsampling_factor, padding="same"),
        data_init=False,
    )(input)
    activated = layers.LeakyReLU()(upsampled)
    stacked = residual_stack(activated, conv_dim)
    return layers.LeakyReLU()(stacked)
"""
The discriminator block consists of convolutions and downsampling layers. This block is
essential for the implementation of the feature matching technique.
Each discriminator outputs a list of feature maps that will be compared during training
to compute the feature matching loss.
"""
def discriminator_block(input):
    """Single-scale discriminator feature extractor.

    Returns the intermediate feature map of every layer together with the
    final 1-channel convolution output, so the feature matching loss can
    compare the maps between real and generated audio.
    """
    # (filters, kernel_size, strides, groups) for the downsampling stack.
    conv_specs = (
        (16, 15, 1, 1),
        (64, 41, 4, 4),
        (256, 41, 4, 16),
        (1024, 41, 4, 64),
        (1024, 41, 4, 256),
        (1024, 5, 1, 1),
    )
    feature_maps = []
    x = input
    for filters, kernel_size, strides, groups in conv_specs:
        conv = addon_layers.WeightNormalization(
            layers.Conv1D(filters, kernel_size, strides, "same", groups=groups),
            data_init=False,
        )(x)
        x = layers.LeakyReLU()(conv)
        feature_maps.append(x)
    # Final prediction map has no activation.
    final = addon_layers.WeightNormalization(
        layers.Conv1D(1, 3, 1, "same"), data_init=False
    )(x)
    feature_maps.append(final)
    return feature_maps
"""
### Create the generator
"""
def create_generator(input_shape):
    """Build the fully convolutional MelGAN generator."""
    inp = keras.Input(input_shape)
    # Raw audio is converted to a mel spectrogram while training.
    x = MelSpec()(inp)
    x = layers.Conv1D(512, 7, padding="same")(x)
    x = layers.LeakyReLU()(x)
    # Four upsampling stages: 8 * 8 * 2 * 2 = 256x total upsampling.
    for conv_dim, upsampling_factor in ((256, 8), (128, 8), (64, 2), (32, 2)):
        x = conv_block(x, conv_dim, upsampling_factor)
    x = addon_layers.WeightNormalization(
        layers.Conv1D(1, 7, padding="same", activation="tanh")
    )(x)
    return keras.Model(inp, x)
# We use a dynamic input shape for the generator since the model is fully convolutional
generator = create_generator((None, 1))
generator.summary()
"""
### Create the discriminator
"""
def create_discriminator(input_shape):
    """Build the multi-scale MelGAN discriminator.

    Three identical discriminator blocks operate on the input at
    progressively average-pooled (halved) resolutions.
    """
    inp = keras.Input(input_shape)
    feature_maps = []
    x = inp
    for scale in range(3):
        if scale:
            x = layers.AveragePooling1D()(x)
        feature_maps.append(discriminator_block(x))
    return keras.Model(inp, feature_maps)
# We use a dynamic input shape for the discriminator
# This is done because the input shape for the generator is unknown
discriminator = create_discriminator((None, 1))
discriminator.summary()
"""
## Defining the loss functions
**Generator Loss**
The generator architecture uses a combination of two losses
1. Mean Squared Error:
This is the standard MSE generator loss calculated between ones and the outputs from the
discriminator with _N_ layers.
<p align="center">
<img src="https://i.imgur.com/dz4JS3I.png" width=300px;></img>
</p>
2. Feature Matching Loss:
This loss involves extracting the outputs of every layer from the discriminator for both
the generator and ground truth and comparing each layer output _k_ using Mean Absolute Error.
<p align="center">
<img src="https://i.imgur.com/gEpSBar.png" width=400px;></img>
</p>
**Discriminator Loss**
The discriminator uses the Mean Absolute Error and compares the real data predictions
with ones and generated predictions with zeros.
<p align="center">
<img src="https://i.imgur.com/bbEnJ3t.png" width=425px;></img>
</p>
"""
# Generator loss
def generator_loss(real_pred, fake_pred):
"""Loss function for the generator.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Loss for the generator.
"""
gen_loss = []
for i in range(len(fake_pred)):
gen_loss.append(mse(tf.ones_like(fake_pred[i][-1]), fake_pred[i][-1]))
return tf.reduce_mean(gen_loss)
def feature_matching_loss(real_pred, fake_pred):
    """Implements the feature matching loss.

    Args:
        real_pred: Tensor, output of the ground truth wave passed through the discriminator.
        fake_pred: Tensor, output of the generator prediction passed through the discriminator.

    Returns:
        Feature Matching Loss.
    """
    # Compare every intermediate feature map (all but the final output) of
    # each discriminator scale between real and generated audio.
    fm_loss = [
        mae(real_maps[layer], fake_maps[layer])
        for real_maps, fake_maps in zip(real_pred, fake_pred)
        for layer in range(len(fake_maps) - 1)
    ]
    return tf.reduce_mean(fm_loss)
def discriminator_loss(real_pred, fake_pred):
    """Implements the discriminator loss.

    Args:
        real_pred: Tensor, output of the ground truth wave passed through the discriminator.
        fake_pred: Tensor, output of the generator prediction passed through the discriminator.

    Returns:
        Discriminator Loss.
    """
    # Real predictions are pushed towards 1, fake predictions towards 0.
    real_loss = [mse(tf.ones_like(scale[-1]), scale[-1]) for scale in real_pred]
    fake_loss = [mse(tf.zeros_like(scale[-1]), scale[-1]) for scale in fake_pred]
    return tf.reduce_mean(real_loss) + tf.reduce_mean(fake_loss)
"""
Defining the MelGAN model for training.
This subclass overrides the `train_step()` method to implement the training logic.
"""
class MelGAN(keras.Model):
    def __init__(self, generator, discriminator, **kwargs):
        """MelGAN trainer class

        Args:
            generator: keras.Model, Generator model
            discriminator: keras.Model, Discriminator model
        """
        super().__init__(**kwargs)
        self.generator = generator
        self.discriminator = discriminator

    def compile(
        self,
        gen_optimizer,
        disc_optimizer,
        generator_loss,
        feature_matching_loss,
        discriminator_loss,
    ):
        """MelGAN compile method.

        Args:
            gen_optimizer: keras.optimizer, optimizer to be used for training
            disc_optimizer: keras.optimizer, optimizer to be used for training
            generator_loss: callable, loss function for generator
            feature_matching_loss: callable, loss function for feature matching
            discriminator_loss: callable, loss function for discriminator
        """
        super().compile()
        # Optimizers
        self.gen_optimizer = gen_optimizer
        self.disc_optimizer = disc_optimizer
        # Losses
        self.generator_loss = generator_loss
        self.feature_matching_loss = feature_matching_loss
        self.discriminator_loss = discriminator_loss
        # Trackers
        self.gen_loss_tracker = keras.metrics.Mean(name="gen_loss")
        self.disc_loss_tracker = keras.metrics.Mean(name="disc_loss")

    def train_step(self, batch):
        """One optimization step for the generator and the discriminator.

        Fixes over the original version:
        - uses the models/optimizers/losses stored on ``self`` (set in
          ``__init__``/``compile``) instead of module-level globals, so the
          trainer works with any generator/discriminator pair;
        - ``real_pred``/``fake_pred`` were swapped: the ground-truth wave
          (``y_batch_train``) is the *real* input and the generated wave is
          the *fake* input. The swap inverted the discriminator loss and
          left the adversarial generator term without a gradient path.
        """
        x_batch_train, y_batch_train = batch
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            # Generating the audio wave
            gen_audio_wave = self.generator(x_batch_train, training=True)
            # Discriminator feature maps for real and generated audio
            real_pred = self.discriminator(y_batch_train)
            fake_pred = self.discriminator(gen_audio_wave)
            # Calculating the generator losses
            gen_loss = self.generator_loss(real_pred, fake_pred)
            fm_loss = self.feature_matching_loss(real_pred, fake_pred)
            # Calculating final generator loss (feature matching weighted 10x)
            gen_fm_loss = gen_loss + 10 * fm_loss
            # Calculating the discriminator loss
            disc_loss = self.discriminator_loss(real_pred, fake_pred)
        # Calculating and applying the gradients for generator and discriminator
        grads_gen = gen_tape.gradient(gen_fm_loss, self.generator.trainable_weights)
        grads_disc = disc_tape.gradient(disc_loss, self.discriminator.trainable_weights)
        self.gen_optimizer.apply_gradients(
            zip(grads_gen, self.generator.trainable_weights)
        )
        self.disc_optimizer.apply_gradients(
            zip(grads_disc, self.discriminator.trainable_weights)
        )

        self.gen_loss_tracker.update_state(gen_fm_loss)
        self.disc_loss_tracker.update_state(disc_loss)

        return {
            "gen_loss": self.gen_loss_tracker.result(),
            "disc_loss": self.disc_loss_tracker.result(),
        }
"""
## Training
The paper suggests that the training with dynamic shapes takes around 400,000 steps (~500
epochs). For this example, we will run it only for a single epoch (819 steps).
Longer training time (greater than 300 epochs) will almost certainly provide better results.
"""
# Adam with low beta_1 and gradient clipping, as is common for GAN training.
gen_optimizer = keras.optimizers.Adam(
    LEARNING_RATE_GEN, beta_1=0.5, beta_2=0.9, clipnorm=1
)
disc_optimizer = keras.optimizers.Adam(
    LEARNING_RATE_DISC, beta_1=0.5, beta_2=0.9, clipnorm=1
)
# Start training
# NOTE(review): this re-creates the generator/discriminator, discarding the
# instances whose summaries were printed above -- presumably intentional so
# training starts from fresh weights; confirm.
generator = create_generator((None, 1))
discriminator = create_discriminator((None, 1))
mel_gan = MelGAN(generator, discriminator)
mel_gan.compile(
    gen_optimizer,
    disc_optimizer,
    generator_loss,
    feature_matching_loss,
    discriminator_loss,
)
# Single epoch for the example; the paper trains for ~400k steps.
mel_gan.fit(
    train_dataset.shuffle(200).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE), epochs=1
)
"""
## Testing the model
The trained model can now be used for real time text-to-speech translation tasks.
To test how fast the MelGAN inference can be, let us take a sample audio mel-spectrogram
and convert it. Note that the actual model pipeline will not include the `MelSpec` layer
and hence this layer will be disabled during inference. The inference input will be a
mel-spectrogram processed similar to the `MelSpec` layer configuration.
For testing this, we will create a randomly uniformly distributed tensor to simulate the
behavior of the inference pipeline.
"""
# Sampling a random tensor to mimic a batch of 128 spectrograms of shape [50, 80]
audio_sample = tf.random.uniform([128, 50, 80])
"""
Timing the inference speed of a single sample. Running this, you can see that the average
inference time per spectrogram ranges from 8 milliseconds to 10 milliseconds on a K80 GPU which is
pretty fast.
"""
pred = generator.predict(audio_sample, batch_size=32, verbose=1)
"""
## Conclusion
The MelGAN is a highly effective architecture for spectral inversion that has a Mean
Opinion Score (MOS) of 3.61 that considerably outperforms the Griffin
Lim algorithm having a MOS of just 1.57. In contrast with this, the MelGAN compares with
the state-of-the-art WaveGlow and WaveNet architectures on text-to-speech and speech
enhancement tasks on
the LJSpeech and VCTK datasets <sup>[1]</sup>.
This tutorial highlights:
1. The advantages of using dilated convolutions that grow with the filter size
2. Implementation of a custom layer for on-the-fly conversion of audio waves to
mel-spectrograms
3. Effectiveness of using the feature matching loss function for training GAN generators.
Further reading
1. [MelGAN paper](https://arxiv.org/pdf/1910.06711v3.pdf) (Kundan Kumar et al.) to
understand the reasoning behind the architecture and training process
2. For in-depth understanding of the feature matching loss, you can refer to [Improved
Techniques for Training GANs](https://arxiv.org/pdf/1606.03498v1.pdf) (Tim Salimans et
al.).
"""
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import sys
from pyspark.sql.functions import array, explode, col, lit, udf, sum, pandas_udf, PandasUDFType
from pyspark.sql.types import DoubleType, StructType, StructField, Row
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
if have_pyarrow:
import pyarrow as pa
@unittest.skipIf(
    not have_pandas or not have_pyarrow,
    pandas_requirement_message or pyarrow_requirement_message)
class CogroupedMapInPandasTests(ReusedSQLTestCase):
    """Tests for cogrouped map: ``df.groupby(...).cogroup(...).applyInPandas``.

    Results are compared against the equivalent plain-pandas computation.
    Uses ``assertEqual``/``assertRaisesRegex`` instead of the deprecated
    aliases ``assertEquals``/``assertRaisesRegexp`` (removed in Python 3.12).
    """

    @property
    def data1(self):
        # 10 ids x 10 keys (20..29), with v = k * 10.
        return self.spark.range(10).toDF('id') \
            .withColumn("ks", array([lit(i) for i in range(20, 30)])) \
            .withColumn("k", explode(col('ks')))\
            .withColumn("v", col('k') * 10)\
            .drop('ks')

    @property
    def data2(self):
        # Same shape as data1, but with v2 = k * 100.
        return self.spark.range(10).toDF('id') \
            .withColumn("ks", array([lit(i) for i in range(20, 30)])) \
            .withColumn("k", explode(col('ks'))) \
            .withColumn("v2", col('k') * 100) \
            .drop('ks')

    def test_simple(self):
        self._test_merge(self.data1, self.data2)

    def test_left_group_empty(self):
        left = self.data1.where(col("id") % 2 == 0)
        self._test_merge(left, self.data2)

    def test_right_group_empty(self):
        right = self.data2.where(col("id") % 2 == 0)
        self._test_merge(self.data1, right)

    def test_different_schemas(self):
        right = self.data2.withColumn('v3', lit('a'))
        self._test_merge(self.data1, right, 'id long, k int, v int, v2 int, v3 string')

    def test_complex_group_by(self):
        left = pd.DataFrame.from_dict({
            'id': [1, 2, 3],
            'k': [5, 6, 7],
            'v': [9, 10, 11]
        })

        right = pd.DataFrame.from_dict({
            'id': [11, 12, 13],
            'k': [5, 6, 7],
            'v2': [90, 100, 110]
        })

        # Group both sides by an expression rather than a plain column.
        left_gdf = self.spark\
            .createDataFrame(left)\
            .groupby(col('id') % 2 == 0)

        right_gdf = self.spark \
            .createDataFrame(right) \
            .groupby(col('id') % 2 == 0)

        def merge_pandas(l, r):
            return pd.merge(l[['k', 'v']], r[['k', 'v2']], on=['k'])

        result = left_gdf \
            .cogroup(right_gdf) \
            .applyInPandas(merge_pandas, 'k long, v long, v2 long') \
            .sort(['k']) \
            .toPandas()

        expected = pd.DataFrame.from_dict({
            'k': [5, 6, 7],
            'v': [9, 10, 11],
            'v2': [90, 100, 110]
        })

        assert_frame_equal(expected, result)

    def test_empty_group_by(self):
        left = self.data1
        right = self.data2

        def merge_pandas(l, r):
            return pd.merge(l, r, on=['id', 'k'])

        result = left.groupby().cogroup(right.groupby())\
            .applyInPandas(merge_pandas, 'id long, k int, v int, v2 int') \
            .sort(['id', 'k']) \
            .toPandas()

        left = left.toPandas()
        right = right.toPandas()

        expected = pd \
            .merge(left, right, on=['id', 'k']) \
            .sort_values(by=['id', 'k'])

        assert_frame_equal(expected, result)

    def test_mixed_scalar_udfs_followed_by_cogrouby_apply(self):
        df = self.spark.range(0, 10).toDF('v1')
        df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
            .withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))

        result = df.groupby().cogroup(df.groupby()) \
            .applyInPandas(lambda x, y: pd.DataFrame([(x.sum().sum(), y.sum().sum())]),
                           'sum1 int, sum2 int').collect()

        self.assertEqual(result[0]['sum1'], 165)
        self.assertEqual(result[0]['sum2'], 165)

    def test_with_key_left(self):
        self._test_with_key(self.data1, self.data1, isLeft=True)

    def test_with_key_right(self):
        self._test_with_key(self.data1, self.data1, isLeft=False)

    def test_with_key_left_group_empty(self):
        left = self.data1.where(col("id") % 2 == 0)
        self._test_with_key(left, self.data1, isLeft=True)

    def test_with_key_right_group_empty(self):
        right = self.data1.where(col("id") % 2 == 0)
        self._test_with_key(self.data1, right, isLeft=False)

    def test_with_key_complex(self):

        def left_assign_key(key, l, _):
            return l.assign(key=key[0])

        result = self.data1 \
            .groupby(col('id') % 2 == 0)\
            .cogroup(self.data2.groupby(col('id') % 2 == 0)) \
            .applyInPandas(left_assign_key, 'id long, k int, v int, key boolean') \
            .sort(['id', 'k']) \
            .toPandas()

        expected = self.data1.toPandas()
        expected = expected.assign(key=expected.id % 2 == 0)

        assert_frame_equal(expected, result)

    def test_wrong_return_type(self):
        # Test that we get a sensible exception invalid values passed to apply
        left = self.data1
        right = self.data2
        with QuietTest(self.sc):
            with self.assertRaisesRegex(
                    NotImplementedError,
                    'Invalid return type.*MapType'):
                left.groupby('id').cogroup(right.groupby('id')).applyInPandas(
                    lambda l, r: l, 'id long, v map<int, int>')

    def test_wrong_args(self):
        left = self.data1
        right = self.data2
        with self.assertRaisesRegex(ValueError, 'Invalid function'):
            left.groupby('id').cogroup(right.groupby('id')) \
                .applyInPandas(lambda: 1, StructType([StructField("d", DoubleType())]))

    def test_case_insensitive_grouping_column(self):
        # SPARK-31915: case-insensitive grouping column should work.
        df1 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
        row = df1.groupby("ColUmn").cogroup(
            df1.groupby("COLUMN")
        ).applyInPandas(lambda r, l: r + l, "column long, value long").first()
        self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())

        df2 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
        row = df1.groupby("ColUmn").cogroup(
            df2.groupby("COLUMN")
        ).applyInPandas(lambda r, l: r + l, "column long, value long").first()
        self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())

    @staticmethod
    def _test_with_key(left, right, isLeft):
        """Cogroup-apply that echoes the grouping key into a 'key' column."""

        def right_assign_key(key, l, r):
            return l.assign(key=key[0]) if isLeft else r.assign(key=key[0])

        result = left \
            .groupby('id') \
            .cogroup(right.groupby('id')) \
            .applyInPandas(right_assign_key, 'id long, k int, v int, key long') \
            .toPandas()

        expected = left.toPandas() if isLeft else right.toPandas()
        expected = expected.assign(key=expected.id)

        assert_frame_equal(expected, result)

    @staticmethod
    def _test_merge(left, right, output_schema='id long, k int, v int, v2 int'):
        """Merge left/right on (id, k) via cogroup-apply; compare to pandas."""

        def merge_pandas(l, r):
            return pd.merge(l, r, on=['id', 'k'])

        result = left \
            .groupby('id') \
            .cogroup(right.groupby('id')) \
            .applyInPandas(merge_pandas, output_schema)\
            .sort(['id', 'k']) \
            .toPandas()

        left = left.toPandas()
        right = right.toPandas()

        expected = pd \
            .merge(left, right, on=['id', 'k']) \
            .sort_values(by=['id', 'k'])

        assert_frame_equal(expected, result)
if __name__ == "__main__":
    from pyspark.sql.tests.test_pandas_cogrouped_map import *

    # Use the XML-report runner when available; otherwise fall back to the
    # default text runner (testRunner=None).
    runner = None
    try:
        import xmlrunner
        runner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        pass
    unittest.main(testRunner=runner, verbosity=2)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Matthew Good <trac@matt-good.net>
import cgi
import dircache
import fnmatch
from functools import partial
import gc
import locale
import os
import pkg_resources
from pprint import pformat, pprint
import re
import sys
from genshi.core import Markup
from genshi.builder import Fragment, tag
from genshi.output import DocType
from genshi.template import TemplateLoader
from trac import __version__ as TRAC_VERSION
from trac.config import ExtensionOption, Option, OrderedExtensionsOption
from trac.core import *
from trac.env import open_environment
from trac.loader import get_plugin_info, match_plugins_to_frames
from trac.perm import PermissionCache, PermissionError
from trac.resource import ResourceNotFound
from trac.util import arity, get_frame_info, get_last_traceback, hex_entropy, \
read_file, translation
from trac.util.concurrency import threading
from trac.util.datefmt import format_datetime, http_date, localtz, timezone, \
user_time
from trac.util.text import exception_to_unicode, shorten_line, to_unicode
from trac.util.translation import _, get_negotiated_locale, has_babel, \
safefmt, tag_
from trac.web.api import *
from trac.web.chrome import Chrome
from trac.web.href import Href
from trac.web.session import Session
#: This URL is used for semi-automatic bug reports (see
#: `send_internal_error`). Please modify it to point to your own
#: Trac instance if you distribute a patched version of Trac.
default_tracker = 'http://trac.edgewall.org'
class FakeSession(dict):
    """Stand-in session used when the real session cannot be retrieved.

    Behaves like an empty dict with no session id; saving is a no-op.
    """

    # No session id for a fake session.
    sid = None

    def save(self):
        # Nothing to persist.
        pass
class FakePerm(dict):
    """Stand-in permission cache that denies every permission."""

    def require(self, *args):
        # Every permission check fails.
        return False

    def __call__(self, *args):
        # Mimic the contextualization call style by returning itself.
        return self
class RequestDispatcher(Component):
"""Web request dispatcher.
This component dispatches incoming requests to registered
handlers. Besides, it also takes care of user authentication and
request pre- and post-processing.
"""
required = True
authenticators = ExtensionPoint(IAuthenticator)
handlers = ExtensionPoint(IRequestHandler)
filters = OrderedExtensionsOption('trac', 'request_filters',
IRequestFilter,
doc="""Ordered list of filters to apply to all requests
(''since 0.10'').""")
default_handler = ExtensionOption('trac', 'default_handler',
IRequestHandler, 'WikiModule',
"""Name of the component that handles requests to the base
URL.
Options include `TimelineModule`, `RoadmapModule`,
`BrowserModule`, `QueryModule`, `ReportModule`, `TicketModule`
and `WikiModule`. The default is `WikiModule`. (''since 0.9'')""")
default_timezone = Option('trac', 'default_timezone', '',
"""The default timezone to use""")
default_language = Option('trac', 'default_language', '',
"""The preferred language to use if no user preference has
been set. (''since 0.12.1'')
""")
default_date_format = Option('trac', 'default_date_format', '',
"""The date format. Valid options are 'iso8601' for selecting
ISO 8601 format, or leave it empty which means the default
date format will be inferred from the browser's default
language. (''since 0.13'')
""")
# Public API
def authenticate(self, req):
for authenticator in self.authenticators:
authname = authenticator.authenticate(req)
if authname:
return authname
else:
return 'anonymous'
    def dispatch(self, req):
        """Find a registered handler that matches the request and let
        it process it.

        In addition, this method initializes the data dictionary
        passed to the template and adds the web site chrome.
        """
        self.log.debug('Dispatching %r', req)
        chrome = Chrome(self.env)
        # Setup request callbacks for lazily-evaluated properties
        req.callbacks.update({
            'authname': self.authenticate,
            'chrome': chrome.prepare_request,
            'perm': self._get_perm,
            'session': self._get_session,
            'locale': self._get_locale,
            'lc_time': self._get_lc_time,
            'tz': self._get_timezone,
            'form_token': self._get_form_token
        })
        try:
            try:
                # Select the component that should handle the request
                chosen_handler = None
                try:
                    for handler in self.handlers:
                        if handler.match_request(req):
                            chosen_handler = handler
                            break
                    # No handler claimed the request: the configured default
                    # handler serves the base URL.
                    if not chosen_handler:
                        if not req.path_info or req.path_info == '/':
                            chosen_handler = self.default_handler
                    # pre-process any incoming request, whether a handler
                    # was found or not
                    chosen_handler = self._pre_process_request(req,
                                                               chosen_handler)
                except TracError, e:
                    raise HTTPInternalError(e)
                if not chosen_handler:
                    if req.path_info.endswith('/'):
                        # Strip trailing / and redirect
                        target = req.path_info.rstrip('/').encode('utf-8')
                        if req.query_string:
                            target += '?' + req.query_string
                        req.redirect(req.href + target, permanent=True)
                    raise HTTPNotFound('No handler matched request to %s',
                                       req.path_info)
                req.callbacks['chrome'] = partial(chrome.prepare_request,
                                                  handler=chosen_handler)
                # Protect against CSRF attacks: we validate the form token
                # for all POST requests with a content-type corresponding
                # to form submissions
                if req.method == 'POST':
                    ctype = req.get_header('Content-Type')
                    if ctype:
                        ctype, options = cgi.parse_header(ctype)
                    if ctype in ('application/x-www-form-urlencoded',
                                 'multipart/form-data') and \
                            req.args.get('__FORM_TOKEN') != req.form_token:
                        if self.env.secure_cookies and req.scheme == 'http':
                            msg = _('Secure cookies are enabled, you must '
                                    'use https to submit forms.')
                        else:
                            msg = _('Do you have cookies enabled?')
                        raise HTTPBadRequest(_('Missing or invalid form token.'
                                               ' %(msg)s', msg=msg))
                # Process the request and render the template
                resp = chosen_handler.process_request(req)
                if resp:
                    if len(resp) == 2: # old Clearsilver template and HDF data
                        self.log.error("Clearsilver template are no longer "
                                       "supported (%s)", resp[0])
                        raise TracError(
                            _("Clearsilver templates are no longer supported, "
                              "please contact your Trac administrator."))
                    # Genshi
                    template, data, content_type = \
                        self._post_process_request(req, *resp)
                    if 'hdfdump' in req.args:
                        req.perm.require('TRAC_ADMIN')
                        # debugging helper - no need to render first
                        out = StringIO()
                        pprint(data, out)
                        req.send(out.getvalue(), 'text/plain')
                    output = chrome.render_template(req, template, data,
                                                    content_type)
                    # Give the session a chance to persist changes
                    req.session.save()
                    # NOTE(review): req.send appears to terminate processing by
                    # raising RequestDone (handled below) -- confirm in trac.web.api.
                    req.send(output, content_type or 'text/html')
                else:
                    self._post_process_request(req)
            except RequestDone:
                raise
            except:
                # post-process the request in case of errors
                err = sys.exc_info()
                try:
                    self._post_process_request(req)
                except RequestDone:
                    raise
                except Exception, e:
                    self.log.error("Exception caught while post-processing"
                                   " request: %s",
                                   exception_to_unicode(e, traceback=True))
                # Python 2 three-expression raise: re-raise the original
                # exception with its original traceback.
                raise err[0], err[1], err[2]
        except PermissionError, e:
            raise HTTPForbidden(to_unicode(e))
        except ResourceNotFound, e:
            raise HTTPNotFound(e)
        except TracError, e:
            raise HTTPInternalError(e)
# Internal methods
def _get_perm(self, req):
    """Return the permission object to attach to `req`.

    A request whose session is a `FakeSession` gets a `FakePerm` stub;
    any other request gets a real `PermissionCache` built for the
    authenticated user.
    """
    if not isinstance(req.session, FakeSession):
        return PermissionCache(self.env, self.authenticate(req))
    return FakePerm()
def _get_session(self, req):
    """Return a `Session` for `req`, degrading to a `FakeSession` stub
    when the real session cannot be retrieved (the error is logged,
    not propagated, so the request can still be served)."""
    try:
        return Session(self.env, req)
    except TracError, e:
        self.log.error("can't retrieve session: %s",
                       exception_to_unicode(e))
        return FakeSession()
def _get_locale(self, req):
    """Negotiate the locale for `req` from the session preference, the
    configured default, and the browser's accepted languages.

    Returns None when Babel is not installed.
    """
    if not has_babel:
        return None
    candidates = [req.session.get('language'),
                  self.env.config.get('trac', 'default_language', '')]
    negotiated = get_negotiated_locale(candidates + req.languages)
    self.log.debug("Negotiated locale: %s -> %s", candidates[0], negotiated)
    return negotiated
def _get_lc_time(self, req):
lc_time = req.session.get('lc_time')
if not lc_time or lc_time == 'locale' and not has_babel:
lc_time = self.default_date_format
if lc_time == 'iso8601':
return 'iso8601'
return req.locale
def _get_timezone(self, req):
    """Return the timezone object for `req` from the session's 'tz'
    entry (or the configured default), falling back to the server's
    local timezone on any lookup failure."""
    tzname = req.session.get('tz', self.default_timezone or 'missing')
    try:
        return timezone(tzname)
    except Exception:
        # Unknown or invalid zone name -- use the local zone instead.
        return localtz
def _get_form_token(self, req):
"""Used to protect against CSRF.
The 'form_token' is strong shared secret stored in a user
cookie. By requiring that every POST form to contain this
value we're able to protect against CSRF attacks. Since this
value is only known by the user and not by an attacker.
If the the user does not have a `trac_form_token` cookie a new
one is generated.
"""
if req.incookie.has_key('trac_form_token'):
return req.incookie['trac_form_token'].value
else:
req.outcookie['trac_form_token'] = hex_entropy(24)
req.outcookie['trac_form_token']['path'] = req.base_path or '/'
if self.env.secure_cookies:
req.outcookie['trac_form_token']['secure'] = True
return req.outcookie['trac_form_token'].value
def _pre_process_request(self, req, chosen_handler):
for filter_ in self.filters:
chosen_handler = filter_.pre_process_request(req, chosen_handler)
return chosen_handler
def _post_process_request(self, req, *args):
    """Run every request filter's `post_process_request`, in reverse
    registration order, over the handler's response tuple `args`.

    Called with the `(template, data, content_type)` tuple on success,
    or with no extra arguments on error, in which case filters are
    notified with None values and no result is kept.
    """
    nbargs = len(args)
    resp = args
    for f in reversed(self.filters):
        # As the arity of `post_process_request` has changed since
        # Trac 0.10, only filters with the same arity get passed real
        # values.  Errors will call all filters with None arguments,
        # and results will not be saved.
        extra_arg_count = arity(f.post_process_request) - 1
        if extra_arg_count == nbargs:
            resp = f.post_process_request(req, *resp)
        elif nbargs == 0:
            # Error path: notify the filter without keeping its result.
            f.post_process_request(req, *(None,)*extra_arg_count)
    return resp
# Collapses runs of consecutive slashes in a URL path down to one '/'.
_slashes_re = re.compile(r'/+')
def dispatch_request(environ, start_response):
    """Main entry point for the Trac web interface.

    :param environ: the WSGI environment dict
    :param start_response: the WSGI callback for starting the response
    """
    # SCRIPT_URL is an Apache var containing the URL before URL rewriting
    # has been applied, so we can use it to reconstruct logical SCRIPT_NAME
    script_url = environ.get('SCRIPT_URL')
    if script_url is not None:
        path_info = environ.get('PATH_INFO')
        if not path_info:
            environ['SCRIPT_NAME'] = script_url
        else:
            # mod_wsgi squashes slashes in PATH_INFO (!)
            script_url = _slashes_re.sub('/', script_url)
            path_info = _slashes_re.sub('/', path_info)
            if script_url.endswith(path_info):
                environ['SCRIPT_NAME'] = script_url[:-len(path_info)]

    # If the expected configuration keys aren't found in the WSGI environment,
    # try looking them up in the process environment variables
    environ.setdefault('trac.env_path', os.getenv('TRAC_ENV'))
    environ.setdefault('trac.env_parent_dir',
                       os.getenv('TRAC_ENV_PARENT_DIR'))
    environ.setdefault('trac.env_index_template',
                       os.getenv('TRAC_ENV_INDEX_TEMPLATE'))
    environ.setdefault('trac.template_vars',
                       os.getenv('TRAC_TEMPLATE_VARS'))
    environ.setdefault('trac.locale', '')
    environ.setdefault('trac.base_url',
                       os.getenv('TRAC_BASE_URL'))

    locale.setlocale(locale.LC_ALL, environ['trac.locale'])

    # Determine the environment
    env_path = environ.get('trac.env_path')
    if not env_path:
        env_parent_dir = environ.get('trac.env_parent_dir')
        env_paths = environ.get('trac.env_paths')
        if env_parent_dir or env_paths:
            # The first component of the path is the base name of the
            # environment
            path_info = environ.get('PATH_INFO', '').lstrip('/').split('/')
            env_name = path_info.pop(0)

            if not env_name:
                # No specific environment requested, so render an environment
                # index page
                send_project_index(environ, start_response, env_parent_dir,
                                   env_paths)
                return []

            errmsg = None

            # To make the matching patterns of request handlers work, we append
            # the environment name to the `SCRIPT_NAME` variable, and keep only
            # the remaining path in the `PATH_INFO` variable.
            script_name = environ.get('SCRIPT_NAME', '')
            try:
                script_name = unicode(script_name, 'utf-8')
                # (as Href expects unicode parameters)
                environ['SCRIPT_NAME'] = Href(script_name)(env_name)
                environ['PATH_INFO'] = '/' + '/'.join(path_info)

                if env_parent_dir:
                    env_path = os.path.join(env_parent_dir, env_name)
                else:
                    env_path = get_environments(environ).get(env_name)

                if not env_path or not os.path.isdir(env_path):
                    errmsg = 'Environment not found'
            except UnicodeDecodeError:
                errmsg = 'Invalid URL encoding (was %r)' % script_name

            if errmsg:
                # Plain-text 404 -- no environment means no templates.
                start_response('404 Not Found',
                               [('Content-Type', 'text/plain'),
                                ('Content-Length', str(len(errmsg)))])
                return [errmsg]

    if not env_path:
        raise EnvironmentError('The environment options "TRAC_ENV" or '
                               '"TRAC_ENV_PARENT_DIR" or the mod_python '
                               'options "TracEnv" or "TracEnvParentDir" are '
                               'missing. Trac requires one of these options '
                               'to locate the Trac environment(s).')

    run_once = environ['wsgi.run_once']

    env = env_error = None
    try:
        # Environment caching is pointless for run-once (e.g. CGI) setups.
        env = open_environment(env_path, use_cache=not run_once)
        if env.base_url_for_redirect:
            environ['trac.base_url'] = env.base_url

        # Web front-end type and version information
        if not hasattr(env, 'webfrontend'):
            mod_wsgi_version = environ.get('mod_wsgi.version')
            if mod_wsgi_version:
                mod_wsgi_version = (
                    "%s (WSGIProcessGroup %s WSGIApplicationGroup %s)" %
                    ('.'.join([str(x) for x in mod_wsgi_version]),
                     environ.get('mod_wsgi.process_group'),
                     environ.get('mod_wsgi.application_group') or
                     '%{GLOBAL}'))
                environ.update({
                    'trac.web.frontend': 'mod_wsgi',
                    'trac.web.version': mod_wsgi_version})
            env.webfrontend = environ.get('trac.web.frontend')
            if env.webfrontend:
                env.systeminfo.append((env.webfrontend,
                                       environ['trac.web.version']))
    except Exception, e:
        # Defer reporting: _dispatch_request renders the error page.
        env_error = e

    req = Request(environ, start_response)
    translation.make_activable(lambda: req.locale, env.path if env else None)
    try:
        return _dispatch_request(req, env, env_error)
    finally:
        translation.deactivate()
        if env and not run_once:
            env.shutdown(threading._get_ident())
            # Now it's a good time to do some clean-ups
            #
            # Note: enable the '##' lines as soon as there's a suspicion
            #       of memory leak due to uncollectable objects (typically
            #       objects with a __del__ method caught in a cycle)
            #
            ##gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
            unreachable = gc.collect()
            ##env.log.debug("%d unreachable objects found.", unreachable)
            ##uncollectable = len(gc.garbage)
            ##if uncollectable:
            ##    del gc.garbage[:]
            ##    env.log.warn("%d uncollectable objects found.", uncollectable)
def _dispatch_request(req, env, env_error):
    """Hand `req` to a `RequestDispatcher` for `env`, converting any
    uncaught exception into a rendered error page.

    :param env: the opened environment, or None if opening it failed
    :param env_error: the exception raised while opening `env`, if any
    :return: the list of response body chunks collected on the request
    """
    resp = []

    # fixup env.abs_href if `[trac] base_url` was not specified
    if env and not env.abs_href.base:
        env._abs_href = req.abs_href

    try:
        if not env and env_error:
            raise HTTPInternalError(env_error)
        try:
            dispatcher = RequestDispatcher(env)
            dispatcher.dispatch(req)
        except RequestDone:
            # Normal completion signal -- the response is already sent.
            pass
        resp = req._response or []
    except HTTPException, e:
        _send_user_error(req, env, e)
    except Exception, e:
        send_internal_error(env, req, sys.exc_info())
    return resp
def _send_user_error(req, env, e):
    """Render an error page for the `HTTPException` `e` raised while
    serving `req`, with a localized title/message when possible."""
    # See trac/web/api.py for the definition of HTTPException subclasses.
    if env:
        env.log.warn('[%s] %s' % (req.remote_addr, exception_to_unicode(e)))
    try:
        # We first try to get localized error messages here, but we
        # should ignore secondary errors if the main error was also
        # due to i18n issues
        title = _('Error')
        if e.reason:
            if title.lower() in e.reason.lower():
                title = e.reason
            else:
                title = _('Error: %(message)s', message=e.reason)
    except Exception:
        title = 'Error'
    # The message is based on the e.detail, which can be an Exception
    # object, but not a TracError one: when creating HTTPException,
    # a TracError.message is directly assigned to e.detail
    if isinstance(e.detail, Exception):  # not a TracError
        message = exception_to_unicode(e.detail)
    elif isinstance(e.detail, Fragment):  # markup coming from a TracError
        message = e.detail
    else:
        message = to_unicode(e.detail)
    data = {'title': title, 'type': 'TracError', 'message': message,
            'frames': [], 'traceback': None}
    if e.code == 403 and req.authname == 'anonymous':
        # TRANSLATOR: ... not logged in, you may want to 'do so' now (link)
        do_so = tag.a(_("do so"), href=req.href.login())
        req.chrome['notices'].append(
            tag_("You are currently not logged in. You may want to "
                 "%(do_so)s now.", do_so=do_so))
    try:
        req.send_error(sys.exc_info(), status=e.code, env=env, data=data)
    except RequestDone:
        # Error page delivered successfully.
        pass
def send_internal_error(env, req, exc_info):
    """Render the internal-error (HTTP 500) page for the exception
    described by `exc_info`, including traceback, plugin analysis and a
    pre-filled bug report for TRAC_ADMIN users."""
    if env:
        env.log.error("Internal Server Error: %s",
                      exception_to_unicode(exc_info[1], traceback=True))
    message = exception_to_unicode(exc_info[1])
    traceback = get_last_traceback()

    frames, plugins, faulty_plugins = [], [], []
    th = 'http://trac-hacks.org'
    has_admin = False
    try:
        has_admin = 'TRAC_ADMIN' in req.perm
    except Exception:
        # Permission lookup itself may fail during a crash; treat as
        # a non-admin request.
        pass

    tracker = default_tracker
    if has_admin and not isinstance(exc_info[1], MemoryError):
        # Collect frame and plugin information
        frames = get_frame_info(exc_info[2])
        if env:
            plugins = [p for p in get_plugin_info(env)
                       if any(c['enabled']
                              for m in p['modules'].itervalues()
                              for c in m['components'].itervalues())]
            match_plugins_to_frames(plugins, frames)

        # Identify the tracker where the bug should be reported
        faulty_plugins = [p for p in plugins if 'frame_idx' in p]
        faulty_plugins.sort(key=lambda p: p['frame_idx'])
        if faulty_plugins:
            info = faulty_plugins[0]['info']
            if 'trac' in info:
                tracker = info['trac']
            elif info.get('home_page', '').startswith(th):
                tracker = th

    def get_description(_):
        # Build the wiki-formatted bug report; `_` is the translation
        # function, so the same body can be rendered in English and in
        # the current locale.
        if env and has_admin:
            sys_info = "".join("|| '''`%s`''' || `%s` ||\n"
                               % (k, v.replace('\n', '` [[br]] `'))
                               for k, v in env.get_systeminfo())
            sys_info += "|| '''`jQuery`''' || `#JQUERY#` ||\n"
            enabled_plugins = "".join("|| '''`%s`''' || `%s` ||\n"
                                      % (p['name'], p['version'] or _('N/A'))
                                      for p in plugins)
        else:
            sys_info = _("''System information not available''\n")
            enabled_plugins = _("''Plugin information not available''\n")
        return _("""\
==== How to Reproduce ====
While doing a %(method)s operation on `%(path_info)s`, Trac issued an internal error.
''(please provide additional details here)''
Request parameters:
{{{
%(req_args)s
}}}
User agent: `#USER_AGENT#`
==== System Information ====
%(sys_info)s
==== Enabled Plugins ====
%(enabled_plugins)s
==== Python Traceback ====
{{{
%(traceback)s}}}""",
                 method=req.method, path_info=req.path_info,
                 req_args=pformat(req.args), sys_info=sys_info,
                 enabled_plugins=enabled_plugins, traceback=to_unicode(traceback))

    # Generate the description once in English, once in the current locale
    description_en = get_description(lambda s, **kw: safefmt(s, kw))
    try:
        description = get_description(_)
    except Exception:
        description = description_en

    data = {'title': 'Internal Error',
            'type': 'internal', 'message': message,
            'traceback': traceback, 'frames': frames,
            'shorten_line': shorten_line,
            'plugins': plugins, 'faulty_plugins': faulty_plugins,
            'tracker': tracker,
            'description': description, 'description_en': description_en}

    try:
        req.send_error(exc_info, status=500, env=env, data=data)
    except RequestDone:
        pass
def send_project_index(environ, start_response, parent_dir=None,
                       env_paths=None):
    """Render the project index page listing all known Trac
    environments, using either the default `index.html` template or a
    custom one given via the `trac.env_index_template` WSGI key."""
    req = Request(environ, start_response)

    loadpaths = [pkg_resources.resource_filename('trac', 'templates')]
    if req.environ.get('trac.env_index_template'):
        env_index_template = req.environ['trac.env_index_template']
        tmpl_path, template = os.path.split(env_index_template)
        # Custom template directory takes precedence over the default.
        loadpaths.insert(0, tmpl_path)
    else:
        template = 'index.html'

    data = {'trac': {'version': TRAC_VERSION,
                     'time': user_time(req, format_datetime)},
            'req': req}
    if req.environ.get('trac.template_vars'):
        # Extra template variables are passed as "key=value,key=value".
        for pair in req.environ['trac.template_vars'].split(','):
            key, val = pair.split('=')
            data[key] = val
    try:
        href = Href(req.base_path)
        projects = []
        for env_name, env_path in get_environments(environ).items():
            try:
                env = open_environment(env_path,
                                       use_cache=not environ['wsgi.run_once'])
                proj = {
                    'env': env,
                    'name': env.project_name,
                    'description': env.project_description,
                    'href': href(env_name)
                }
            except Exception, e:
                # Broken environments are still listed, with the error
                # shown as the project description.
                proj = {'name': env_name, 'description': to_unicode(e)}
            projects.append(proj)
        projects.sort(lambda x, y: cmp(x['name'].lower(), y['name'].lower()))

        data['projects'] = projects

        loader = TemplateLoader(loadpaths, variable_lookup='lenient',
                                default_encoding='utf-8')
        tmpl = loader.load(template)
        stream = tmpl.generate(**data)
        output = stream.render('xhtml', doctype=DocType.XHTML_STRICT,
                               encoding='utf-8')
        req.send(output, 'text/html')
    except RequestDone:
        pass
def get_tracignore_patterns(env_parent_dir):
    """Read shell-style ignore patterns from `env_parent_dir/.tracignore`.

    Returns the stripped, non-empty, non-comment lines of that file, or
    the catch-all default `['.*']` when the file cannot be read.
    """
    tracignore = os.path.join(env_parent_dir, '.tracignore')
    try:
        content = read_file(tracignore)
    except IOError:
        return ['.*']
    patterns = []
    for raw_line in content.splitlines():
        stripped = raw_line.strip()
        if stripped and not stripped.startswith('#'):
            patterns.append(stripped)
    return patterns
def get_environments(environ, warn=False):
    """Retrieve canonical environment name to path mapping.

    The environments may not be all valid environments, but they are
    good candidates.

    Candidates come from the `trac.env_paths` WSGI key, extended with
    the subdirectories of `trac.env_parent_dir` that do not match a
    `.tracignore` pattern.  When `warn` is true, duplicate environment
    names are reported on stderr.
    """
    env_paths = environ.get('trac.env_paths', [])
    env_parent_dir = environ.get('trac.env_parent_dir')
    if env_parent_dir:
        env_parent_dir = os.path.normpath(env_parent_dir)
        # dircache.annotate appends '/' to directory entries, which is
        # used below to keep only directories.
        paths = dircache.listdir(env_parent_dir)[:]
        dircache.annotate(env_parent_dir, paths)

        # Filter paths that match the .tracignore patterns
        ignore_patterns = get_tracignore_patterns(env_parent_dir)
        paths = [path[:-1] for path in paths if path[-1] == '/'
                 and not any(fnmatch.fnmatch(path[:-1], pattern)
                             for pattern in ignore_patterns)]
        env_paths.extend(os.path.join(env_parent_dir, project) \
                         for project in paths)

    envs = {}
    for env_path in env_paths:
        env_path = os.path.normpath(env_path)
        if not os.path.isdir(env_path):
            continue
        env_name = os.path.split(env_path)[1]
        if env_name in envs:
            # First occurrence wins; later ones are ignored.
            if warn:
                print >> sys.stderr, ('Warning: Ignoring project "%s" since '
                                      'it conflicts with project "%s"'
                                      % (env_path, envs[env_name]))
        else:
            envs[env_name] = env_path
    return envs
| |
import os
import sys
import json
from requests_oauthlib import OAuth2Session
# Python 2 and 3 compatible input
from builtins import input
from .config import Parser, settings
from .errors import MezzanineValueError
# OAuth redirect URI -- must match the value supplied when the OAuth app
# was registered on the server!  Ideally this would be the out-of-band
# URI 'urn:ietf:wg:oauth:2.0:oob', but that is currently unsupported by
# django-oauth-toolkit, so an echo service is used instead and the user
# copies the `code` query argument back by hand.
REDIRECT_URI = 'https://httpbin.org/get'
class MezzanineCore(object):
    """
    Mezzanine API Client SDK

    Handles OAuth2 authentication against a Mezzanine REST API and
    provides low-level GET/POST/PUT helpers used by the public client.
    """

    def __init__(self, credentials=None, api_url=None, version=None):
        """
        Create new instance of Mezzanine Client

        :param credentials: tuple (app_id, app_secret)
        :param api_url: str url to Mezzanine REST API
        :param version: str version of the REST API (currently unimplemented)
        """
        super(MezzanineCore, self).__init__()

        # Allow insecure transport protocol (HTTP) for development/testing purposes
        os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'

        # Check for OAuth2 credentials: explicit tuple first, then the
        # MZN_ID / MZN_SECRET environment variables.
        if credentials and type(credentials) is tuple:
            self.client_id = credentials[0]
            self.client_secret = credentials[1]
        else:
            try:
                self.client_id = os.environ['MZN_ID']
                self.client_secret = os.environ['MZN_SECRET']
            except KeyError:
                print('Error: API credentials were not provided.\n'
                      'Please set environment variables MZN_ID and MZN_SECRET with your OAuth app ID and secret.')
                sys.exit(1)
        credentials_data = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
        }

        # API URLs (argument overrides config; config overrides the
        # local development default)
        self.api_url = api_url or settings.api_url or 'http://127.0.0.1:8000/api'
        self.auth_url = self.api_url + '/oauth2/authorize'
        self.token_url = self.api_url + '/oauth2/token/'
        self.refresh_url = self.token_url

        # Set refresh token from cache (if exists)
        refresh_token = settings.refresh_token

        # Initialise session
        self.session = OAuth2Session(self.client_id, redirect_uri=REDIRECT_URI, auto_refresh_url=self.refresh_url)
        self.session.headers.update({'Accept': 'application/json', 'Content-Type': 'application/json'})

        # Authenticate: interactive authorization-code flow when no
        # cached refresh token exists, silent refresh otherwise.
        if not refresh_token:
            authorization_url, state = self.session.authorization_url(self.auth_url)
            print("Please click to authorize this app: {}".format(authorization_url))
            code = input("Paste the authorization code (args > code) from your browser here: ").strip()

            # Fetch the access token
            self.session.fetch_token(self.token_url, client_secret=self.client_secret, code=code)
            self._dump()
        else:
            # Refresh the token
            self.session.refresh_token(self.refresh_url, refresh_token=refresh_token, **credentials_data)
            self._dump()

    def _dump(self):
        """
        Save refresh token to client config (~/.mezzanine.cfg)
        """
        filename = os.path.expanduser('~/.mezzanine.cfg')
        parser = Parser()
        # Ensure the [general] section exists even for a fresh/empty
        # config; read() merges any existing file content into it.
        parser.add_section('general')
        parser.read(filename)
        parser.set('general', 'refresh_token', self.session.token['refresh_token'])
        with open(filename, 'w') as config_file:
            parser.write(config_file)

    @staticmethod
    def _json_serialize(obj):
        """
        Returns JSON serialization of an object
        """
        return json.dumps(obj)

    @staticmethod
    def _json_deserialize(string):
        """
        Returns dict deserialization of a JSON string

        :raises MezzanineValueError: if the string is not valid JSON
        """
        try:
            return json.loads(string)
        except ValueError:
            raise MezzanineValueError('Invalid API response.')

    def _url_joiner(self, *args):
        """
        Concatenate given endpoint resource with API URL
        """
        args = map(str, args)
        return '/'.join([self.api_url] + list(args))

    def _api_resource(self, method, resource, params=None, data=None):
        """
        Make an API request

        :raises requests.HTTPError: on a non-2xx response status
        """
        url = self._url_joiner(*resource)
        response = self.session.request(method, url, params=params, data=data)
        response.raise_for_status()
        return response

    def _get(self, resource, params=None):
        """
        Make a GET HTTP request
        """
        r = self._api_resource('GET', resource, params=params)
        item = self._json_deserialize(r.content.decode('utf-8'))
        return item

    def _post(self, resource, data, params=None):
        """
        Make a POST HTTP request
        """
        r = self._api_resource('POST', resource, data=self._json_serialize(data), params=params)
        item = self._json_deserialize(r.content.decode('utf-8'))
        return item

    def _put(self, resource, data, params=None):
        """
        Make a PUT HTTP request
        """
        r = self._api_resource('PUT', resource, data=self._json_serialize(data), params=params)
        item = self._json_deserialize(r.content.decode('utf-8'))
        return item
class Mezzanine(MezzanineCore):
    """
    The publicly accessible API client class

    Thin, endpoint-specific wrappers around the low-level request
    helpers provided by `MezzanineCore`.
    """

    def __init__(self, credentials=None, api_url=None, version=None):
        super(Mezzanine, self).__init__(credentials, api_url, version)

    def get_post(self, item_id):
        """Fetch one published blog post by id and return it as a dict."""
        resource = ['posts', int(item_id)]
        return self._get(resource)

    def get_posts(self, offset=0, limit=10, category_name="", date_min=""):
        """Fetch the most recently published blog posts.

        :param offset: pagination offset
        :param limit: pagination limit
        :param category_name: category name
        :param date_min: minimum date published (e.g. `2018-01-01`)
        :return: list of dicts, one per post
        """
        endpoint = 'posts?offset={}&limit={}&category_name={}&date_min={}'.format(
            int(offset), int(limit), category_name, date_min)
        return self._get([endpoint])['results']

    def create_post(self, data):
        """Create a blog post from `data` (JSON with 'title' and
        'content'); returns the API response including the new id."""
        return self._post(['posts'], data)

    def get_page(self, item_id):
        """Fetch one page by id and return it as a dict."""
        resource = ['pages', int(item_id)]
        return self._get(resource)

    def get_pages(self, offset=0, limit=10):
        """Fetch pages; returns a list of dicts.

        :param offset: pagination offset
        :param limit: pagination limit
        """
        endpoint = 'pages?offset={}&limit={}'.format(int(offset), int(limit))
        return self._get([endpoint])['results']

    def get_user(self, item_id):
        """Fetch one user by id and return it as a dict."""
        resource = ['users', int(item_id)]
        return self._get(resource)

    def get_users(self, offset=0, limit=20):
        """Fetch users; returns a list of dicts.

        :param offset: pagination offset
        :param limit: pagination limit
        """
        endpoint = 'users?offset={}&limit={}'.format(int(offset), int(limit))
        return self._get([endpoint])['results']

    def get_category(self, item_id):
        """Fetch one category by id and return it as a dict."""
        resource = ['categories', int(item_id)]
        return self._get(resource)

    def get_categories(self, offset=0, limit=20):
        """Fetch categories; returns a list of dicts.

        :param offset: pagination offset
        :param limit: pagination limit
        """
        endpoint = 'categories?offset={}&limit={}'.format(int(offset), int(limit))
        return self._get([endpoint])['results']

    def get_site(self):
        """Fetch site/app metadata as a dict."""
        return self._get(['site'])
| |
"""
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
html = markdown.markdown(your_text_string)
See <http://www.freewisdom.org/projects/python-markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE for details).
"""
# Package version string, plus a tuple form for programmatic comparison.
version = "2.1.0"
version_info = (2,1,0, "Dev")
import re
import codecs
from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
from md_logging import message
import util
from preprocessors import build_preprocessors
from blockprocessors import build_block_parser
from treeprocessors import build_treeprocessors
from inlinepatterns import build_inlinepatterns
from postprocessors import build_postprocessors
from extensions import Extension
import html4
# For backwards compatibility in the 2.0.x series
# The things defined in these modules started off in __init__.py so third
# party code might need to access them here.
from util import *
class Markdown:
    """Convert Markdown to HTML."""

    doc_tag = "div"     # Element used to wrap document - later removed

    # Per-instance options and their defaults; each becomes an attribute
    # in __init__ and may be overridden via keyword arguments.
    option_defaults = {
        'html_replacement_text' : '[HTML_REMOVED]',
        'tab_length' : 4,
        'enable_attributes' : True,
        'smart_emphasis' : True,
    }

    # Maps output-format names to serializer callables.
    output_formats = {
        'html' : html4.to_html_string,
        'html4' : html4.to_html_string,
        'xhtml' : util.etree.tostring,
        'xhtml1': util.etree.tostring,
    }

    def __init__(self, extensions=[], **kwargs):
        """
        Creates a new Markdown instance.

        Keyword arguments:

        * extensions: A list of extensions.
           If they are of type string, the module mdx_name.py will be loaded.
           If they are a subclass of markdown.Extension, they will be used
           as-is.
        * extension_configs: Configuration settings for extensions.
        * output_format: Format of output. Supported formats are:
            * "xhtml1": Outputs XHTML 1.x. Default.
            * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
            * "html4": Outputs HTML 4
            * "html": Outputs latest supported version of HTML (currently HTML 4).
            Note that it is suggested that the more specific formats ("xhtml1"
            and "html4") be used as "xhtml" or "html" may change in the future
            if it makes sense at that time.
        * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
        * html_replacement_text: Text used when safe_mode is set to "replace".
        * tab_length: Length of tabs in the source. Default: 4
        * enable_attributes: Enable the conversion of attributes. Default: True
        * smart_emphasis: Treat `_connected_words_` intelligently Default: True
        """
        # Copy each known option from kwargs (or its default) onto self.
        for option, default in self.option_defaults.items():
            setattr(self, option, kwargs.get(option, default))

        self.safeMode = kwargs.get('safe_mode', False)
        self.registeredExtensions = []
        self.docType = ""
        self.stripTopLevelTags = True

        self.build_parser()

        self.references = {}
        self.htmlStash = util.HtmlStash()
        self.registerExtensions(extensions = extensions,
                                configs = kwargs.get('extension_configs', {}))
        self.set_output_format(kwargs.get('output_format', 'xhtml1'))
        self.reset()

    def build_parser(self):
        """ Build the parser from the various parts. """
        self.preprocessors = build_preprocessors(self)
        self.parser = build_block_parser(self)
        self.inlinePatterns = build_inlinepatterns(self)
        self.treeprocessors = build_treeprocessors(self)
        self.postprocessors = build_postprocessors(self)
        return self

    def registerExtensions(self, extensions, configs):
        """
        Register extensions with this instance of Markdown.

        Keyword arguments:

        * extensions: A list of extensions, which can either
           be strings or objects.  See the docstring on Markdown.
        * configs: A dictionary mapping module names to config options.
        """
        for ext in extensions:
            # String entries name an extension module to load first.
            if isinstance(ext, basestring):
                ext = self.build_extension(ext, configs.get(ext, []))
            if isinstance(ext, Extension):
                try:
                    ext.extendMarkdown(self, globals())
                except NotImplementedError, e:
                    message(ERROR, e)
            else:
                message(ERROR,
                        'Extension "%s.%s" must be of type: "markdown.Extension".' \
                        % (ext.__class__.__module__, ext.__class__.__name__))
        return self

    def build_extension(self, ext_name, configs = []):
        """Build extension by name, then return the module.

        The extension name may contain arguments as part of the string in the
        following format: "extname(key1=value1,key2=value2)"
        """
        # Parse extensions config params (ignore the order)
        configs = dict(configs)
        pos = ext_name.find("(") # find the first "("
        if pos > 0:
            ext_args = ext_name[pos+1:-1]
            ext_name = ext_name[:pos]
            pairs = [x.split("=") for x in ext_args.split(",")]
            configs.update([(x.strip(), y.strip()) for (x, y) in pairs])

        # Setup the module names
        ext_module = 'markdown.extensions'
        module_name_new_style = '.'.join([ext_module, ext_name])
        module_name_old_style = '_'.join(['mdx', ext_name])

        # Try loading the extension first from one place, then another
        try: # New style (markdown.extensions.<extension>)
            module = __import__(module_name_new_style, {}, {}, [ext_module])
        except ImportError:
            try: # Old style (mdx_<extension>)
                module = __import__(module_name_old_style)
            except ImportError:
                message(WARN, "Failed loading extension '%s' from '%s' or '%s'"
                        % (ext_name, module_name_new_style, module_name_old_style))
                # Return None so we don't try to initiate a non-existent extension
                return None

        # If the module is loaded successfully, we expect it to define a
        # function called makeExtension()
        try:
            return module.makeExtension(configs.items())
        except AttributeError, e:
            message(CRITICAL, "Failed to initiate extension '%s': %s" % (ext_name, e))

    def registerExtension(self, extension):
        """ This gets called by the extension """
        self.registeredExtensions.append(extension)
        return self

    def reset(self):
        """
        Resets all state variables so that we can start with a new text.
        """
        self.htmlStash.reset()
        self.references.clear()

        # Give each registered extension a chance to clear its own state.
        for extension in self.registeredExtensions:
            if hasattr(extension, 'reset'):
                extension.reset()
        return self

    def set_output_format(self, format):
        """ Set the output format for the class instance. """
        try:
            self.serializer = self.output_formats[format.lower()]
        except KeyError:
            message(CRITICAL,
                    'Invalid Output Format: "%s". Use one of %s.' \
                    % (format, self.output_formats.keys()))
        return self

    def convert(self, source):
        """
        Convert markdown to serialized XHTML or HTML.

        Keyword arguments:

        * source: Source text as a Unicode string.

        Markdown processing takes place in five steps:

        1. A bunch of "preprocessors" munge the input text.
        2. BlockParser() parses the high-level structural elements of the
           pre-processed text into an ElementTree.
        3. A bunch of "treeprocessors" are run against the ElementTree. One
           such treeprocessor runs InlinePatterns against the ElementTree,
           detecting inline markup.
        4. Some post-processors are run against the text after the ElementTree
           has been serialized into text.
        5. The output is written to a string.
        """
        # Fixup the source text
        if not source.strip():
            return u""  # a blank unicode string
        try:
            source = unicode(source)
        except UnicodeDecodeError:
            message(CRITICAL,
                    'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
            return u""

        # Normalize line endings, strip sentinel chars, pad the end so the
        # parser always sees a trailing blank line.
        source = source.replace(util.STX, "").replace(util.ETX, "")
        source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
        source = re.sub(r'\n\s+\n', '\n\n', source)
        source = source.expandtabs(self.tab_length)

        # Split into lines and run the line preprocessors.
        self.lines = source.split("\n")
        for prep in self.preprocessors.values():
            self.lines = prep.run(self.lines)

        # Parse the high-level elements.
        root = self.parser.parseDocument(self.lines).getroot()

        # Run the tree-processors
        for treeprocessor in self.treeprocessors.values():
            newRoot = treeprocessor.run(root)
            if newRoot:
                root = newRoot

        # Serialize _properly_. Strip top-level tags.
        output, length = codecs.utf_8_decode(self.serializer(root, encoding="utf-8"))
        if self.stripTopLevelTags:
            try:
                start = output.index('<%s>'%self.doc_tag)+len(self.doc_tag)+2
                end = output.rindex('</%s>'%self.doc_tag)
                output = output[start:end].strip()
            except ValueError:
                if output.strip().endswith('<%s />'%self.doc_tag):
                    # We have an empty document
                    output = ''
                else:
                    # We have a serious problem
                    message(CRITICAL, 'Failed to strip top level tags.')

        # Run the text post-processors
        for pp in self.postprocessors.values():
            output = pp.run(output)

        return output.strip()

    def convertFile(self, input=None, output=None, encoding=None):
        """Converts a markdown file and returns the HTML as a unicode string.

        Decodes the file using the provided encoding (defaults to utf-8),
        passes the file content to markdown, and outputs the html to either
        the provided stream or the file with provided name, using the same
        encoding as the source file.

        **Note:** This is the only place that decoding and encoding of unicode
        takes place in Python-Markdown.  (All other code is unicode-in /
        unicode-out.)

        Keyword arguments:

        * input: File object or path of file as string.
        * output: Name of output file. Writes to stdout if `None`.
        * encoding: Encoding of input and output files. Defaults to utf-8.
        """

        encoding = encoding or "utf-8"

        # Read the source
        if isinstance(input, basestring):
            input_file = codecs.open(input, mode="r", encoding=encoding)
        else:
            input_file = input
        text = input_file.read()
        input_file.close()
        text = text.lstrip(u'\ufeff') # remove the byte-order mark

        # Convert
        html = self.convert(text)

        # Write to file or stdout
        if isinstance(output, (str, unicode)):
            output_file = codecs.open(output, "w", encoding=encoding)
            output_file.write(html)
            output_file.close()
        else:
            output.write(html.encode(encoding))

        return self
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text, *args, **kwargs):
    """Render a markdown string to HTML and return it as unicode.

    Convenience wrapper around the `Markdown` class for the common
    one-shot case: build an instance (loading any requested extensions)
    and run it over `text`.

    Keyword arguments:

    * text: Markdown formatted text as Unicode or ASCII string.
    * extensions: A list of extensions or extension names (may contain config args).
    * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
    * output_format: One of "xhtml1" (default), "xhtml", "html4" or "html";
      prefer the specific forms, as the generic ones may change meaning
      in future releases.

    Returns: An HTML document as a string.
    """
    return Markdown(*args, **kwargs).convert(text)
def markdownFromFile(input = None,
                     output = None,
                     extensions = None,
                     encoding = None,
                     *args, **kwargs):
    """Read markdown code from a file and write it to a file or a stream.

    Keyword arguments:

    * input: File object or path of the markdown input file.
    * output: File object or path of the output file; stdout if None.
    * extensions: A list of extensions or extension names (may contain
      config args). Defaults to no extensions.
    * encoding: Encoding of the input and output files.

    Fix: `extensions` previously defaulted to a shared mutable list
    (`[]`); the `None` sentinel avoids the mutable-default-argument
    pitfall while keeping the effective default behaviour unchanged.
    """
    md = Markdown(extensions=extensions or [], *args, **kwargs)
    md.convertFile(input, output, encoding)
| |
from __future__ import division
import numpy as np
import math
def error_norm(y1, y2, atol, rtol):
    """RMS-scaled norm of (y1 - y2) against a componentwise tolerance.

    The per-component tolerance is atol + max(y1, y2)*rtol; the result
    is the 2-norm of the scaled difference divided by sqrt(len(y1)).
    """
    scale = atol + rtol*np.maximum(y1, y2)
    scaled_diff = (y1 - y2)/scale
    return np.linalg.norm(scaled_diff)/math.sqrt(len(y1))
def adapt_step(method, f, tn_1, yn_1, y, y_hat, h, p, atol, rtol):
    '''
    Repeatedly shrinks (or grows) the step size until the local error
    estimate is acceptable (err <= 1 in the scaled norm).

    **Inputs**:
        - method       -- the extrapolation stepper; called as
                          method(f, tn_1, yn_1, h, p) -> (y, y_hat, fe)
        - f            -- right hand side of the IVP (returns ndarray)
        - tn_1, yn_1   -- last accepted time/value pair
        - y, y_hat     -- current order-p and order-(p-1) estimates
        - h            -- step size that produced y, y_hat
        - p            -- order of the higher method (assumed > 1)
        - atol, rtol   -- absolute/relative local error tolerances

    **Outputs**: (y, y_hat, h, h_new, fe) -- the accepted estimates, the
    accepted step, the proposed next step, and the f-evaluation count.
    '''
    FAC_MAX, FAC_MIN, SAFETY = 5, 0.2, 0.8

    def propose(step, err):
        # classic controller: step * safety * err**(-1/p), clamped to
        # [FAC_MIN, FAC_MAX] times the current step
        return step*min(FAC_MAX, max(FAC_MIN, SAFETY*((1/err)**(1/p))))

    fe = 0
    err = error_norm(y, y_hat, atol, rtol)
    h_new = propose(h, err)
    while err > 1:
        h = h_new
        y, y_hat, evals = method(f, tn_1, yn_1, h, p)
        fe += evals
        err = error_norm(y, y_hat, atol, rtol)
        h_new = propose(h, err)
    return (y, y_hat, h, h_new, fe)
def extrapolation_serial(method, f, t0, tf, y0, adaptive="order", p=4,
        step_size=0.5, atol=0, rtol=0, exact=(lambda t: t)):
    '''
    Solves the system of IVPs y'(t) = f(y, t) with extrapolation of order
    p based on method provided. The implementation is serial.

    **Inputs**:
        - method    -- the method on which the extrapolation is based
        - f         -- the right hand side function of the IVP
                    Must output a non-scalar numpy.ndarray
        - [t0, tf]  -- the interval of integration
        - y0        -- the value of y(t0). Must be a non-scalar numpy.ndarray
        - adaptive  -- can be either of three values:
                        "fixed" = use fixed step size and order.
                        "step"  = use adaptive step size but fixed order.
                        "order" = use adaptive step size and adaptive order.
                    optional; defaults to "order"
        - p         -- the order of extrapolation if adaptive is not "order",
                    and the starting order otherwise. optional; defaults to 4
        - step_size -- the fixed step size when adaptive="fixed", and the
                    starting step size otherwise. optional; defaults to 0.5
        - atol, rtol -- the absolute and relative tolerance of the local
                    error. optional; both default to 0
        - exact     -- the exact solution to the IVP. Only used for
                    debugging. optional; defaults to (lambda t: t)

    **Outputs**: (y, fe) -- the computed y(tf) and the number of f
    evaluations. NOTE: the "order" branch instead returns a 4-tuple
    (y, fe, mean step size, mean order).

    Fix: np.linspace requires an integer sample count; the previous code
    passed the float (tf-t0)/step_size + 1, which raises TypeError on
    modern NumPy. The count is now rounded to the nearest int, which is
    identical whenever (tf-t0) is a multiple of step_size.
    '''
    fe = 0
    if adaptive == "fixed":
        # fixed grid: (tf-t0)/step_size steps of equal width h
        num_points = int(round((tf-t0)/step_size)) + 1
        ts, h = np.linspace(t0, tf, num_points, retstep=True)
        y = y0
        for i in range(len(ts) - 1):
            y, _, fe_ = method(f, ts[i], y, h, p)
            fe += fe_
    elif adaptive == "step":
        assert p > 1, "order of method must be greater than 1 if adaptive=True"
        y, t = y0, t0
        h = min(step_size, tf-t0)
        while t < tf:
            # take a trial step, then let adapt_step accept/shrink it
            y_, y_hat, fe_ = method(f, t, y, h, p)
            fe += fe_
            y, _, h, h_new, fe_ = adapt_step(method, f, t, y, y_, y_hat, h, p,
                atol, rtol)
            t, fe = t + h, fe + fe_
            h = min(h_new, tf - t)
        return (y, fe)
    elif adaptive == "order":
        y, t, k = y0, t0, p
        h = min(step_size, tf-t0)
        sum_ks, sum_hs = 0, 0
        num_iter = 0
        while t < tf:
            # method here is an adaptive-order stepper returning the new
            # step/order proposals alongside the solution
            y, h, k, h_new, k_new, _, _, fe_ = method(f, t, y, h, k, atol, rtol)
            t, fe = t + h, fe + fe_
            sum_ks += k
            sum_hs += h
            num_iter += 1
            h = min(h_new, tf - t)
            k = k_new
        return (y, fe, sum_hs/num_iter, sum_ks/num_iter)
    else:
        raise Exception("\'" + str(adaptive) +
            "\' is not a valid value for the argument \'adaptive\'")
    return (y, fe)
def euler_fixed_step(f, tn, yn, h, p):
    """One extrapolation step of order p based on the explicit Euler method.

    Builds the first column of the T-tableau from Euler solves with
    1..p substeps, then extrapolates (Aitken-Neville). Returns
    (T[p,p], T[p-1,p-1], fe): order-p and order-(p-1) estimates of
    y(tn + h) plus the number of f evaluations performed.
    """
    dim = len(yn)
    elem_type = type(yn[0])
    Y = np.zeros((p+1, p+1, dim), dtype=elem_type)
    T = np.zeros((p+1, p+1, dim), dtype=elem_type)
    fe = 0
    # first column: Euler with k substeps of size h/k
    for k in range(1, p+1):
        Y[k, 0] = yn
        for j in range(1, k+1):
            Y[k, j] = Y[k, j-1] + (h/k)*f(Y[k, j-1], tn + j*(h/k))
            fe += 1
        T[k, 1] = Y[k, k]
    # extrapolate across columns
    for k in range(2, p+1):
        for j in range(k, p+1):
            T[j, k] = T[j, k-1] + (T[j, k-1] - T[j-1, k-1])/((j/(j-k+1)) - 1)
    return (T[p, p], T[p-1, p-1], fe)
def midpoint_fixed_step(f, tn, yn, h, p):
    """One extrapolation step of (even) order p based on the explicit
    midpoint rule.

    Uses r = round(p/2) tableau rows; row k takes 2k midpoint substeps
    of size h/(2k), seeded by one Euler half-step. Returns
    (T[r,r], T[r-1,r-1], fe).

    NOTE(review): the f evaluation used for the Euler starter Y[k,1] is
    not counted in fe — this matches the original bookkeeping.
    """
    r = int(round(p/2))
    elem_type = type(yn[0])
    Y = np.zeros((r+1, 2*r+1, len(yn)), dtype=elem_type)
    T = np.zeros((r+1, r+1, len(yn)), dtype=elem_type)
    fe = 0
    for k in range(1, r+1):
        sub = h/(2*k)                                # midpoint substep
        Y[k, 0] = yn
        Y[k, 1] = Y[k, 0] + sub*f(Y[k, 0], tn)       # Euler starter
        for j in range(2, 2*k+1):
            # two-step midpoint update: y_j = y_{j-2} + (h/k) f(y_{j-1})
            Y[k, j] = Y[k, j-2] + (h/k)*f(Y[k, j-1], tn + (j-1)*sub)
            fe += 1
        T[k, 1] = Y[k, 2*k]
    # extrapolate across columns (squared ratios: midpoint is order 2)
    for k in range(2, r+1):
        for j in range(k, r+1):
            T[j, k] = T[j, k-1] + (T[j, k-1] - T[j-1, k-1])/((j/(j-k+1))**2 - 1)
    return (T[r, r], T[r-1, r-1], fe)
def midpoint_adapt_order(f, tn, yn, h, k, atol, rtol):
    """Single adaptive-order, adaptive-step extrapolation step based on
    the explicit midpoint rule (Gragg-Bulirsch-Stoer style control).

    Builds tableau lines 1..k-1, then — from the scaled error estimates
    of lines k-2 and k-1 — either accepts the result, extends to line k
    or k+1, or rejects (h, k) and recurses with smaller values.

    Returns (y, h, k, h_new, k_new, h_rej, k_rej, fe) where h_rej/k_rej
    collect the rejected step sizes/orders and fe counts f evaluations.
    NOTE(review): as in midpoint_fixed_step, the Euler-starter f call
    for each line (Y[i,1]) is not counted in fe.
    """
    k_max = 10
    k_min = 3
    k = min(k_max, max(k_min, k))
    # A_k: work (f evals) of line k; H_k: step controller; W_k: work per
    # unit step — used to pick the cheapest order.
    A_k = lambda k: k*(k+1)
    H_k = lambda h, k, err_k: h*0.94*(0.65/err_k)**(1/(2*k-1))
    W_k = lambda Ak,Hk: Ak/Hk
    Y = np.zeros((k+2,2*(k+1)+1, len(yn)), dtype=(type(yn[0])))
    T = np.zeros((k+2,k+2, len(yn)), dtype=(type(yn[0])))
    fe = 0
    h_rej = []
    k_rej = []
    # compute the first k-1 lines extrapolation tableau
    for i in range(1,k):
        Y[i,0] = yn
        Y[i,1] = Y[i,0] + h/(2*i)*f(Y[i,0], tn)
        for j in range(2,2*i+1):
            Y[i,j] = Y[i,j-2] + (h/i)*f(Y[i,j-1], tn + (j-1)*(h/(2*i)))
            fe += 1
        T[i,1] = Y[i,2*i]
    for i in range(2, k):
        for j in range(i, k):
            T[j,i] = T[j,i-1] + (T[j,i-1] - T[j-1,i-1])/((j/(j-i+1))**2 - 1)
    # error estimates and candidate step sizes for lines k-2 and k-1
    err_k_2 = error_norm(T[k-2,k-3], T[k-2,k-2], atol, rtol)
    err_k_1 = error_norm(T[k-1,k-2], T[k-1,k-1], atol, rtol)
    h_k_2 = H_k(h, k-2, err_k_2)
    h_k_1 = H_k(h, k-1, err_k_1)
    w_k_2 = W_k(A_k(k-2), h_k_2)
    w_k_1 = W_k(A_k(k-1), h_k_1)
    if err_k_1 <= 1:
        # convergence in line k-1
        y = T[k-1,k-1]
        k_new = k if w_k_1 < 0.9*w_k_2 else k-1
        h_new = h_k_1 if k_new <= k-1 else h_k_1*A_k(k)/A_k(k-1)
    elif err_k_1 > ((k+1)*k)**2:
        # convergence monitor
        # reject (h, k) and restart with new values accordingly
        k_new = k-1
        h_new = min(h_k_1, h)
        h_rej.append(h)
        k_rej.append(k)
        y, h, k, h_new, k_new, h_rej_, k_rej_, fe_ = midpoint_adapt_order(f, tn,
            yn, h_new, k_new, atol, rtol)
        fe += fe_
        h_rej += h_rej_
        k_rej += k_rej_
    else:
        # compute line k of extrapolation tableau
        Y[k,0] = yn
        Y[k,1] = Y[k,0] + h/(2*k)*f(Y[k,0], tn)
        for j in range(2,2*k+1):
            Y[k,j] = Y[k,j-2] + (h/k)*f(Y[k,j-1], tn + (j-1)*(h/(2*k)))
            fe += 1
        T[k,1] = Y[k,2*k]
        for i in range(2, k+1):
            T[k,i] = T[k,i-1] + (T[k,i-1] - T[k-1,i-1])/((k/(k-i+1))**2 - 1)
        err_k = error_norm(T[k,k-1], T[k,k], atol, rtol)
        h_k = H_k(h, k, err_k)
        w_k = W_k(A_k(k), h_k)
        if err_k <= 1:
            # convergence in line k
            y = T[k,k]
            k_new = k-1 if w_k_1 < 0.9*w_k else (
                k+1 if w_k < 0.9*w_k_1 else k)
            h_new = h_k_1 if k_new == k-1 else (
                h_k if k_new == k else h_k*A_k(k+1)/A_k(k))
        elif err_k > (k+1)**2:
            # second convergence monitor
            # reject (h, k) and restart with new values accordingly
            k_new = k-1 if w_k_1 < 0.9*w_k else k
            h_new = min(h_k_1 if k_new == k-1 else h_k, h)
            h_rej.append(h)
            k_rej.append(k)
            y, h, k, h_new, k_new, h_rej_, k_rej_, fe_ = midpoint_adapt_order(f,
                tn, yn, h_new, k_new, atol, rtol)
            fe += fe_
            h_rej += h_rej_
            k_rej += k_rej_
        else:
            # hope for convergence in line k+1
            # compute line k+1 of extrapolation tableau
            Y[(k+1),0] = yn
            Y[(k+1),1] = Y[(k+1),0] + h/(2*(k+1))*f(Y[(k+1),0], tn)
            for j in range(2,2*(k+1)+1):
                Y[(k+1),j] = Y[(k+1),j-2] + (h/(k+1))*f(Y[(k+1),j-1], tn + (j-1)*(h/(2*(k+1))))
                fe += 1
            T[(k+1),1] = Y[(k+1),2*(k+1)]
            for i in range(2, (k+1)+1):
                T[(k+1),i] = T[(k+1),i-1] + (T[(k+1),i-1] - T[(k+1)-1,i-1])/(((k+1)/((k+1)-i+1))**2 - 1)
            err_k1 = error_norm(T[(k+1),(k+1)-1], T[(k+1),(k+1)], atol, rtol)
            h_k1 = H_k(h, (k+1), err_k1)
            w_k1 = W_k(A_k((k+1)), h_k1)
            if err_k1 <= 1:
                # convergence in line k+1
                y = T[k+1,k+1]
                if w_k_1 < 0.9*w_k:
                    k_new = k+1 if w_k1 < 0.9*w_k_1 else k-1
                else:
                    k_new = k+1 if w_k1 < 0.9*w_k else k
                h_new = h_k_1 if k_new == k-1 else (
                    h_k if k_new == k else h_k1)
            else:
                # no convergence
                # reject (h, k) and restart with new values accordingly
                k_new = k-1 if w_k_1 < 0.9*w_k else k
                h_new = min(h_k_1 if k_new == k-1 else h_k, h)
                h_rej.append(h)
                k_rej.append(k)
                y, h, k, h_new, k_new, h_rej_, k_rej_, fe_ = midpoint_adapt_order(f,
                    tn, yn, h_new, k_new, atol, rtol)
                fe += fe_
                h_rej += h_rej_
                k_rej += k_rej_
    return (y, h, k, h_new, k_new, h_rej, k_rej, fe)
def ex_euler_serial(f, t0, tf, y0, adaptive="order", p=4, step_size=0.5, atol=0,
        rtol=0, exact=(lambda t: t)):
    '''
    Extrapolation solver driven by Euler's method; thin wrapper around
    extrapolation_serial(). See that function for argument semantics.

    TODO: adaptive-order extrapolation based on Euler is not implemented.
    '''
    if adaptive == "order":
        raise NotImplementedError
    return extrapolation_serial(euler_fixed_step, f, t0, tf, y0,
        adaptive=adaptive, p=p, step_size=step_size, atol=atol, rtol=rtol,
        exact=exact)
def ex_midpoint_serial(f, t0, tf, y0, adaptive="order", p=4, step_size=0.5, atol=0,
        rtol=0, exact=(lambda t: t)):
    '''
    Extrapolation solver driven by the explicit midpoint method; thin
    wrapper around extrapolation_serial(). See that function for details.
    '''
    if adaptive == "order":
        stepper = midpoint_adapt_order
    else:
        stepper = midpoint_fixed_step
    return extrapolation_serial(stepper, f, t0, tf, y0,
        adaptive=adaptive, p=p, step_size=step_size, atol=atol, rtol=rtol,
        exact=exact)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| |
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import serializers
from rest_framework import generics
from rest_framework import status
from rest_framework.generics import GenericAPIView
from core.models import *
from django.forms import widgets
from rest_framework import filters
from django.conf.urls import patterns, url
from rest_framework.exceptions import PermissionDenied as RestFrameworkPermissionDenied
from django.core.exceptions import PermissionDenied as DjangoPermissionDenied
from apibase import XOSRetrieveUpdateDestroyAPIView, XOSListCreateAPIView, XOSNotAuthenticated
# Pick an id-field class compatible with the installed rest_framework
# version (the class was renamed between 2.x and 3.x).
if hasattr(serializers, "ReadOnlyField"):
    # rest_framework 3.x
    IdField = serializers.ReadOnlyField
else:
    # rest_framework 2.x
    IdField = serializers.Field
"""
Schema of the generator object:
all: Set of all Model objects
all_if(regex): Set of Model objects that match regex
Model object:
plural: English plural of object name
camel: CamelCase version of object name
refs: list of references to other Model objects
props: list of properties minus refs
TODO: Deal with subnets
"""
def get_REST_patterns():
    # Build the URL patterns for the generated REST API: the API root
    # plus one list and one detail route per generated model.  The
    # {% ... %} / {{ ... }} markers are template directives expanded by
    # the XOS code generator — this file is a template, not plain Python.
    return patterns('',
        url(r'^xos/$', api_root),
    {% for object in generator.all %}
        url(r'xos/{{ object.rest_name }}/$', {{ object.camel }}List.as_view(), name='{{ object.singular }}-list'),
        url(r'xos/{{ object.rest_name }}/(?P<pk>[a-zA-Z0-9\-]+)/$', {{ object.camel }}Detail.as_view(), name ='{{ object.singular }}-detail'),
    {% endfor %}
    )
@api_view(['GET'])
def api_root(request, format=None):
    # API root view: maps each generated model's plural name to the URL
    # of its list endpoint ({% for %} is expanded by the code generator).
    return Response({
        {% for object in generator.all %}'{{ object.plural }}': reverse('{{ object }}-list', request=request, format=format),
        {% endfor %}
    })
# Based on serializers.py
class XOSModelSerializer(serializers.ModelSerializer):
    # ModelSerializer that can save ManyToMany relations which go through
    # an explicit (non-auto-created) through table.
    def save_object(self, obj, **kwargs):
        """ rest_framework can't deal with ManyToMany relations that have a
            through table. In xos, most of the through tables we have
            use defaults or blank fields, so there's no reason why we shouldn't
            be able to save these objects.
            So, let's strip out these m2m relations, and deal with them ourself.
        """
        # stash (relatedObject, data) pairs for through-table m2m fields so
        # the base class doesn't choke on them
        obj._complex_m2m_data={};
        if getattr(obj, '_m2m_data', None):
            for relatedObject in obj._meta.get_all_related_many_to_many_objects():
                if (relatedObject.field.rel.through._meta.auto_created):
                    # These are non-through ManyToMany relations and
                    # can be updated just fine
                    continue
                fieldName = relatedObject.get_accessor_name()
                if fieldName in obj._m2m_data.keys():
                    obj._complex_m2m_data[fieldName] = (relatedObject, obj._m2m_data[fieldName])
                    del obj._m2m_data[fieldName]
        serializers.ModelSerializer.save_object(self, obj, **kwargs);
        # now reconcile each deferred relation: diff the desired set against
        # the existing through rows and add/delete accordingly
        for (accessor, stuff) in obj._complex_m2m_data.items():
            (relatedObject, data) = stuff
            through = relatedObject.field.rel.through
            local_fieldName = relatedObject.field.m2m_reverse_field_name()
            remote_fieldName = relatedObject.field.m2m_field_name()
            # get the current set of existing relations
            existing = through.objects.filter(**{local_fieldName: obj});
            data_ids = [item.id for item in data]
            existing_ids = [getattr(item,remote_fieldName).id for item in existing]
            #print "data_ids", data_ids
            #print "existing_ids", existing_ids
            # remove relations that are in 'existing' but not in 'data'
            for item in list(existing):
                 if (getattr(item,remote_fieldName).id not in data_ids):
                     print "delete", getattr(item,remote_fieldName)
                     item.delete() #(purge=True)
            # add relations that are in 'data' but not in 'existing'
            for item in data:
                 if (item.id not in existing_ids):
                     #print "add", item
                     newModel = through(**{local_fieldName: obj, remote_fieldName: item})
                     newModel.save()
# Template: for each generated model emit two serializers — a hyperlinked
# one for normal REST output and a PK-based one (built on
# XOSModelSerializer) used when ?no_hyperlinks is requested — plus a
# lookup table from model class to serializer class.
{% for object in generator.all %}
class {{ object.camel }}Serializer(serializers.HyperlinkedModelSerializer):
    id = IdField()
    {% for ref in object.refs %}
    {% if ref.multi %}
    {{ ref.plural }} = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name='{{ ref }}-detail')
    {% else %}
    {{ ref }} = serializers.HyperlinkedRelatedField(read_only=True, view_name='{{ ref }}-detail')
    {% endif %}
    {% endfor %}
    humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
    validators = serializers.SerializerMethodField("getValidators")
    def getHumanReadableName(self, obj):
        return str(obj)
    def getValidators(self, obj):
        try:
            return obj.getValidators()
        except:
            return None
    class Meta:
        model = {{ object.camel }}
        fields = ('humanReadableName', 'validators', {% for prop in object.props %}'{{ prop }}',{% endfor %}{% for ref in object.refs %}{%if ref.multi %}'{{ ref.plural }}'{% else %}'{{ ref }}'{% endif %},{% endfor %})
class {{ object.camel }}IdSerializer(XOSModelSerializer):
    id = IdField()
    {% for ref in object.refs %}
    {% if ref.multi %}
    {{ ref.plural }} = serializers.PrimaryKeyRelatedField(many=True, queryset = {{ ref.camel }}.objects.all())
    {% else %}
    {{ ref }} = serializers.PrimaryKeyRelatedField( queryset = {{ ref.camel }}.objects.all())
    {% endif %}
    {% endfor %}
    humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
    validators = serializers.SerializerMethodField("getValidators")
    def getHumanReadableName(self, obj):
        return str(obj)
    def getValidators(self, obj):
        try:
            return obj.getValidators()
        except:
            return None
    class Meta:
        model = {{ object.camel }}
        fields = ('humanReadableName', 'validators', {% for prop in object.props %}'{{ prop }}',{% endfor %}{% for ref in object.refs %}{%if ref.multi %}'{{ ref.plural }}'{% else %}'{{ ref }}'{% endif %},{% endfor %})
{% endfor %}
serializerLookUp = {
{% for object in generator.all %}
                 {{ object.camel }}: {{ object.camel }}Serializer,
{% endfor %}
                 None: None,
                }
# Based on core/views/*.py
# Template: one List view and one Detail view per generated model.  Both
# switch to the PK-based serializer when ?no_hyperlinks is set and scope
# their queryset to the authenticated user via select_by_user().
{% for object in generator.all %}
class {{ object.camel }}List(XOSListCreateAPIView):
    queryset = {{ object.camel }}.objects.select_related().all()
    serializer_class = {{ object.camel }}Serializer
    id_serializer_class = {{ object.camel }}IdSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filter_fields = ({% for prop in object.props %}'{{ prop }}',{% endfor %}{% for ref in object.refs %}{%if ref.multi %}'{{ ref.plural }}'{% else %}'{{ ref }}'{% endif %},{% endfor %})
    def get_serializer_class(self):
        no_hyperlinks=False
        if hasattr(self.request,"QUERY_PARAMS"):
            no_hyperlinks = self.request.QUERY_PARAMS.get('no_hyperlinks', False)
        if (no_hyperlinks):
            return self.id_serializer_class
        else:
            return self.serializer_class
    def get_queryset(self):
        if (not self.request.user.is_authenticated()):
            raise XOSNotAuthenticated()
        return {{ object.camel }}.select_by_user(self.request.user)
class {{ object.camel }}Detail(XOSRetrieveUpdateDestroyAPIView):
    queryset = {{ object.camel }}.objects.select_related().all()
    serializer_class = {{ object.camel }}Serializer
    id_serializer_class = {{ object.camel }}IdSerializer
    def get_serializer_class(self):
        no_hyperlinks=False
        if hasattr(self.request,"QUERY_PARAMS"):
            no_hyperlinks = self.request.QUERY_PARAMS.get('no_hyperlinks', False)
        if (no_hyperlinks):
            return self.id_serializer_class
        else:
            return self.serializer_class
    def get_queryset(self):
        if (not self.request.user.is_authenticated()):
            raise XOSNotAuthenticated()
        return {{ object.camel }}.select_by_user(self.request.user)
    # update() is handled by XOSRetrieveUpdateDestroyAPIView
    # destroy() is handled by XOSRetrieveUpdateDestroyAPIView
{% endfor %}
| |
import collections
import collections.abc
import math
import unittest

from ghugh import util
from ghugh.ffann import *
from ghugh.backprop import *
class Layer(collections.abc.Sequence):
    """Minimal read-only sequence of neuron outputs (test double).

    Fix: ``collections.Sequence`` is an alias that was removed in
    Python 3.10; the ABC lives in ``collections.abc`` (since 3.3).
    """
    def __init__(self, outputs):
        super().__init__()
        self._outputs = outputs
    def __len__(self):
        # Sequence protocol: number of outputs.
        return len(self._outputs)
    def __getitem__(self, index):
        return self._outputs[index]
class Weighted(Layer):
    # Layer double carrying a weight matrix; a truthy `bias` prepends a
    # constant-1 output.  weightsTo/weightsAt give row/column access.
    def __init__(self, outputs, weights, bias):
        if bias: super().__init__([1] + outputs)
        else: super().__init__(outputs)
        self._weights = weights
        self.bias = lambda: bias
        # column view of the weight matrix (util.transposed is a project
        # helper; presumably returns the transpose — verify there)
        self._weightsAt = util.transposed(self._weights)
    def weightsTo(self, index):
        # weights feeding output neuron `index` (matrix row)
        return self._weights[index]
    def weightsAt(self, index):
        # weights leaving input `index` (matrix column)
        return self._weightsAt[index]
class LayerMock(Weighted):
    # Weighted layer double with a pluggable activation derivative `df`.
    def __init__(self, outputs, weights, bias, df):
        super().__init__(outputs, weights, bias)
        self.dfunction = lambda: df
class OLayerMock(Layer):
    # Output-layer double: outputs plus an activation derivative `df`.
    def __init__(self, outputs, df):
        super().__init__(outputs)
        self.dfunction = lambda: df
class TestOutputDeltas(unittest.TestCase):
    # Output-layer deltas: delta[i] = error[i] * df(o[i]); with df = identity
    # and targets (3, 6) against outputs (1, 2), errors are (2, 4).
    def setUp(self):
        self.o = OLayerMock((1, 2), lambda y: y)
        self.algo = Output(self.o)
    def testDeltas(self):
        error = self.algo.propagate((3, 6))
        # propagate() is expected to return the summed squared error / 2
        self.assertEqual(2.0+8.0, error)
        deltas = list(self.algo.deltas())
        self.assertEqual(2, len(deltas))
        self.assertEqual(1*2, deltas[0])
        self.assertEqual(2*4, deltas[1])
class WeightedLayerAlgo(object):
    # Shared fixture for ONLINE (per-pattern) weight updates of a weighted
    # layer.  Subclasses provide self.h (the layer) and self.algo via init().
    # This class maintains a parallel "expected" model of the update math.
    def init(self, bias):
        self.bias = bias
        self.ws = [[4, 5, 6],
                   [7, 8, 9]]
        if self.bias:
            self.ws[0].insert(0, 31)
            self.ws[1].insert(0, 23)
        self.deltaWs = [[0.0]*len(self.ws[0]) for _ in range(len(self.ws))]
        self.oldDeltaWs = [[0.0]*len(self.ws[0])
                           for _ in range(len(self.ws[0]))]
        self.i = [1, 2, 3]
    def updateAlgo(self, odelta1, odelta2, LR, M):
        # Drive the algorithm under test, then recompute the expected
        # weight deltas by hand for comparison.
        self.algo.update([odelta1, odelta2], LR, M)
        # deltaW[j][i] = LR*o[i]*odeltas[j] + M*oldDeltaWij
        s = self.bias and 1 or 0
        self.deltaWs = [[LR*self.i[0]*odelta1 + M*self.oldDeltaWs[s][0],
                         LR*self.i[0]*odelta2 + M*self.oldDeltaWs[s][1]],
                        [LR*self.i[1]*odelta1 + M*self.oldDeltaWs[s+1][0],
                         LR*self.i[1]*odelta2 + M*self.oldDeltaWs[s+1][1]],
                        [LR*self.i[2]*odelta1 + M*self.oldDeltaWs[s+2][0],
                         LR*self.i[2]*odelta2 + M*self.oldDeltaWs[s+2][1]]]
        if self.bias:
            self.deltaWs.insert(0, [LR*1*odelta1 + M*self.oldDeltaWs[0][0],
                                    LR*1*odelta2 + M*self.oldDeltaWs[0][1]])
        # w[j][i] = w[j][i] + deltaW[i][j]
        for j,dws in enumerate(self.deltaWs):
            for i,dw in enumerate(dws):
                self.ws[i][j] += dw
    def testLayerWeights(self):
        self.updateAlgo(11, 12, 100, 1000)
        actual = self.h._weights
        self.assertEqual(self.ws, actual)
    def testAlgoDeltaWeights(self):
        self.updateAlgo(11, 12, 100, 1000)
        oldDeltas = self.deltaWs
        self.assertEqual(oldDeltas, self.algo._oldWDeltas)
    def testLayerWeightsTwoPass(self):
        self.updateAlgo(11, 12, 100, 1000)
        # second pass must fold the first pass's deltas in via momentum
        self.oldDeltaWs = [list(i) for i in self.deltaWs]
        self.updateAlgo(-13, -14, 100, 1000)
        self.assertEqual(self.ws, self.h._weights)
    def testAlgoDeltaWeightsTwoPass(self):
        self.updateAlgo(11, 12, 100, 1000)
        self.oldDeltaWs = [list(i) for i in self.deltaWs]
        self.updateAlgo(-13, -14, 100, 1000)
        oldDeltas = [list(i) for i in self.deltaWs]
        self.assertEqual(oldDeltas, self.algo._oldWDeltas)
class HiddenAlgo(WeightedLayerAlgo):
    # Online fixture wiring a Hidden algorithm to a mocked weighted layer.
    def init(self, bias):
        super().init(bias)
        self.h = LayerMock(self.i, [list(i) for i in self.ws],
                           self.bias, lambda y: y)
        self.algo = Hidden(self.h, 2, False)
class TestHiddenAlgoOnlineNoBias(HiddenAlgo, unittest.TestCase):
    # Online hidden-layer updates without a bias unit.
    def setUp(self):
        super().init(False)
    def deltas(self, odelta1, odelta2):
        # delta[i] = sumOf(odelta[j]*ws[j][i]*df(o[i]) j=0..J)
        # (s skips the bias column when present)
        s = self.bias and 1 or 0
        deltas = [odelta1*self.ws[0][s]*self.i[0] + \
                  odelta2*self.ws[1][s]*self.i[0],
                  odelta1*self.ws[0][s+1]*self.i[1] + \
                  odelta2*self.ws[1][s+1]*self.i[1],
                  odelta1*self.ws[0][s+2]*self.i[2] + \
                  odelta2*self.ws[1][s+2]*self.i[2]]
        return deltas
    def testDeltas(self):
        expected = self.deltas(11, 12)
        self.algo.update([11, 12], 1, 1)
        actual = list(self.algo.deltas())
        self.assertEqual(expected, actual)
class TestHiddenAlgoOnlineBias(TestHiddenAlgoOnlineNoBias):
    # Same checks as the no-bias variant, with a bias unit enabled.
    def setUp(self):
        self.init(True)
class InputAlgo(WeightedLayerAlgo):
    # Online fixture wiring an Input algorithm to a mocked weighted layer.
    def init(self, bias):
        super().init(bias)
        self.h = LayerMock(self.i, [list(i) for i in self.ws],
                           self.bias, lambda y: y)
        self.algo = Input(self.h, 2, False)
class TestInputAlgoOnlineNoBias(InputAlgo, unittest.TestCase):
    # Online input-layer updates without a bias unit.
    def setUp(self):
        self.init(False)
class TestInputAlgoOnlineBias(TestInputAlgoOnlineNoBias):
    # Same checks as the no-bias variant, with a bias unit enabled.
    def setUp(self):
        self.init(True)
class WeightedAlgoBatch(object):
    # Shared fixture for BATCH updates: deltas are accumulated across
    # patterns (updateAlgo/updateDeltaWs) and only applied to the weights
    # at epoch end (updateWeights).  Subclasses provide self.h/self.algo.
    def init(self, bias):
        self.bias = bias
        self.i = [1, 2, 3]
        self.ws = [[4, 5, 6],
                   [7, 8, 9]]
        if self.bias:
            self.ws[0].insert(0, 31)
            self.ws[1].insert(0, 23)
        self.deltaWs = [[0.0]*len(self.ws) for _ in range(len(self.ws[0]))]
        self.accuDeltaWs = \
                [[0.0]*len(self.ws) for _ in range(len(self.ws[0]))]
        self.oldDeltaWs = \
                [[0.0]*len(self.ws) for _ in range(len(self.ws[0]))]
    def updateAlgo(self, odelta1, odelta2, LR, M):
        # In batch mode update() only accumulates; LR/M are applied later.
        self.algo.update([odelta1, odelta2], LR, M)
        # deltaW[j][i] = o[i]*odeltas[j] + M*oldDeltaWij
        s = self.bias and 1 or 0
        self.deltaWs = [[self.i[0]*odelta1, self.i[0]*odelta2],
                        [self.i[1]*odelta1, self.i[1]*odelta2],
                        [self.i[2]*odelta1, self.i[2]*odelta2]]
        if self.bias:
            self.deltaWs.insert(0, [1*odelta1, 1*odelta2])
    def updateDeltaWs(self, deltaWs):
        # accumulate this pattern's deltas into the expected epoch total
        self.accuDeltaWs = [[o+n for o,n in zip(os,ns)]
                            for os,ns in zip(self.accuDeltaWs, deltaWs)]
    def updateWeights(self, deltaWs, LR, M):
        # w[j][i] = w[j][i] + deltaWs[i][j]
        # deltaWs[i][j] = LR*deltaWs[i][j] + M*oldDeltaWs[i][j]
        for j,dws in enumerate(deltaWs):
            for i,dw in enumerate(dws):
                dw = LR*dw + M*self.oldDeltaWs[j][i]
                self.ws[i][j] += dw
                dws[i] = 0.0
                self.oldDeltaWs[j][i] = dw
    def testAccuDeltaWsOnePass(self):
        self.updateAlgo(17, 23, 100, 1000)
        self.updateDeltaWs(self.deltaWs)
        expected = self.deltaWs
        actual = self.algo._wDeltas
        self.assertEqual(expected, actual)
    def testAccuDeltaWsTwoPass(self):
        self.testAccuDeltaWsOnePass()
        self.updateAlgo(31, 37, 100, 1000)
        self.updateDeltaWs(self.deltaWs)
        expected = self.accuDeltaWs
        actual = self.algo._wDeltas
        self.assertEqual(expected, actual)
    def testAccuDeltaWsThreePass(self):
        self.testAccuDeltaWsTwoPass()
        self.updateAlgo(41, 43, 100, 1000)
        self.updateDeltaWs(self.deltaWs)
        expected = self.accuDeltaWs
        actual = self.algo._wDeltas
        self.assertEqual(expected, actual)
    def testLayerWeights(self):
        self.testAccuDeltaWsThreePass()
        self.algo.updateWeights(100, 1000)
        self.updateWeights(self.accuDeltaWs, 100, 1000)
        expected = self.ws
        actual = self.h._weights
        self.assertEqual(expected, actual)
    def testOldDeltasOneEpoch(self):
        self.testLayerWeights()
        expected = self.oldDeltaWs
        actual = self.algo._oldWDeltas
        self.assertEqual(expected, actual)
    def deltaTest(self, od1, od2):
        # hook overridden by subclasses that also verify back-propagated deltas
        pass
    def test2ndEpoch(self):
        self.testOldDeltasOneEpoch()
        for odeltas in ((47, 49), (53, 59), (61, 67)):
            self.updateAlgo(odeltas[0], odeltas[1], 1, 1)
            self.deltaTest(odeltas[0], odeltas[1])
            self.updateDeltaWs(self.deltaWs)
            expected = self.accuDeltaWs
            actual = self.algo._wDeltas
            self.assertEqual(expected, actual)
        self.algo.updateWeights(100, 1000)
        self.updateWeights(self.accuDeltaWs, 100, 1000)
        expected = self.ws
        actual = self.h._weights
        self.assertEqual(expected, actual)
        expected = self.oldDeltaWs
        actual = self.algo._oldWDeltas
        self.assertEqual(expected, actual)
class HiddenAlgoBatch(WeightedAlgoBatch):
    # Batch fixture wiring a Hidden algorithm (batch=True) to a mock layer.
    def init(self, bias):
        super().init(bias)
        self.h = LayerMock(self.i, [list(i) for i in self.ws],
                           self.bias, lambda y: y)
        self.algo = Hidden(self.h, 2, True)
    def deltas(self, odelta1, odelta2):
        # expected back-propagated deltas (identity derivative):
        # delta[i] = sum_j odelta[j] * ws[j][i] * o[i]
        s = self.bias and 1 or 0
        deltas = [odelta1*self.ws[0][s]*self.i[0] + \
                  odelta2*self.ws[1][s]*self.i[0],
                  odelta1*self.ws[0][s+1]*self.i[1] + \
                  odelta2*self.ws[1][s+1]*self.i[1],
                  odelta1*self.ws[0][s+2]*self.i[2] + \
                  odelta2*self.ws[1][s+2]*self.i[2]]
        return deltas
    def deltaTest(self, od1, od2):
        expected = self.deltas(od1, od2)
        actual = self.algo.deltas()
        self.assertEqual(expected, actual)
    def testDeltas(self):
        self.algo.update([17, 23], 1, 1)
        self.deltaTest(17, 23)
class TestHiddenAlgoBatchNoBias(HiddenAlgoBatch, unittest.TestCase):
    # Batch hidden-layer updates without a bias unit.
    def setUp(self):
        super().init(False)
class TestHiddenAlgoBatchBias(TestHiddenAlgoBatchNoBias):
    # Same checks as the no-bias variant, with a bias unit enabled.
    def setUp(self):
        self.init(True)
class InputAlgoBatch(WeightedAlgoBatch):
    # Batch fixture wiring an Input algorithm (batch=True) to a mock layer.
    def init(self, bias):
        super().init(bias)
        self.h = LayerMock(self.i, [list(i) for i in self.ws],
                           self.bias, lambda y: y)
        self.algo = Input(self.h, 2, True)
class TestInputAlgoBatchNoBias(InputAlgoBatch, unittest.TestCase):
    # Batch input-layer updates without a bias unit.
    def setUp(self):
        super().init(False)
class TestInputAlgoBatchBias(TestInputAlgoBatchNoBias):
    # Same checks as the no-bias variant, with a bias unit enabled.
    def setUp(self):
        self.init(True)
class BackpropAlgo(object):
    # End-to-end fixture: a 2-3-4-1 network trained on the XOR dataset.
    def init(self, bias, batch):
        self.i = InputLayer(2, 3, bias=bias)
        self.h1 = HiddenLayer(3, 4, bias=bias)
        self.h2 = HiddenLayer(4, 1, bias=bias)
        self.o = OutputLayer(1)
        self.net = Net(self.i, self.h1, self.h2, self.o)
        # XOR truth table: [inputs, target]
        self.dataset = [[[1, 1], [0]],
                        [[1, 0], [1]],
                        [[0, 1], [1]],
                        [[0, 0], [0]]]
        self.algo = Backpropagation(self.net, batch=batch)
class TestBackpropagation(BackpropAlgo):
    # Smoke test: one training call must run without raising.
    def init(self, bias, batch):
        super().init(bias, batch)
    def test0(self):
        self.algo.train(self.dataset, 0.1, 0.2)
class TestBackpropagationOnlineNoBias(TestBackpropagation,
                                      unittest.TestCase):
    # Online training, no bias units.
    def setUp(self):
        super().init(None, False)
class TestBackpropagationOnlineBias(TestBackpropagation,
                                    unittest.TestCase):
    # Online training with bias units.
    def setUp(self):
        super().init(1, False)
class TestBackpropagationBatchNoBias(TestBackpropagation,
                                     unittest.TestCase):
    # Batch training, no bias units.
    def setUp(self):
        super().init(None, True)
class TestBackpropagationBatchBias(TestBackpropagation,
                                   unittest.TestCase):
    # Batch training with bias units.
    def setUp(self):
        super().init(1, True)
if __name__ == "__main__":
    # Run all test cases when executed as a script.
    unittest.main()
| |
#!/usr/bin/env python
import cv2
import numpy as np
import rospy
import tf # not tensorflow - local library
import yaml
from cv_bridge import CvBridge
from geometry_msgs.msg import PointStamped, Pose, PoseStamped
from image_geometry import PinholeCameraModel
from sensor_msgs.msg import CameraInfo, Image
from std_msgs.msg import Int32
from tf.transformations import euler_from_quaternion
from light_classification.tl_classifier import TLClassifier
from styx_msgs.msg import TrafficLight, TrafficLightArray
from styx_msgs.msg import Lane
from sensor_msgs.msg import CameraInfo
from random import randint
from scipy.misc import imsave
import os
import time
STATE_COUNT_THRESHOLD = 3
def distance2(a, b):
    r'''Return the squared distance between `a` and `b`.

    If `a` is a `n X d` matrix and `b` a `d`-dimensional array, return a
    flat 1-D view of squared distances between each `a[i]` and `b`;
    otherwise return the scalar squared distance.
    '''
    squared = (a - b) ** 2.0
    if squared.ndim > 1:
        # row-wise sums, exposed through a flat iterator (matches the
        # historical return type relied on by callers)
        return np.sum(squared, axis=1).flat
    return np.sum(squared)
def distance(a, b):
    r'''Return the Euclidean distance between `a` and `b`.

    If `a` is a `n X d` matrix and `b` a `d`-dimensional array, return a
    1-D array of distances between each `a[i]` and `b`.
    '''
    # sqrt of the squared distance(s) computed by distance2()
    return np.sqrt(distance2(a, b))
def closest(P, p):
    r'''Given a `n X d` matrix `P` and a d-dimensional point `p`, return
    the index `i` such that `P[i]` is the point closest to `p` in `P`.
    '''
    # squared distance preserves the argmin and avoids the sqrt
    return np.argmin(distance2(P, p))
def save_training_data(image, label):
    r'''giving the whole camera image, the portion containing the traffic
    lights and the current light state, saving the image to files for later
    use

    Writes the image twice (cv2 and scipy encoders) into a per-label
    directory under a hard-coded local path; file name is the current
    unix timestamp.
    '''
    # downsample label 0 (~70% dropped) to keep the classes balanced —
    # presumably 0 is the dominant "unknown/no light" label; verify
    if label == 0 and randint(0, 9) < 7:
        return
    # NOTE(review): hard-coded, machine-specific output path — the target
    # directories must already exist or the writes will fail
    path_prefix="/home/michael/tl_test2/"
    path = path_prefix + str(label)
    current_millis = time.time()
    image_name = path + "/" + str(current_millis) + ".jpg"
    image_name2 = path + "/" + str(current_millis) + "sci" + ".jpg"
    cv2.imwrite(image_name, image)
    imsave(image_name2, image)
class TLDetector(object):
    def __init__(self):
        """Initialize the traffic-light detector node: camera model,
        classifier, state-debounce bookkeeping, subscribers and
        publishers; blocks in rospy.spin()."""
        rospy.init_node('tl_detector')
        # pinhole camera model built from the on-disk calibration file
        self.camera = PinholeCameraModel()
        self.camera.fromCameraInfo(self.load_camera_info())
        self.camera_image = None
        self.pose = None
        self.stop_indexes = None
        self.traffic_lights = None
        # used to get traffic lights state ground truth from /vehicle/traffic_lights
        # topic, this is used to gather training data for traffic lights classifier
        self.traffic_lights_state = None
        self.waypoints = None
        self.previous_light_state = TrafficLight.UNKNOWN
        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()
        # debounce state for image_cb (see STATE_COUNT_THRESHOLD)
        self.last_state = TrafficLight.UNKNOWN
        self.state = TrafficLight.UNKNOWN
        self.state_count = 0
        self.last_wp = -1
        config = yaml.load(rospy.get_param("/traffic_light_config"))
        self.stop_lines = np.array(config['stop_line_positions'])
        self.subscribers = [
            rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb, queue_size=1),
            rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size=1),
            rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1),
            rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb, queue_size=1)
        ]
        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
        self.image_zoomed = rospy.Publisher('/image_zoomed', Image, queue_size=1)
        # keeps python from exiting until node is stopped
        rospy.spin()
    def load_camera_info(self):
        """Load the simulator camera calibration from a YAML file and
        return it as a sensor_msgs CameraInfo message.

        NOTE(review): the path is relative to the process working
        directory, and yaml.load without an explicit Loader is unsafe on
        untrusted input (prefer yaml.safe_load for plain config files).
        """
        calib_yaml_fname = "./calibration_simulator.yaml"
        calib_data = None
        with open(calib_yaml_fname) as f:
            calib_data = yaml.load(f.read())
        camera_info_msg = CameraInfo()
        camera_info_msg.width = calib_data["image_width"]
        camera_info_msg.height = calib_data["image_height"]
        camera_info_msg.K = calib_data["camera_matrix"]["data"]
        camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
        camera_info_msg.R = calib_data["rectification_matrix"]["data"]
        camera_info_msg.P = calib_data["projection_matrix"]["data"]
        camera_info_msg.distortion_model = calib_data["distortion_model"]
        return camera_info_msg
    def pose_cb(self, msg):
        # /current_pose callback: remember the latest vehicle pose.
        self.pose = msg
    def waypoints_cb(self, waypoints):
        # /base_waypoints callback.  The track waypoints are static, so
        # only the first message is processed.
        if self.waypoints is not None:
            return
        # cache (x, y) of every track waypoint...
        self.waypoints = np.array([[w.pose.pose.position.x, w.pose.pose.position.y] for w in waypoints.waypoints])
        # ...and precompute, for each stop line, the nearest waypoint index
        self.stop_indexes = np.array([closest(self.waypoints, light) for light in self.stop_lines])
    def image_cb(self, msg):
        r'''Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        '''
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            # prediction changed: restart the debounce counter
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            # state is stable: accept it
            self.last_state = self.state
            if (
                state == TrafficLight.GREEN
                or (
                    state == TrafficLight.YELLOW
                    and self.state_count < 2 * STATE_COUNT_THRESHOLD
                )
            ):
                # green (or a fresh yellow) means no stop is required
                light_wp = -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            # not yet stable: keep publishing the last accepted waypoint
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1
    def traffic_cb(self, msg):
        # /vehicle/traffic_lights callback: cache light positions (and
        # ground-truth states, simulator only).
        def pose(light):
            position = light.pose.pose.position
            return [position.x, position.y, position.z]
        self.traffic_lights = np.array([pose(light) for light in msg.lights])
        ## used to gather training data for the traffic light classifier;
        # will not work on the actual Carla self-driving car
        self.traffic_lights_state = np.array([light.state for light in msg.lights])
    def get_closest_waypoint(self, pose):
        r'''Identifies the closest path waypoint to the given position
        https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to
        Returns:
            int: index of the closest waypoint in self.waypoints
        '''
        # Delegate to the module-level nearest-neighbour helper over the
        # cached 2-D waypoint array.
        p = np.array([pose.position.x, pose.position.y])
        return closest(self.waypoints, p)
    def project_to_image_plane(self, point3d):
        r'''Project point from 3D world coordinates to 2D camera image location
        Args:
            point3d (Point): 3D location of a point in the world
        Returns:
            x (int): x coordinate of target point in image
            y (int): y coordinate of target point in image
        '''
        # Stamp the point with the camera frame's timestamp so TF looks up the
        # transform matching the image.
        stamp = self.camera_image.header.stamp
        p_world = PointStamped()
        p_world.header.seq = self.camera_image.header.seq
        p_world.header.stamp = stamp
        p_world.header.frame_id = '/world'
        p_world.point.x = point3d[0]
        p_world.point.y = point3d[1]
        p_world.point.z = point3d[2]
        # Transform point from world to camera frame.
        # NOTE(review): the target frame is /base_link, not a camera optical
        # frame — this assumes the camera extrinsics relative to the vehicle
        # base are negligible; verify against the TF tree.
        self.listener.waitForTransform('/base_link', '/world', stamp, rospy.Duration(1.0))
        p_camera = self.listener.transformPoint('/base_link', p_world)
        # The navigation frame has X pointing forward, Y left and Z up, whereas the
        # vision frame has X pointing right, Y down and Z forward; hence the need to
        # reassign axes here.
        x = -p_camera.point.y
        y = -p_camera.point.z
        z = p_camera.point.x
        return self.camera.project3dToPixel((x, y, z))
def get_light_state(self, light_index):
r'''Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
'''
light = self.traffic_lights[light_index]
# state = self.traffic_lights_state[light_index]
if self.camera_image is None:
self.prev_light_loc = None
return TrafficLight.UNKNOWN
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
resized = cv2.resize(cv_image, (400, 300))
# save_training_data(resized, self.traffic_lights_state[light_index])
state = self.light_classifier.get_classification(resized)
return state
    def process_traffic_lights(self):
        r'''Finds closest visible traffic light, if one exists, and determines its
        location and color
        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        '''
        # Don't process if there are no waypoints or current position.
        if any(v is None for v in [self.waypoints, self.stop_indexes, self.pose]):
            return (-1, TrafficLight.UNKNOWN)
        # Get car's position.
        i_car = self.get_closest_waypoint(self.pose.pose)
        p_car = self.waypoints[i_car]
        # Get the closest stop line's index.
        j_stop = closest(self.stop_lines, p_car)
        i_stop = self.stop_indexes[j_stop]
        # If the car is ahead of the closest stop line, get the next one.
        # The modulo wraps around to the first stop line at the end of a lap.
        if i_car > i_stop:
            j_stop = (j_stop + 1) % len(self.stop_indexes)
            i_stop = self.stop_indexes[j_stop]
        # Don't process if the closest stop line is too far.
        # (70.0 is in waypoint-coordinate units — presumably meters; confirm.)
        if distance(p_car, self.stop_lines[j_stop]) > 70.0:
            # Log only on transition to UNKNOWN to avoid spamming the log.
            if self.previous_light_state != TrafficLight.UNKNOWN:
                rospy.logwarn("light state is %s", TrafficLight.UNKNOWN)
            self.previous_light_state = TrafficLight.UNKNOWN
            return (-1, TrafficLight.UNKNOWN)
        # Return the index and state of the traffic light.
        state = self.get_light_state(j_stop)
        # self.save_classifier_training_data(j_stop)
        if state != self.previous_light_state:
            rospy.logwarn("light state is %s", state)
            self.previous_light_state = state
        return (i_stop, state)
# Node entry point: constructing TLDetector registers subscribers/publishers
# and spins until ROS shuts the node down.
if __name__ == '__main__':
    try:
        TLDetector()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic node.')
| |
# Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Soft Actor-Critic training.
See: https://arxiv.org/pdf/1812.05905.pdf
"""
import os
import time
from typing import Any, Callable, Dict, Mapping, Optional, Tuple
from absl import logging
from brax import envs
from brax.io import model
from brax.training import distribution
from brax.training import networks
from brax.training import normalization
from brax.training import pmap
from brax.training.types import Params
from brax.training.types import PRNGKey
import flax
from flax import linen
import jax
import jax.numpy as jnp
import numpy as onp
import optax
Metrics = Mapping[str, jnp.ndarray]
@flax.struct.dataclass
class Transition:
  """Contains data for one environment step."""
  o_tm1: jnp.ndarray  # observation before the step
  a_tm1: jnp.ndarray  # action taken at o_tm1
  r_t: jnp.ndarray  # reward received for the step
  o_t: jnp.ndarray  # observation after the step
  d_t: jnp.ndarray  # discount (1-done)
  truncation_t: jnp.ndarray  # 1 when the episode ended by step limit, else 0
# The rewarder allows changing the reward before the learner trains on it.
RewarderState = Any
# (observation size, rng key) -> initial rewarder state.
RewarderInit = Callable[[int, PRNGKey], RewarderState]
# (state, transitions, rng key) -> (new state, new rewards, metrics).
ComputeReward = Callable[[RewarderState, Transition, PRNGKey],
                         Tuple[RewarderState, jnp.ndarray, Metrics]]
Rewarder = Tuple[RewarderInit, ComputeReward]
@flax.struct.dataclass
class ReplayBuffer:
  """Contains data related to a replay buffer."""
  data: jnp.ndarray  # flat transition rows, written circularly
  current_position: jnp.ndarray  # next write index into `data`
  current_size: jnp.ndarray  # number of valid rows currently stored
@flax.struct.dataclass
class TrainingState:
  """Contains training state for the learner."""
  policy_optimizer_state: optax.OptState
  policy_params: Params
  q_optimizer_state: optax.OptState
  q_params: Params
  target_q_params: Params  # slowly-updated copy of q_params (see tau)
  key: PRNGKey
  steps: jnp.ndarray  # number of gradient updates performed
  alpha_optimizer_state: optax.OptState
  alpha_params: Params  # log of the entropy temperature alpha
  normalizer_params: Params
  # This is passed to the rewarder to update the reward.
  rewarder_state: Any
def make_sac_networks(
    param_size: int,
    obs_size: int,
    action_size: int,
    hidden_layer_sizes: Tuple[int, ...] = (256, 256),
) -> Tuple[networks.FeedForwardModel, networks.FeedForwardModel]:
  """Builds the SAC policy network and the twin-critic value network.

  Returns a ``(policy, value)`` pair of FeedForwardModels; the policy maps
  observations to distribution parameters of size ``param_size``, the value
  model maps (obs, action) to one Q estimate per critic.
  """
  policy_module = networks.MLP(
      layer_sizes=hidden_layer_sizes + (param_size,),
      activation=linen.relu,
      kernel_init=jax.nn.initializers.lecun_uniform())

  class QModule(linen.Module):
    """Q Module."""
    n_critics: int = 2

    @linen.compact
    def __call__(self, obs: jnp.ndarray, actions: jnp.ndarray):
      joint = jnp.concatenate([obs, actions], axis=-1)
      # One independent MLP head per critic, stacked along the last axis.
      estimates = [
          networks.MLP(
              layer_sizes=hidden_layer_sizes + (1,),
              activation=linen.relu,
              kernel_init=jax.nn.initializers.lecun_uniform())(joint)
          for _ in range(self.n_critics)
      ]
      return jnp.concatenate(estimates, axis=-1)

  q_module = QModule()
  dummy_obs = jnp.zeros((1, obs_size))
  dummy_action = jnp.zeros((1, action_size))
  policy = networks.FeedForwardModel(
      init=lambda key: policy_module.init(key, dummy_obs),
      apply=policy_module.apply)
  value = networks.FeedForwardModel(
      init=lambda key: q_module.init(key, dummy_obs, dummy_action),
      apply=q_module.apply)
  return policy, value
def train(
    environment_fn: Callable[..., envs.Env],
    num_timesteps,
    episode_length: int,
    action_repeat: int = 1,
    num_envs: int = 1,
    num_eval_envs: int = 128,
    learning_rate: float = 1e-4,
    discounting: float = 0.9,
    seed: int = 0,
    batch_size: int = 256,
    log_frequency: int = 10000,
    normalize_observations: bool = False,
    max_devices_per_host: Optional[int] = None,
    reward_scaling: float = 1.,
    tau: float = 0.005,
    min_replay_size: int = 8192,
    max_replay_size: int = 1048576,
    grad_updates_per_step: float = 1,
    progress_fn: Optional[Callable[[int, Dict[str, Any]], None]] = None,
    # The rewarder is an init function and a compute_reward function.
    # It is used to change the reward before the learner trains on it.
    make_rewarder: Optional[Callable[[], Rewarder]] = None,
    checkpoint_dir: Optional[str] = None):
  """SAC training.

  Runs Soft Actor-Critic on `environment_fn` for `num_timesteps` environment
  steps, evaluating and logging every `log_frequency` steps.  Returns a
  ``(inference_fn, params, metrics)`` tuple.
  """
  # Replay insertion happens num_envs transitions at a time, and the buffer
  # pre-fill loop advances in the same increments, so sizes must divide.
  assert min_replay_size % num_envs == 0
  assert max_replay_size % min_replay_size == 0
  # jax.config.update('jax_log_compiles', True)
  # ---- Device / host topology. ----
  process_count = jax.process_count()
  process_id = jax.process_index()
  local_device_count = jax.local_device_count()
  local_devices_to_use = local_device_count
  if max_devices_per_host:
    local_devices_to_use = min(local_devices_to_use, max_devices_per_host)
  logging.info(
      'Device count: %d, process count: %d (id %d), local device count: %d, '
      'devices to be used count: %d', jax.device_count(), process_count,
      process_id, local_device_count, local_devices_to_use)
  # The replay buffer is sharded across local devices, so sizes are divided
  # per device below.
  assert max_replay_size % local_devices_to_use == 0
  assert min_replay_size % local_devices_to_use == 0
  assert num_envs % local_devices_to_use == 0
  max_replay_size = max_replay_size // local_devices_to_use
  min_replay_size = min_replay_size // local_devices_to_use
  # ---- RNG keys: a global key shared by all hosts (model init) and a
  # per-process local key (env stepping / sampling). ----
  key = jax.random.PRNGKey(seed)
  global_key, local_key = jax.random.split(key)
  del key
  local_key = jax.random.fold_in(local_key, process_id)
  key_models, key_rewarder = jax.random.split(global_key, 2)
  local_key, key_env, key_eval = jax.random.split(local_key, 3)
  # ---- Training and evaluation environments. ----
  core_env = environment_fn(
      action_repeat=action_repeat,
      batch_size=num_envs // local_devices_to_use // process_count,
      episode_length=episode_length)
  key_envs = jax.random.split(key_env, local_devices_to_use)
  step_fn = jax.jit(core_env.step)
  reset_fn = jax.jit(jax.vmap(core_env.reset))
  first_state = reset_fn(key_envs)
  eval_env = environment_fn(
      action_repeat=action_repeat,
      batch_size=num_eval_envs,
      episode_length=episode_length, eval_metrics=True)
  eval_step_fn = jax.jit(eval_env.step)
  eval_first_state = jax.jit(eval_env.reset)(key_eval)
  parametric_action_distribution = distribution.NormalTanhDistribution(
      event_size=core_env.action_size)
  _, obs_size = eval_first_state.obs.shape
  # ---- Networks and optimizers (alpha = entropy temperature). ----
  policy_model, value_model = make_sac_networks(
      parametric_action_distribution.param_size, obs_size, core_env.action_size)
  log_alpha = jnp.asarray(0., dtype=jnp.float32)
  alpha_optimizer = optax.adam(learning_rate=3e-4)
  alpha_optimizer_state = alpha_optimizer.init(log_alpha)
  policy_optimizer = optax.adam(learning_rate=learning_rate)
  q_optimizer = optax.adam(learning_rate=learning_rate)
  key_policy, key_q = jax.random.split(key_models)
  policy_params = policy_model.init(key_policy)
  policy_optimizer_state = policy_optimizer.init(policy_params)
  q_params = value_model.init(key_q)
  q_optimizer_state = q_optimizer.init(q_params)
  # Replicate params/optimizer states onto every local device for pmap.
  policy_optimizer_state, policy_params = pmap.bcast_local_devices(
      (policy_optimizer_state, policy_params), local_devices_to_use)
  q_optimizer_state, q_params = pmap.bcast_local_devices(
      (q_optimizer_state, q_params), local_devices_to_use)
  alpha_optimizer_state, log_alpha = pmap.bcast_local_devices(
      (alpha_optimizer_state, log_alpha), local_devices_to_use)
  normalizer_params, obs_normalizer_update_fn, obs_normalizer_apply_fn = (
      normalization.create_observation_normalizer(
          obs_size,
          normalize_observations,
          pmap_to_devices=local_devices_to_use))
  # Optional rewarder that rewrites rewards before the learner sees them.
  if make_rewarder is not None:
    init, compute_reward = make_rewarder()
    rewarder_state = init(obs_size, key_rewarder)
    rewarder_state = pmap.bcast_local_devices(rewarder_state,
                                              local_devices_to_use)
  else:
    rewarder_state = None
    compute_reward = None
  key_debug = jax.random.PRNGKey(seed + 666)

  # EVAL
  def do_one_step_eval(carry, unused_target_t):
    # One policy step in the eval environment (stochastic action sample).
    state, policy_params, normalizer_params, key = carry
    key, key_sample = jax.random.split(key)
    obs = obs_normalizer_apply_fn(normalizer_params, state.obs)
    logits = policy_model.apply(policy_params, obs)
    actions = parametric_action_distribution.sample(logits, key_sample)
    nstate = eval_step_fn(state, actions)
    return (nstate, policy_params, normalizer_params, key), ()

  @jax.jit
  def run_eval(state, key, policy_params,
               normalizer_params) -> Tuple[envs.State, PRNGKey]:
    # Params arrive replicated across devices; evaluation runs on a single
    # device, so take the first replica.
    policy_params, normalizer_params = jax.tree_map(
        lambda x: x[0], (policy_params, normalizer_params))
    (state, _, _, key), _ = jax.lax.scan(
        do_one_step_eval, (state, policy_params, normalizer_params, key), (),
        length=episode_length // action_repeat)
    return state, key
  # SAC
  # Target policy entropy used by the automatic temperature adjustment.
  target_entropy = -0.5 * core_env.action_size

  def alpha_loss(log_alpha: jnp.ndarray, policy_params: Params,
                 transitions: Transition, key: PRNGKey) -> jnp.ndarray:
    """Eq 18 from https://arxiv.org/pdf/1812.05905.pdf."""
    dist_params = policy_model.apply(policy_params, transitions.o_tm1)
    action = parametric_action_distribution.sample_no_postprocessing(
        dist_params, key)
    log_prob = parametric_action_distribution.log_prob(dist_params, action)
    alpha = jnp.exp(log_alpha)
    alpha_loss = alpha * jax.lax.stop_gradient(-log_prob - target_entropy)
    return jnp.mean(alpha_loss)

  def critic_loss(q_params: Params, policy_params: Params,
                  target_q_params: Params, alpha: jnp.ndarray,
                  transitions: Transition, key: PRNGKey) -> jnp.ndarray:
    """Soft Bellman residual against the target critics."""
    q_old_action = value_model.apply(q_params, transitions.o_tm1,
                                     transitions.a_tm1)
    next_dist_params = policy_model.apply(policy_params, transitions.o_t)
    next_action = parametric_action_distribution.sample_no_postprocessing(
        next_dist_params, key)
    next_log_prob = parametric_action_distribution.log_prob(
        next_dist_params, next_action)
    next_action = parametric_action_distribution.postprocess(next_action)
    next_q = value_model.apply(target_q_params, transitions.o_t, next_action)
    # Soft value: pessimistic min over the twin critics minus entropy bonus.
    next_v = jnp.min(next_q, axis=-1) - alpha * next_log_prob
    target_q = jax.lax.stop_gradient(transitions.r_t * reward_scaling +
                                     transitions.d_t * discounting * next_v)
    q_error = q_old_action - jnp.expand_dims(target_q, -1)
    # Better bootstrapping for truncated episodes.
    q_error *= jnp.expand_dims(1 - transitions.truncation_t, -1)
    q_loss = 0.5 * jnp.mean(jnp.square(q_error))
    return q_loss

  def actor_loss(policy_params: Params, q_params: Params, alpha: jnp.ndarray,
                 transitions: Transition, key: PRNGKey) -> jnp.ndarray:
    """Entropy-regularized policy objective."""
    dist_params = policy_model.apply(policy_params, transitions.o_tm1)
    action = parametric_action_distribution.sample_no_postprocessing(
        dist_params, key)
    log_prob = parametric_action_distribution.log_prob(dist_params, action)
    action = parametric_action_distribution.postprocess(action)
    q_action = value_model.apply(q_params, transitions.o_tm1, action)
    min_q = jnp.min(q_action, axis=-1)
    actor_loss = alpha * log_prob - min_q
    return jnp.mean(actor_loss)

  alpha_grad = jax.jit(jax.value_and_grad(alpha_loss))
  critic_grad = jax.jit(jax.value_and_grad(critic_loss))
  actor_grad = jax.jit(jax.value_and_grad(actor_loss))
  @jax.jit
  def update_step(
      state: TrainingState,
      transitions: jnp.ndarray,
  ) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
    # Unpack the flat replay row back into a Transition; the layout must
    # match collect_data: [o_tm1 | o_t | a_tm1 | r_t | d_t | truncation_t].
    normalized_transitions = Transition(
        o_tm1=obs_normalizer_apply_fn(state.normalizer_params,
                                      transitions[:, :obs_size]),
        o_t=obs_normalizer_apply_fn(state.normalizer_params,
                                    transitions[:, obs_size:2 * obs_size]),
        a_tm1=transitions[:, 2 * obs_size:2 * obs_size + core_env.action_size],
        r_t=transitions[:, -3],
        d_t=transitions[:, -2],
        truncation_t=transitions[:, -1])
    (key, key_alpha, key_critic, key_actor,
     key_rewarder) = jax.random.split(state.key, 5)
    # Optionally rewrite rewards via the rewarder before training on them.
    if compute_reward is not None:
      new_rewarder_state, rewards, rewarder_metrics = compute_reward(
          state.rewarder_state, normalized_transitions, key_rewarder)
      # Assertion prevents building errors.
      assert hasattr(normalized_transitions, 'replace')
      normalized_transitions = normalized_transitions.replace(r_t=rewards)
    else:
      new_rewarder_state = state.rewarder_state
      rewarder_metrics = {}
    alpha_loss, alpha_grads = alpha_grad(state.alpha_params,
                                         state.policy_params,
                                         normalized_transitions, key_alpha)
    alpha = jnp.exp(state.alpha_params)
    critic_loss, critic_grads = critic_grad(state.q_params, state.policy_params,
                                            state.target_q_params, alpha,
                                            normalized_transitions, key_critic)
    actor_loss, actor_grads = actor_grad(state.policy_params, state.q_params,
                                         alpha, normalized_transitions,
                                         key_actor)
    # Average gradients across devices before applying updates.
    alpha_grads = jax.lax.pmean(alpha_grads, axis_name='i')
    critic_grads = jax.lax.pmean(critic_grads, axis_name='i')
    actor_grads = jax.lax.pmean(actor_grads, axis_name='i')
    policy_params_update, policy_optimizer_state = policy_optimizer.update(
        actor_grads, state.policy_optimizer_state)
    policy_params = optax.apply_updates(state.policy_params,
                                        policy_params_update)
    q_params_update, q_optimizer_state = q_optimizer.update(
        critic_grads, state.q_optimizer_state)
    q_params = optax.apply_updates(state.q_params, q_params_update)
    alpha_params_update, alpha_optimizer_state = alpha_optimizer.update(
        alpha_grads, state.alpha_optimizer_state)
    alpha_params = optax.apply_updates(state.alpha_params, alpha_params_update)
    # Polyak-average the target critics towards the online critics.
    new_target_q_params = jax.tree_multimap(
        lambda x, y: x * (1 - tau) + y * tau, state.target_q_params, q_params)
    metrics = {
        'critic_loss': critic_loss,
        'actor_loss': actor_loss,
        'alpha_loss': alpha_loss,
        'alpha': jnp.exp(alpha_params),
        **rewarder_metrics
    }
    new_state = TrainingState(
        policy_optimizer_state=policy_optimizer_state,
        policy_params=policy_params,
        q_optimizer_state=q_optimizer_state,
        q_params=q_params,
        target_q_params=new_target_q_params,
        key=key,
        steps=state.steps + 1,
        alpha_optimizer_state=alpha_optimizer_state,
        alpha_params=alpha_params,
        normalizer_params=state.normalizer_params,
        rewarder_state=new_rewarder_state)
    return new_state, metrics
  def collect_data(training_state: TrainingState, state):
    # One environment step with the current policy; returns the next env
    # state and the flattened transition row to append to the replay buffer.
    key, key_sample = jax.random.split(training_state.key)
    normalized_obs = obs_normalizer_apply_fn(training_state.normalizer_params,
                                             state.obs)
    logits = policy_model.apply(training_state.policy_params, normalized_obs)
    actions = parametric_action_distribution.sample_no_postprocessing(
        logits, key_sample)
    postprocessed_actions = parametric_action_distribution.postprocess(actions)
    nstate = step_fn(state, postprocessed_actions)
    # Keep the running observation statistics up to date.
    normalizer_params = obs_normalizer_update_fn(
        training_state.normalizer_params, state.obs)
    training_state = training_state.replace(
        key=key, normalizer_params=normalizer_params)
    # Concatenating data into a single data blob performs faster than 5
    # separate tensors.
    concatenated_data = jnp.concatenate([
        state.obs,
        nstate.obs,
        postprocessed_actions,
        jnp.expand_dims(nstate.reward, axis=-1),
        jnp.expand_dims(1 - nstate.done, axis=-1),
        jnp.expand_dims(nstate.info['truncation'], axis=-1),
    ],
                                        axis=-1)
    return training_state, nstate, concatenated_data

  def collect_and_update_buffer(training_state, state, replay_buffer):
    # Circular-buffer write of the newly collected rows at current_position.
    training_state, state, newdata = collect_data(training_state, state)
    new_replay_data = jax.tree_multimap(
        lambda x, y: jax.lax.dynamic_update_slice_in_dim(
            x,
            y,
            replay_buffer.current_position,
            axis=0),
        replay_buffer.data,
        newdata)
    new_position = (replay_buffer.current_position +
                    num_envs // local_devices_to_use) % max_replay_size
    new_size = jnp.minimum(
        replay_buffer.current_size + num_envs // local_devices_to_use,
        max_replay_size)
    return training_state, state, ReplayBuffer(
        data=new_replay_data,
        current_position=new_position,
        current_size=new_size)

  def init_replay_buffer(training_state, state, replay_buffer):
    # Pre-fill the buffer up to min_replay_size before any gradient updates.
    (training_state, state, replay_buffer), _ = jax.lax.scan(
        (lambda a, b: (collect_and_update_buffer(*a),
                       ())), (training_state, state, replay_buffer), (),
        length=min_replay_size // (num_envs // local_devices_to_use))
    return training_state, state, replay_buffer
  init_replay_buffer = jax.pmap(init_replay_buffer, axis_name='i')
  # Gradient updates performed per collected batch of env transitions.
  num_updates = int(num_envs * grad_updates_per_step)

  def sample_data(training_state, replay_buffer):
    # Uniformly sample num_updates mini-batches worth of replay rows.
    key1, key2 = jax.random.split(training_state.key)
    idx = jax.random.randint(
        key2, (batch_size * num_updates // local_devices_to_use,),
        minval=0,
        maxval=replay_buffer.current_size)
    transitions = jnp.take(replay_buffer.data, idx, axis=0, mode='clip')
    transitions = jnp.reshape(transitions,
                              [num_updates, -1] + list(transitions.shape[1:]))
    training_state = training_state.replace(key=key1)
    return training_state, transitions

  def run_one_sac_epoch(carry, unused_t):
    # One collect step followed by num_updates gradient updates.
    training_state, state, replay_buffer = carry
    training_state, state, replay_buffer = collect_and_update_buffer(
        training_state, state, replay_buffer)
    training_state, transitions = sample_data(training_state, replay_buffer)
    training_state, metrics = jax.lax.scan(
        update_step, training_state, transitions, length=num_updates)
    metrics['buffer_current_size'] = replay_buffer.current_size
    metrics['buffer_current_position'] = replay_buffer.current_position
    return (training_state, state, replay_buffer), metrics

  def run_sac_training(training_state, state, replay_buffer):
    # Sanity check that state (minus the per-device key) is identical on all
    # devices; checked by the caller after each training round.
    synchro = pmap.is_replicated(
        training_state.replace(key=jax.random.PRNGKey(0)), axis_name='i')
    (training_state, state, replay_buffer), metrics = jax.lax.scan(
        run_one_sac_epoch, (training_state, state, replay_buffer), (),
        length=(log_frequency // action_repeat + num_envs - 1) // num_envs)
    metrics = jax.tree_map(jnp.mean, metrics)
    return training_state, state, replay_buffer, metrics, synchro

  run_sac_training = jax.pmap(run_sac_training, axis_name='i')
  training_state = TrainingState(
      policy_optimizer_state=policy_optimizer_state,
      policy_params=policy_params,
      q_optimizer_state=q_optimizer_state,
      q_params=q_params,
      target_q_params=q_params,
      key=jnp.stack(jax.random.split(local_key, local_devices_to_use)),
      steps=jnp.zeros((local_devices_to_use,)),
      alpha_optimizer_state=alpha_optimizer_state,
      alpha_params=log_alpha,
      normalizer_params=normalizer_params,
      rewarder_state=rewarder_state)
  training_walltime = 0
  eval_walltime = 0
  sps = 0
  eval_sps = 0
  training_metrics = {}
  state = first_state
  metrics = {}
  # Main loop: evaluate/log, then run one training round, until the step
  # budget is exhausted.
  while True:
    # The normalizer's observation count doubles as the env-step counter.
    current_step = int(training_state.normalizer_params[0][0]) * action_repeat
    logging.info('step %s', current_step)
    t = time.time()
    # Only process 0 evaluates, logs and checkpoints.
    if process_id == 0:
      eval_state, key_debug = run_eval(eval_first_state, key_debug,
                                       training_state.policy_params,
                                       training_state.normalizer_params)
      eval_metrics = eval_state.info['eval_metrics']
      eval_metrics.completed_episodes.block_until_ready()
      eval_walltime += time.time() - t
      eval_sps = (
          episode_length * eval_first_state.reward.shape[0] /
          (time.time() - t))
      avg_episode_length = (
          eval_metrics.completed_episodes_steps /
          eval_metrics.completed_episodes)
      metrics = dict(
          dict({
              f'eval/episode_{name}': value / eval_metrics.completed_episodes
              for name, value in eval_metrics.completed_episodes_metrics.items()
          }),
          **dict({
              f'training/{name}': onp.mean(value)
              for name, value in training_metrics.items()
          }),
          **dict({
              'eval/completed_episodes': eval_metrics.completed_episodes,
              'eval/avg_episode_length': avg_episode_length,
              'speed/sps': sps,
              'speed/eval_sps': eval_sps,
              'speed/training_walltime': training_walltime,
              'speed/eval_walltime': eval_walltime,
              'training/grad_updates': training_state.steps[0],
          }),
      )
      logging.info(metrics)
      if progress_fn:
        progress_fn(current_step, metrics)
      if checkpoint_dir:
        # Save current policy.
        normalizer_params = jax.tree_map(lambda x: x[0],
                                         training_state.normalizer_params)
        policy_params = jax.tree_map(lambda x: x[0],
                                     training_state.policy_params)
        params = normalizer_params, policy_params
        path = os.path.join(checkpoint_dir, f'sac_{current_step}.pkl')
        model.save_params(path, params)
    if current_step >= num_timesteps:
      break
    # Create and initialize the replay buffer (lazily, on the first round).
    if current_step == 0:
      t = time.time()
      replay_buffer = ReplayBuffer(
          data=jnp.zeros((local_devices_to_use, max_replay_size,
                          obs_size * 2 + core_env.action_size + 1 + 1 + 1)),
          current_size=jnp.zeros((local_devices_to_use,), dtype=jnp.int32),
          current_position=jnp.zeros((local_devices_to_use,), dtype=jnp.int32))
      training_state, state, replay_buffer = init_replay_buffer(
          training_state, state, replay_buffer)
      training_walltime += time.time() - t
    t = time.time()
    # optimization
    training_state, state, replay_buffer, training_metrics, synchro = run_sac_training(
        training_state, state, replay_buffer)
    assert synchro[0], (current_step, training_state)
    jax.tree_map(lambda x: x.block_until_ready(), training_metrics)
    sps = ((training_state.normalizer_params[0][0] * action_repeat -
            current_step) / (time.time() - t))
    training_walltime += time.time() - t
  # Strip the device dimension and build the final inference bundle.
  normalizer_params = jax.tree_map(lambda x: x[0],
                                   training_state.normalizer_params)
  policy_params = jax.tree_map(lambda x: x[0], training_state.policy_params)
  logging.info('total steps: %s', normalizer_params[0] * action_repeat)
  inference = make_inference_fn(core_env.observation_size, core_env.action_size,
                                normalize_observations)
  params = normalizer_params, policy_params
  pmap.synchronize_hosts()
  return (inference, params, metrics)
def make_inference_fn(observation_size, action_size, normalize_observations):
  """Creates params and inference function for the SAC agent.

  The returned callable maps ``(params, obs, key)`` to a sampled action,
  where ``params`` is the ``(normalizer_params, policy_params)`` pair
  produced by `train`.
  """
  _, normalize_obs = normalization.make_data_and_apply_fn(
      observation_size, normalize_observations)
  action_dist = distribution.NormalTanhDistribution(event_size=action_size)
  policy_model, _ = make_sac_networks(action_dist.param_size, observation_size,
                                      action_size)

  def inference_fn(params, obs, key):
    normalizer_params, policy_params = params
    normalized_obs = normalize_obs(normalizer_params, obs)
    logits = policy_model.apply(policy_params, normalized_obs)
    return action_dist.sample(logits, key)

  return inference_fn
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import contextlib
import types
import urlparse
from tempest import config
from tempest import exceptions
import boto
import boto.ec2
import boto.s3.connection
CONF = config.CONF
class BotoClientBase(object):
    """Base for boto-backed API clients.

    Subclasses provide ``connect_method`` and ``connection_data`` and list
    the connection methods they proxy in ``ALLOWED_METHODS``; calls to those
    names are forwarded to a fresh boto connection via ``__getattr__``.
    """

    ALLOWED_METHODS = set()

    def __init__(self, username=None, password=None,
                 auth_url=None, tenant_name=None,
                 *args, **kwargs):
        # FIXME(andreaf) replace credentials and auth_url with auth_provider
        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
        # boto.config.set() (see _config_boto_timeout) requires str values.
        self.connection_timeout = str(CONF.boto.http_socket_timeout)
        self.num_retries = str(CONF.boto.num_retries)
        self.build_timeout = CONF.boto.build_timeout
        self.ks_cred = {"username": username,
                        "password": password,
                        "auth_url": auth_url,
                        "tenant_name": tenant_name,
                        "insecure": insecure_ssl}

    def _keystone_aws_get(self):
        """Fetch (or create) EC2 credentials for the current user/tenant."""
        # FIXME(andreaf) Move EC2 credentials to AuthProvider
        import keystoneclient.v2_0.client
        keystone = keystoneclient.v2_0.client.Client(**self.ks_cred)
        ec2_cred_list = keystone.ec2.list(keystone.auth_user_id)
        ec2_cred = None
        for cred in ec2_cred_list:
            if cred.tenant_id == keystone.auth_tenant_id:
                ec2_cred = cred
                break
        else:
            # for/else: no existing credential matched this tenant — create.
            ec2_cred = keystone.ec2.create(keystone.auth_user_id,
                                           keystone.auth_tenant_id)
        if not all((ec2_cred, ec2_cred.access, ec2_cred.secret)):
            raise exceptions.NotFound("Unable to get access and secret keys")
        return ec2_cred

    def _config_boto_timeout(self, timeout, retries):
        """Apply socket timeout and retry settings to the global boto config."""
        try:
            boto.config.add_section("Boto")
        except ConfigParser.DuplicateSectionError:
            pass
        boto.config.set("Boto", "http_socket_timeout", timeout)
        boto.config.set("Boto", "num_retries", retries)

    def __getattr__(self, name):
        """Automatically creates methods for the allowed methods set."""
        if name in self.ALLOWED_METHODS:
            def func(self, *args, **kwargs):
                # One connection per call; closed when the call returns.
                with contextlib.closing(self.get_connection()) as conn:
                    return getattr(conn, name)(*args, **kwargs)

            func.__name__ = name
            # Cache the generated method on both the instance and the class
            # so later lookups bypass __getattr__ entirely.
            setattr(self, name, types.MethodType(func, self, self.__class__))
            setattr(self.__class__, name,
                    types.MethodType(func, None, self.__class__))
            return getattr(self, name)
        else:
            raise AttributeError(name)

    def get_connection(self):
        """Build a boto connection, deriving AWS keys from keystone if unset."""
        self._config_boto_timeout(self.connection_timeout, self.num_retries)
        if not all((self.connection_data["aws_access_key_id"],
                   self.connection_data["aws_secret_access_key"])):
            # No static AWS keys configured; derive them via keystone when a
            # complete set of keystone credentials is available.
            if all([self.ks_cred.get('auth_url'),
                    self.ks_cred.get('username'),
                    self.ks_cred.get('tenant_name'),
                    self.ks_cred.get('password')]):
                ec2_cred = self._keystone_aws_get()
                self.connection_data["aws_access_key_id"] = \
                    ec2_cred.access
                self.connection_data["aws_secret_access_key"] = \
                    ec2_cred.secret
            else:
                raise exceptions.InvalidConfiguration(
                    "Unable to get access and secret keys")
        return self.connect_method(**self.connection_data)
class APIClientEC2(BotoClientBase):
    """EC2 API client built on boto, configured from tempest CONF."""

    def connect_method(self, *args, **kwargs):
        return boto.connect_ec2(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(APIClientEC2, self).__init__(*args, **kwargs)
        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
        aws_access = CONF.boto.aws_access
        aws_secret = CONF.boto.aws_secret
        purl = urlparse.urlparse(CONF.boto.ec2_url)
        region_name = CONF.compute.region
        if not region_name:
            region_name = CONF.identity.region
        region = boto.ec2.regioninfo.RegionInfo(name=region_name,
                                                endpoint=purl.hostname)
        port = purl.port
        if port is None:
            # Default the port from the scheme. BUG FIX: the original used
            # `purl.scheme is not "https"` — identity comparison against a
            # string literal is not a reliable string comparison, so https
            # URLs could wrongly default to port 80. Use equality.
            if purl.scheme != "https":
                port = 80
            else:
                port = 443
        else:
            port = int(port)
        self.connection_data = {"aws_access_key_id": aws_access,
                                "aws_secret_access_key": aws_secret,
                                "is_secure": purl.scheme == "https",
                                "validate_certs": not insecure_ssl,
                                "region": region,
                                "host": purl.hostname,
                                "port": port,
                                "path": purl.path}

    # Connection methods proxied to boto's EC2Connection (see
    # BotoClientBase.__getattr__).
    ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
                           'delete_key_pair', 'import_key_pair',
                           'get_all_key_pairs',
                           'get_all_tags',
                           'create_image', 'get_image',
                           'register_image', 'deregister_image',
                           'get_all_images', 'get_image_attribute',
                           'modify_image_attribute', 'reset_image_attribute',
                           'get_all_kernels',
                           'create_volume', 'delete_volume',
                           'get_all_volume_status', 'get_all_volumes',
                           # BUG FIX: a missing comma after
                           # 'modify_volume_attribute' used to fuse it with
                           # 'bundle_instance' via implicit string
                           # concatenation, breaking both method proxies.
                           'get_volume_attribute', 'modify_volume_attribute',
                           'bundle_instance', 'cancel_spot_instance_requests',
                           # BUG FIX: was misspelled 'confirm_product_instanc'.
                           'confirm_product_instance',
                           'get_all_instance_status', 'get_all_instances',
                           'get_all_reserved_instances',
                           'get_all_spot_instance_requests',
                           'get_instance_attribute', 'monitor_instance',
                           'monitor_instances', 'unmonitor_instance',
                           'unmonitor_instances',
                           'purchase_reserved_instance_offering',
                           'reboot_instances', 'request_spot_instances',
                           'reset_instance_attribute', 'run_instances',
                           'start_instances', 'stop_instances',
                           'terminate_instances',
                           'attach_network_interface', 'attach_volume',
                           'detach_network_interface', 'detach_volume',
                           'get_console_output',
                           'delete_network_interface', 'create_subnet',
                           'create_network_interface', 'delete_subnet',
                           'get_all_network_interfaces',
                           'allocate_address', 'associate_address',
                           'disassociate_address', 'get_all_addresses',
                           'release_address',
                           'create_snapshot', 'delete_snapshot',
                           'get_all_snapshots', 'get_snapshot_attribute',
                           'modify_snapshot_attribute',
                           'reset_snapshot_attribute', 'trim_snapshots',
                           'get_all_regions', 'get_all_zones',
                           'get_all_security_groups', 'create_security_group',
                           'delete_security_group', 'authorize_security_group',
                           'authorize_security_group_egress',
                           'revoke_security_group',
                           'revoke_security_group_egress'))
class ObjectClientS3(BotoClientBase):
    """S3 object-store client built on boto, configured from tempest CONF."""

    def connect_method(self, *args, **kwargs):
        return boto.connect_s3(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(ObjectClientS3, self).__init__(*args, **kwargs)
        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
        aws_access = CONF.boto.aws_access
        aws_secret = CONF.boto.aws_secret
        purl = urlparse.urlparse(CONF.boto.s3_url)
        port = purl.port
        if port is None:
            # Default the port from the scheme. BUG FIX: the original used
            # `purl.scheme is not "https"` — identity comparison against a
            # string literal is not a reliable string comparison, so https
            # URLs could wrongly default to port 80. Use equality.
            if purl.scheme != "https":
                port = 80
            else:
                port = 443
        else:
            port = int(port)
        self.connection_data = {"aws_access_key_id": aws_access,
                                "aws_secret_access_key": aws_secret,
                                "is_secure": purl.scheme == "https",
                                "validate_certs": not insecure_ssl,
                                "host": purl.hostname,
                                "port": port,
                                "calling_format": boto.s3.connection.
                                OrdinaryCallingFormat()}

    # Connection methods proxied to boto's S3Connection (see
    # BotoClientBase.__getattr__).
    ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
                           'get_all_buckets', 'get_bucket', 'delete_key',
                           'lookup'))
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Interfaces under evaluation before upstreaming to nipype.interfaces.utility."""
import numpy as np
import re
import json
from collections import OrderedDict
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.io import add_traits
from nipype.interfaces.base import (
BaseInterface,
BaseInterfaceInputSpec,
DynamicTraitedSpec,
File,
InputMultiObject,
isdefined,
SimpleInterface,
Str,
TraitedSpec,
traits,
)
class _KeySelectInputSpec(DynamicTraitedSpec):
    """Input specification for :class:`KeySelect`; value fields are added dynamically."""
    key = Str(mandatory=True, desc="selective key")
    keys = InputMultiObject(Str, mandatory=True, min=1, desc="index of keys")
class _KeySelectOutputSpec(DynamicTraitedSpec):
    """Output specification for :class:`KeySelect`; selected fields are added dynamically."""
    key = Str(desc="propagates selected key")
class KeySelect(BaseInterface):
    """
    An interface that operates similarly to an OrderedDict.
    >>> ks = KeySelect(keys=['MNI152NLin6Asym', 'MNI152Lin', 'fsaverage'],
    ... fields=['field1', 'field2', 'field3'])
    >>> ks.inputs.field1 = ['fsl', 'mni', 'freesurfer']
    >>> ks.inputs.field2 = ['volume', 'volume', 'surface']
    >>> ks.inputs.field3 = [True, False, False]
    >>> ks.inputs.key = 'MNI152Lin'
    >>> ks.run().outputs
    <BLANKLINE>
    field1 = mni
    field2 = volume
    field3 = False
    key = MNI152Lin
    <BLANKLINE>
    >>> ks = KeySelect(fields=['field1', 'field2', 'field3'])
    >>> ks.inputs.keys=['MNI152NLin6Asym', 'MNI152Lin', 'fsaverage']
    >>> ks.inputs.field1 = ['fsl', 'mni', 'freesurfer']
    >>> ks.inputs.field2 = ['volume', 'volume', 'surface']
    >>> ks.inputs.field3 = [True, False, False]
    >>> ks.inputs.key = 'MNI152Lin'
    >>> ks.run().outputs
    <BLANKLINE>
    field1 = mni
    field2 = volume
    field3 = False
    key = MNI152Lin
    <BLANKLINE>
    >>> ks.inputs.field1 = ['fsl', 'mni', 'freesurfer']
    >>> ks.inputs.field2 = ['volume', 'volume', 'surface']
    >>> ks.inputs.field3 = [True, False, False]
    >>> ks.inputs.key = 'fsaverage'
    >>> ks.run().outputs
    <BLANKLINE>
    field1 = freesurfer
    field2 = surface
    field3 = False
    key = fsaverage
    <BLANKLINE>
    >>> ks.inputs.field1 = ['fsl', 'mni', 'freesurfer']
    >>> ks.inputs.field2 = ['volume', 'volume', 'surface']
    >>> ks.inputs.field3 = [True, False] # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ValueError: Trying to set an invalid value
    >>> ks.inputs.key = 'MNINLin2009cAsym'
    Traceback (most recent call last):
    ValueError: Selected key "MNINLin2009cAsym" not found in the index
    >>> ks = KeySelect(fields=['field1', 'field2', 'field3'])
    >>> ks.inputs.keys=['MNI152NLin6Asym']
    >>> ks.inputs.field1 = ['fsl']
    >>> ks.inputs.field2 = ['volume']
    >>> ks.inputs.field3 = [True]
    >>> ks.inputs.key = 'MNI152NLin6Asym'
    >>> ks.run().outputs
    <BLANKLINE>
    field1 = fsl
    field2 = volume
    field3 = True
    key = MNI152NLin6Asym
    <BLANKLINE>
    """
    input_spec = _KeySelectInputSpec
    output_spec = _KeySelectOutputSpec
    def __init__(self, keys=None, fields=None, **inputs):
        """
        Instantiate a KeySelect utility interface.
        Examples
        --------
        >>> ks = KeySelect(fields='field1')
        >>> ks.inputs
        <BLANKLINE>
        field1 = <undefined>
        key = <undefined>
        keys = <undefined>
        <BLANKLINE>
        >>> ks = KeySelect(fields='field1', field1=['a', 'b'])
        >>> ks.inputs
        <BLANKLINE>
        field1 = ['a', 'b']
        key = <undefined>
        keys = <undefined>
        <BLANKLINE>
        >>> ks = KeySelect()
        Traceback (most recent call last):
        ValueError: A list or multiplexed...
        >>> ks = KeySelect(fields='key')
        Traceback (most recent call last):
        ValueError: Some fields are invalid...
        """
        # Call constructor
        super(KeySelect, self).__init__(**inputs)
        # Handle and initiate fields
        if not fields:
            raise ValueError(
                "A list or multiplexed fields must be provided at "
                "instantiation time."
            )
        if isinstance(fields, str):
            fields = [fields]
        # Field names must not clash with the static traits (key/keys).
        _invalid = set(self.input_spec.class_editable_traits()).intersection(fields)
        if _invalid:
            raise ValueError("Some fields are invalid (%s)." % ", ".join(_invalid))
        self._fields = fields
        # Attach events
        self.inputs.on_trait_change(self._check_len)
        if keys:
            self.inputs.keys = keys
        # Add fields in self._fields
        add_traits(self.inputs, self._fields)
        # Assign any field values that were passed via **inputs, now that
        # the dynamic traits exist.
        for in_field in set(self._fields).intersection(inputs.keys()):
            setattr(self.inputs, in_field, inputs[in_field])
    # Trait-change listener: validates ``keys``/``key``/field values on assignment.
    def _check_len(self, name, new):
        if name == "keys":
            nitems = len(new)
            # Keys act as an index, so duplicates are disallowed.
            if len(set(new)) != nitems:
                raise ValueError(
                    "Found duplicated entries in the index of ordered keys"
                )
        if not isdefined(self.inputs.keys):
            return
        if name == "key" and new not in self.inputs.keys:
            raise ValueError('Selected key "%s" not found in the index' % new)
        if name in self._fields:
            # Field values must be non-string sequences aligned with ``keys``.
            if isinstance(new, str) or len(new) < 1:
                raise ValueError(
                    'Trying to set an invalid value (%s) for input "%s"' % (new, name)
                )
            if len(new) != len(self.inputs.keys):
                raise ValueError(
                    'Length of value (%s) for input field "%s" does not match '
                    "the length of the indexing list." % (new, name)
                )
    def _run_interface(self, runtime):
        # Nothing to compute at runtime; selection happens in _list_outputs.
        return runtime
    def _list_outputs(self):
        # Position of the selected key picks one element from every field.
        index = self.inputs.keys.index(self.inputs.key)
        outputs = {k: getattr(self.inputs, k)[index] for k in self._fields}
        outputs["key"] = self.inputs.key
        return outputs
    def _outputs(self):
        # Expose one output trait per selected field, in addition to ``key``.
        base = super(KeySelect, self)._outputs()
        base = add_traits(base, self._fields)
        return base
class _AddTSVHeaderInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`AddTSVHeader`."""
    in_file = File(exists=True, mandatory=True, desc="input file")
    columns = traits.List(traits.Str, mandatory=True, desc="header for columns")
class _AddTSVHeaderOutputSpec(TraitedSpec):
    """Output specification for :class:`AddTSVHeader`."""
    out_file = File(exists=True, desc="output average file")
class AddTSVHeader(SimpleInterface):
    r"""Prepend a row of column names to a headerless TSV file.

    Examples
    --------
    An example TSV:
    >>> np.savetxt('data.tsv', np.arange(30).reshape((6, 5)), delimiter='\t')
    Add headers:
    >>> addheader = AddTSVHeader()
    >>> addheader.inputs.in_file = 'data.tsv'
    >>> addheader.inputs.columns = ['a', 'b', 'c', 'd', 'e']
    >>> res = addheader.run()
    >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,
    ... index_col=None)
    >>> df.columns.ravel().tolist()
    ['a', 'b', 'c', 'd', 'e']
    >>> np.all(df.values == np.arange(30).reshape((6, 5)))
    True
    """

    input_spec = _AddTSVHeaderInputSpec
    output_spec = _AddTSVHeaderOutputSpec

    def _run_interface(self, runtime):
        # Place the output in the working directory; use_ext=False means the
        # fixed "_motion.tsv" suffix replaces the original extension outright.
        out_file = fname_presuffix(
            self.inputs.in_file,
            suffix="_motion.tsv",
            newpath=runtime.cwd,
            use_ext=False,
        )
        header_row = "\t".join(self.inputs.columns)
        matrix = np.loadtxt(self.inputs.in_file)
        # comments="" keeps numpy from prefixing the header with "# ".
        np.savetxt(
            out_file,
            matrix,
            delimiter="\t",
            header=header_row,
            comments="",
        )
        self._results["out_file"] = out_file
        return runtime
class _JoinTSVColumnsInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`JoinTSVColumns`."""
    in_file = File(exists=True, mandatory=True, desc="input file")
    join_file = File(exists=True, mandatory=True, desc="file to be adjoined")
    side = traits.Enum("right", "left", usedefault=True, desc="where to join")
    columns = traits.List(traits.Str, desc="header for columns")
class _JoinTSVColumnsOutputSpec(TraitedSpec):
    """Output specification for :class:`JoinTSVColumns`."""
    out_file = File(exists=True, desc="output TSV file")
class JoinTSVColumns(SimpleInterface):
    r"""Join two TSV files column-wise, optionally adding a header row.

    Examples
    --------
    An example TSV:
    >>> data = np.arange(30).reshape((6, 5))
    >>> np.savetxt('data.tsv', data[:, :3], delimiter='\t')
    >>> np.savetxt('add.tsv', data[:, 3:], delimiter='\t')
    Join without naming headers:
    >>> join = JoinTSVColumns()
    >>> join.inputs.in_file = 'data.tsv'
    >>> join.inputs.join_file = 'add.tsv'
    >>> res = join.run()
    >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,
    ... index_col=None, dtype=float, header=None)
    >>> df.columns.ravel().tolist() == list(range(5))
    True
    >>> np.all(df.values.astype(int) == data)
    True
    Adding column names:
    >>> join = JoinTSVColumns()
    >>> join.inputs.in_file = 'data.tsv'
    >>> join.inputs.join_file = 'add.tsv'
    >>> join.inputs.columns = ['a', 'b', 'c', 'd', 'e']
    >>> res = join.run()
    >>> res.outputs.out_file # doctest: +ELLIPSIS
    '...data_joined.tsv'
    >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,
    ... index_col=None)
    >>> df.columns.ravel().tolist()
    ['a', 'b', 'c', 'd', 'e']
    >>> np.all(df.values == np.arange(30).reshape((6, 5)))
    True
    >>> join = JoinTSVColumns()
    >>> join.inputs.in_file = 'data.tsv'
    >>> join.inputs.join_file = 'add.tsv'
    >>> join.inputs.side = 'left'
    >>> join.inputs.columns = ['a', 'b', 'c', 'd', 'e']
    >>> res = join.run()
    >>> df = pd.read_csv(res.outputs.out_file, delim_whitespace=True,
    ... index_col=None)
    >>> df.columns.ravel().tolist()
    ['a', 'b', 'c', 'd', 'e']
    >>> np.all(df.values == np.hstack((data[:, 3:], data[:, :3])))
    True
    """

    input_spec = _JoinTSVColumnsInputSpec
    output_spec = _JoinTSVColumnsOutputSpec

    def _run_interface(self, runtime):
        out_file = fname_presuffix(
            self.inputs.in_file,
            suffix="_joined.tsv",
            newpath=runtime.cwd,
            use_ext=False,
        )
        # Optional header line, written only when column names were given.
        header = ""
        if isdefined(self.inputs.columns) and self.inputs.columns:
            header = "\t".join(self.inputs.columns)
        with open(self.inputs.in_file) as ifh:
            data = ifh.read().splitlines(keepends=False)
        with open(self.inputs.join_file) as ifh:
            join = ifh.read().splitlines(keepends=False)
        # The two files are zipped line by line, so their line counts must
        # agree. BUG FIX: the message previously said "columns", but this
        # check compares the number of rows (lines) in each file.
        if len(data) != len(join):
            raise ValueError("Number of rows in datasets do not match")
        merged = []
        for d, j in zip(data, join):
            # ``side`` controls whether the joined file's columns go first.
            line = "%s\t%s" % ((j, d) if self.inputs.side == "left" else (d, j))
            merged.append(line)
        if header:
            merged.insert(0, header)
        with open(out_file, "w") as ofh:
            ofh.write("\n".join(merged))
        self._results["out_file"] = out_file
        return runtime
class _DictMergeInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`DictMerge`."""
    in_dicts = traits.List(
        traits.Either(traits.Dict, traits.Instance(OrderedDict)),
        desc="Dictionaries to be merged. In the event of a collision, values "
        "from dictionaries later in the list receive precedence.",
    )
class _DictMergeOutputSpec(TraitedSpec):
    """Output specification for :class:`DictMerge`."""
    out_dict = traits.Dict(desc="Merged dictionary")
class DictMerge(SimpleInterface):
    """Combine several (ordered) dictionaries into one.

    Entries from dictionaries appearing later in ``in_dicts`` overwrite
    colliding keys from earlier ones.
    """

    input_spec = _DictMergeInputSpec
    output_spec = _DictMergeOutputSpec

    def _run_interface(self, runtime):
        merged = {}
        for mapping in self.inputs.in_dicts:
            merged.update(mapping)
        self._results["out_dict"] = merged
        return runtime
class _TSV2JSONInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`TSV2JSON`."""
    in_file = File(exists=True, mandatory=True, desc="Input TSV file")
    index_column = traits.Str(
        mandatory=True,
        desc="Name of the column in the TSV to be used "
        "as the top-level key in the JSON. All "
        "remaining columns will be assigned as "
        "nested keys.",
    )
    # ``None`` means "return a dictionary" instead of writing a file.
    output = traits.Either(
        None,
        File,
        desc="Path where the output file is to be saved. "
        "If this is `None`, then a JSON-compatible "
        "dictionary is returned instead.",
    )
    additional_metadata = traits.Either(
        None,
        traits.Dict,
        traits.Instance(OrderedDict),
        usedefault=True,
        desc="Any additional metadata that "
        "should be applied to all "
        "entries in the JSON.",
    )
    drop_columns = traits.Either(
        None,
        traits.List(),
        usedefault=True,
        desc="List of columns in the TSV to be " "dropped from the JSON.",
    )
    enforce_case = traits.Bool(
        True,
        usedefault=True,
        desc="Enforce snake case for top-level keys " "and camel case for nested keys",
    )
class _TSV2JSONOutputSpec(TraitedSpec):
    """Output specification for :class:`TSV2JSON`."""
    # Either a path to the written JSON file, or the dictionary itself
    # (when no output path was requested).
    output = traits.Either(
        traits.Dict,
        File(exists=True),
        traits.Instance(OrderedDict),
        desc="Output dictionary or JSON file",
    )
class TSV2JSON(SimpleInterface):
    """Convert metadata from TSV format to JSON format."""

    input_spec = _TSV2JSONInputSpec
    output_spec = _TSV2JSONOutputSpec

    def _run_interface(self, runtime):
        # Default to a ``.json`` sibling of the input file (in the working
        # directory) when no explicit output path was provided.
        target = self.inputs.output
        if not isdefined(target):
            target = fname_presuffix(
                self.inputs.in_file, suffix=".json", newpath=runtime.cwd, use_ext=False
            )
        self._results["output"] = _tsv2json(
            in_tsv=self.inputs.in_file,
            out_json=target,
            index_column=self.inputs.index_column,
            additional_metadata=self.inputs.additional_metadata,
            drop_columns=self.inputs.drop_columns,
            enforce_case=self.inputs.enforce_case,
        )
        return runtime
def _tsv2json(
in_tsv,
out_json,
index_column,
additional_metadata=None,
drop_columns=None,
enforce_case=True,
):
"""
Convert metadata from TSV format to JSON format.
Parameters
----------
in_tsv: str
Path to the metadata in TSV format.
out_json: str
Path where the metadata should be saved in JSON format after
conversion. If this is None, then a dictionary is returned instead.
index_column: str
Name of the column in the TSV to be used as an index (top-level key in
the JSON).
additional_metadata: dict
Any additional metadata that should be applied to all entries in the
JSON.
drop_columns: list
List of columns from the input TSV to be dropped from the JSON.
enforce_case: bool
Indicates whether BIDS case conventions should be followed. Currently,
this means that index fields (column names in the associated data TSV)
use snake case and other fields use camel case.
Returns
-------
str
Path to the metadata saved in JSON format.
"""
import pandas as pd
# Adapted from https://dev.to/rrampage/snake-case-to-camel-case-and- ...
# back-using-regular-expressions-and-python-m9j
re_to_camel = r"(.*?)_([a-zA-Z0-9])"
re_to_snake = r"(^.+?|.*?)((?<![_A-Z])[A-Z]|(?<![_0-9])[0-9]+)"
def snake(match):
return "{}_{}".format(match.group(1).lower(), match.group(2).lower())
def camel(match):
return "{}{}".format(match.group(1), match.group(2).upper())
# from fmriprep
def less_breakable(a_string):
"""hardens the string to different envs (i.e. case insensitive, no
whitespace, '#'"""
return "".join(a_string.split()).strip("#")
drop_columns = drop_columns or []
additional_metadata = additional_metadata or {}
tsv_data = pd.read_csv(in_tsv, "\t")
for k, v in additional_metadata.items():
tsv_data[k] = [v] * len(tsv_data.index)
for col in drop_columns:
tsv_data.drop(labels=col, axis="columns", inplace=True)
tsv_data.set_index(index_column, drop=True, inplace=True)
if enforce_case:
tsv_data.index = [
re.sub(re_to_snake, snake, less_breakable(i), 0).lower()
for i in tsv_data.index
]
tsv_data.columns = [
re.sub(re_to_camel, camel, less_breakable(i).title(), 0).replace(
"Csf", "CSF"
)
for i in tsv_data.columns
]
json_data = tsv_data.to_json(orient="index")
json_data = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(json_data)
for i in json_data:
json_data[i].update(additional_metadata)
if out_json is None:
return json_data
with open(out_json, "w") as f:
json.dump(json_data, f, indent=4)
return out_json
| |
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pools managing shared Session objects."""
import datetime
from six.moves import queue
from google.cloud.exceptions import NotFound
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
_NOW = datetime.datetime.utcnow # unit tests may replace
class AbstractSessionPool(object):
    """Specifies required API for concrete session pool implementations.
    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """
    # Set by concrete pools in :meth:`bind`; sessions are created from it.
    _database = None
    def __init__(self, labels=None):
        if labels is None:
            labels = {}
        self._labels = labels
    @property
    def labels(self):
        """User-assigned labels for sessions created by the pool.
        :rtype: dict (str -> str)
        :returns: labels assigned by the user
        """
        return self._labels
    def bind(self, database):
        """Associate the pool with a database.
        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        Concrete implementations of this method may pre-fill the pool
        using the database.
        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()
    def get(self):
        """Check a session out from the pool.
        Concrete implementations of this method are allowed to raise an
        error to signal that the pool is exhausted, or to block until a
        session is available.
        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()
    def put(self, session):
        """Return a session to the pool.
        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.
        Concrete implementations of this method are allowed to raise an
        error to signal that the pool is full, or to block until it is
        not full.
        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()
    def clear(self):
        """Delete all sessions in the pool.
        Concrete implementations of this method are allowed to raise an
        error to signal that the pool is full, or to block until it is
        not full.
        :raises NotImplementedError: abstract method
        """
        raise NotImplementedError()
    def _new_session(self):
        """Helper for concrete methods creating session instances.
        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: new session instance.
        """
        # Only forward labels when some were configured; otherwise use the
        # database's default session factory signature.
        if self.labels:
            return self._database.session(labels=self.labels)
        return self._database.session()
    def session(self, **kwargs):
        """Check out a session from the pool.
        :param kwargs: (optional) keyword arguments, passed through to
                       the returned checkout.
        :rtype: :class:`~google.cloud.spanner_v1.session.SessionCheckout`
        :returns: a checkout instance, to be used as a context manager for
                  accessing the session and returning it to the pool.
        """
        return SessionCheckout(self, **kwargs)
class FixedSizePool(AbstractSessionPool):
    """Concrete session pool implementation:
    - Pre-allocates / creates a fixed number of sessions.
    - "Pings" existing sessions via :meth:`session.exists` before returning
      them, and replaces expired sessions.
    - Blocks, with a timeout, when :meth:`get` is called on an empty pool.
      Raises after timing out.
    - Raises when :meth:`put` is called on a full pool. That error is
      never expected in normal practice, as users should be calling
      :meth:`get` followed by :meth:`put` whenever in need of a session.
    :type size: int
    :param size: fixed pool size
    :type default_timeout: int
    :param default_timeout: default timeout, in seconds, to wait for
                            a returned session.
    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    DEFAULT_SIZE = 10
    DEFAULT_TIMEOUT = 10

    def __init__(self, size=DEFAULT_SIZE, default_timeout=DEFAULT_TIMEOUT, labels=None):
        super(FixedSizePool, self).__init__(labels=labels)
        self.size = size
        self.default_timeout = default_timeout
        # LIFO: the most recently returned (most likely still alive)
        # session is handed out first.
        self._sessions = queue.LifoQueue(size)

    def bind(self, database):
        """Associate the pool with a database.
        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        """
        self._database = database
        api = database.spanner_api
        metadata = _metadata_with_prefix(database.name)
        # The batch API may return fewer sessions than requested, so keep
        # requesting until the pool is full.
        while not self._sessions.full():
            resp = api.batch_create_sessions(
                database.name,
                self.size - self._sessions.qsize(),
                timeout=self.default_timeout,
                metadata=metadata,
            )
            for session_pb in resp.session:
                session = self._new_session()
                session._session_id = session_pb.name.split("/")[-1]
                self._sessions.put(session)

    def get(self, timeout=None):  # pylint: disable=arguments-differ
        """Check a session out from the pool.
        :type timeout: int
        :param timeout: seconds to block waiting for an available session
        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: an existing session from the pool, or a newly-created
                  session.
        :raises: :exc:`six.moves.queue.Empty` if the queue is empty.
        """
        if timeout is None:
            timeout = self.default_timeout
        session = self._sessions.get(block=True, timeout=timeout)
        if not session.exists():
            # Expired server-side: replace it. BUG FIX: use _new_session()
            # (not database.session()) so that pool labels are preserved,
            # consistent with BurstyPool and PingingPool.
            session = self._new_session()
            session.create()
        return session

    def put(self, session):
        """Return a session to the pool.
        Never blocks: if the pool is full, raises.
        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.
        :raises: :exc:`six.moves.queue.Full` if the queue is full.
        """
        self._sessions.put_nowait(session)

    def clear(self):
        """Delete all sessions in the pool."""
        while True:
            try:
                session = self._sessions.get(block=False)
            except queue.Empty:
                break
            else:
                session.delete()
class BurstyPool(AbstractSessionPool):
    """Session pool that grows on demand instead of blocking.
    - "Pings" existing sessions via :meth:`session.exists` before returning
      them.
    - :meth:`get` on an empty pool creates a brand-new session rather than
      blocking.
    - :meth:`put` on a full pool discards the returned session rather than
      blocking.
    :type target_size: int
    :param target_size: max pool size
    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """

    def __init__(self, target_size=10, labels=None):
        super(BurstyPool, self).__init__(labels=labels)
        self._database = None
        self.target_size = target_size
        self._sessions = queue.LifoQueue(target_size)

    def bind(self, database):
        """Associate the pool with a database.
        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        """
        self._database = database

    def get(self):
        """Check a session out from the pool.
        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: an existing session from the pool, or a newly-created
                  session.
        """
        try:
            checked_out = self._sessions.get_nowait()
        except queue.Empty:
            checked_out = None
        if checked_out is not None and checked_out.exists():
            return checked_out
        # Pool was empty, or the pooled session expired server-side:
        # make a fresh one instead of blocking.
        checked_out = self._new_session()
        checked_out.create()
        return checked_out

    def put(self, session):
        """Return a session to the pool.
        Never blocks: if the pool is full, the returned session is
        discarded.
        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.
        """
        try:
            self._sessions.put_nowait(session)
        except queue.Full:
            # Over capacity: drop the extra session server-side. It may
            # already be gone, which is fine.
            try:
                session.delete()
            except NotFound:
                pass

    def clear(self):
        """Delete all sessions in the pool."""
        while True:
            try:
                doomed = self._sessions.get(block=False)
            except queue.Empty:
                return
            doomed.delete()
class PingingPool(AbstractSessionPool):
    """Concrete session pool implementation:
    - Pre-allocates / creates a fixed number of sessions.
    - Sessions are used in "round-robin" order (LRU first).
    - "Pings" existing sessions in the background after a specified interval
      via an API call (``session.exists()``).
    - Blocks, with a timeout, when :meth:`get` is called on an empty pool.
      Raises after timing out.
    - Raises when :meth:`put` is called on a full pool. That error is
      never expected in normal practice, as users should be calling
      :meth:`get` followed by :meth:`put` whenever in need of a session.
    The application is responsible for calling :meth:`ping` at appropriate
    times, e.g. from a background thread.
    :type size: int
    :param size: fixed pool size
    :type default_timeout: int
    :param default_timeout: default timeout, in seconds, to wait for
                            a returned session.
    :type ping_interval: int
    :param ping_interval: interval at which to ping sessions.
    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """
    def __init__(self, size=10, default_timeout=10, ping_interval=3000, labels=None):
        super(PingingPool, self).__init__(labels=labels)
        self.size = size
        self.default_timeout = default_timeout
        self._delta = datetime.timedelta(seconds=ping_interval)
        # Entries are (ping_after, session) tuples; the priority queue
        # yields the earliest deadline (least recently used) first.
        self._sessions = queue.PriorityQueue(size)
    def bind(self, database):
        """Associate the pool with a database.
        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        """
        self._database = database
        api = database.spanner_api
        metadata = _metadata_with_prefix(database.name)
        created_session_count = 0
        # The batch API may return fewer sessions than requested, so keep
        # requesting until the target size is reached.
        while created_session_count < self.size:
            resp = api.batch_create_sessions(
                database.name,
                self.size - created_session_count,
                timeout=self.default_timeout,
                metadata=metadata,
            )
            for session_pb in resp.session:
                session = self._new_session()
                session._session_id = session_pb.name.split("/")[-1]
                self.put(session)
            created_session_count += len(resp.session)
    def get(self, timeout=None):  # pylint: disable=arguments-differ
        """Check a session out from the pool.
        :type timeout: int
        :param timeout: seconds to block waiting for an available session
        :rtype: :class:`~google.cloud.spanner_v1.session.Session`
        :returns: an existing session from the pool, or a newly-created
                  session.
        :raises: :exc:`six.moves.queue.Empty` if the queue is empty.
        """
        if timeout is None:
            timeout = self.default_timeout
        ping_after, session = self._sessions.get(block=True, timeout=timeout)
        # Past its ping deadline: verify the session still exists and
        # replace it if it has expired server-side.
        if _NOW() > ping_after:
            if not session.exists():
                session = self._new_session()
                session.create()
        return session
    def put(self, session):
        """Return a session to the pool.
        Never blocks: if the pool is full, raises.
        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.
        :raises: :exc:`six.moves.queue.Full` if the queue is full.
        """
        # Schedule the next ping one full interval from now.
        self._sessions.put_nowait((_NOW() + self._delta, session))
    def clear(self):
        """Delete all sessions in the pool."""
        while True:
            try:
                _, session = self._sessions.get(block=False)
            except queue.Empty:
                break
            else:
                session.delete()
    def ping(self):
        """Refresh maybe-expired sessions in the pool.
        This method is designed to be called from a background thread,
        or during the "idle" phase of an event loop.
        """
        while True:
            try:
                ping_after, session = self._sessions.get(block=False)
            except queue.Empty:  # all sessions in use
                break
            if ping_after > _NOW():  # oldest session is fresh
                # Re-add to queue with existing expiration
                self._sessions.put((ping_after, session))
                break
            if not session.exists():  # stale
                session = self._new_session()
                session.create()
            # Re-add to queue with new expiration
            self.put(session)
class TransactionPingingPool(PingingPool):
    """Concrete session pool implementation:
    In addition to the features of :class:`PingingPool`, this class
    creates and begins a transaction for each of its sessions at startup.
    When a session is returned to the pool, if its transaction has been
    committed or rolled back, the pool creates a new transaction for the
    session and pushes the transaction onto a separate queue of "transactions
    to begin." The application is responsible for flushing this queue
    as appropriate via the pool's :meth:`begin_pending_transactions` method.
    :type size: int
    :param size: fixed pool size
    :type default_timeout: int
    :param default_timeout: default timeout, in seconds, to wait for
                            a returned session.
    :type ping_interval: int
    :param ping_interval: interval at which to ping sessions.
    :type labels: dict (str -> str) or None
    :param labels: (Optional) user-assigned labels for sessions created
                   by the pool.
    """
    def __init__(self, size=10, default_timeout=10, ping_interval=3000, labels=None):
        # The pending queue must exist before any put() call can happen,
        # so create it ahead of base-class initialization.
        self._pending_sessions = queue.Queue()
        super(TransactionPingingPool, self).__init__(
            size, default_timeout, ping_interval, labels=labels
        )
        self.begin_pending_transactions()
    def bind(self, database):
        """Associate the pool with a database.
        :type database: :class:`~google.cloud.spanner_v1.database.Database`
        :param database: database used by the pool: used to create sessions
                         when needed.
        """
        super(TransactionPingingPool, self).bind(database)
        # Sessions queued during bind() get their transactions begun now.
        self.begin_pending_transactions()
    def put(self, session):
        """Return a session to the pool.
        Never blocks: if the pool is full, raises.
        :type session: :class:`~google.cloud.spanner_v1.session.Session`
        :param session: the session being returned.
        :raises: :exc:`six.moves.queue.Full` if the queue is full.
        """
        # Check capacity up front so a fresh transaction is not created
        # for a session that cannot be stored anyway.
        if self._sessions.full():
            raise queue.Full
        txn = session._transaction
        # A missing or consumed (committed / rolled-back) transaction must
        # be replaced; the session is parked on the pending queue until
        # begin_pending_transactions() actually begins it.
        if txn is None or txn.committed or txn._rolled_back:
            session.transaction()
            self._pending_sessions.put(session)
        else:
            super(TransactionPingingPool, self).put(session)
    def begin_pending_transactions(self):
        """Begin all transactions for sessions added to the pool."""
        while not self._pending_sessions.empty():
            session = self._pending_sessions.get()
            session._transaction.begin()
            # Bypass our own put(): the transaction was just begun, so the
            # session goes straight back into the main pool.
            super(TransactionPingingPool, self).put(session)
class SessionCheckout(object):
    """Context manager: hold a session checked out from a pool.

    On ``__enter__`` a session is drawn from the pool (forwarding any
    keyword arguments to :meth:`pool.get`); on ``__exit__`` it is handed
    back via :meth:`pool.put`, whether or not an exception occurred.

    :type pool: concrete subclass of
                :class:`~google.cloud.spanner_v1.session.AbstractSessionPool`
    :param pool: Pool from which to check out a session.
    :param kwargs: extra keyword arguments to be passed to :meth:`pool.get`.
    """

    _session = None  # populated by '__enter__', handed back by '__exit__'

    def __init__(self, pool, **kwargs):
        self._pool = pool
        # Defensive copy: the caller may mutate its kwargs afterwards.
        self._kwargs = dict(kwargs)

    def __enter__(self):
        checked_out = self._pool.get(**self._kwargs)
        self._session = checked_out
        return checked_out

    def __exit__(self, exc_type, exc_value, traceback):
        self._pool.put(self._session)
| |
"""Extract reference documentation from the NumPy source tree.
"""
from __future__ import print_function
import inspect
import textwrap
import re
import pydoc
from six import StringIO
from warnings import warn
from six import iteritems
class Reader(object):
    """A line-based string reader.

    Wraps either a string (split on newlines) or a pre-split list of
    lines, and exposes cursor-style traversal helpers.
    """

    def __init__(self, data):
        """
        Parameters
        ----------
        data : str or list of str
            Text with lines separated by newlines, or a list of lines.
        """
        # Normalize to a list of lines.
        self._str = data if isinstance(data, list) else data.split('\n')
        self.reset()

    def __getitem__(self, n):
        return self._str[n]

    def reset(self):
        # Cursor: index of the next line to be read.
        self._l = 0

    def read(self):
        """Return the current line and advance the cursor; '' at EOF."""
        if self.eof():
            return ''
        current = self[self._l]
        self._l += 1
        return current

    def seek_next_non_empty_line(self):
        """Advance the cursor past a run of blank lines."""
        for candidate in self[self._l:]:
            if candidate.strip():
                break
            self._l += 1

    def eof(self):
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        """Consume lines until ``condition_func(line)`` holds; return them."""
        begin = self._l
        for current in self[begin:]:
            if condition_func(current):
                return self[begin:self._l]
            self._l += 1
            if self.eof():
                # Ran off the end: return everything from `begin` on.
                return self[begin:self._l + 1]
        return []

    def read_to_next_empty_line(self):
        self.seek_next_non_empty_line()

        def stop_at_blank(line):
            return not line.strip()

        return self.read_to_condition(stop_at_blank)

    def read_to_next_unindented_line(self):
        def stop_at_unindented(line):
            return line.strip() and len(line.lstrip()) == len(line)

        return self.read_to_condition(stop_at_unindented)

    def peek(self, n=0):
        """Line ``n`` lines ahead of the cursor, or '' past the end."""
        target = self._l + n
        if target < len(self._str):
            return self[target]
        return ''

    def is_empty(self):
        return not ''.join(self._str).strip()
class NumpyDocString(object):
def __init__(self,docstring):
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
self._parse()
def __getitem__(self,key):
return self._parsed_data[key]
def __setitem__(self,key,val):
if key not in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
def _strip(self,doc):
i = 0
j = 0
for i,line in enumerate(doc):
if line.strip():
break
for j,line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self,content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name,arg_type,desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
    def _parse_see_also(self, content):
        """
        Parse a 'See Also' section.  Accepted forms::

            func_name : Descriptive text
                continued text
            another_func_name : Descriptive text
            func_name1, func_name2, :meth:`func_name`, func_name3

        Returns a list of (name, description_lines, role) tuples.
        """
        items = []
        def parse_item_name(text):
            """Match ':role:`name`' or 'name'"""
            m = self._name_rgx.match(text)
            if m:
                g = m.groups()
                if g[1] is None:
                    # Bare name form: no role.
                    return g[3], None
                else:
                    return g[2], g[1]
            raise ValueError("%s is not a item name" % text)
        def push_item(name, rest):
            # Flush the pending entry (if any) into *items*; clears *rest*
            # in place so the caller's list is reset too.
            if not name:
                return
            name, role = parse_item_name(name)
            items.append((name, list(rest), role))
            del rest[:]
        current_func = None
        rest = []
        for line in content:
            if not line.strip():
                continue
            m = self._name_rgx.match(line)
            if m and line[m.end():].strip().startswith(':'):
                # 'name : description' — start a new entry.
                push_item(current_func, rest)
                current_func, line = line[:m.end()], line[m.end():]
                rest = [line.split(':', 1)[1].strip()]
                if not rest[0]:
                    rest = []
            elif not line.startswith(' '):
                # Unindented line: comma-separated names, or a single name.
                push_item(current_func, rest)
                current_func = None
                if ',' in line:
                    for func in line.split(','):
                        push_item(func, [])
                elif line.strip():
                    current_func = line
            elif current_func is not None:
                # Indented continuation of the current description.
                rest.append(line.strip())
        push_item(current_func, rest)
        return items
    def _parse_index(self, section, content):
        """
        Parse an index directive, e.g.::

            .. index: default
               :refguide: something, else, and more

        Returns a dict mapping index entry names to lists of values; the
        value after '::' on the directive line becomes 'default'.
        """
        def strip_each_in(lst):
            return [s.strip() for s in lst]
        out = {}
        section = section.split('::')
        if len(section) > 1:
            out['default'] = strip_each_in(section[1].split(','))[0]
        for line in content:
            line = line.split(':')
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(','))
        return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
    def _parse(self):
        # Reset the reader and populate _parsed_data from the raw docstring.
        self._doc.reset()
        self._parse_summary()
        for (section,content) in self._read_sections():
            if not section.startswith('..'):
                # Normalise the header, e.g. 'see also' -> 'See Also'.
                section = ' '.join([s.capitalize() for s in section.split(' ')])
            if section in ('Parameters', 'Attributes', 'Methods',
                           'Returns', 'Raises', 'Warns'):
                self[section] = self._parse_param_list(content)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
            elif section == 'See Also':
                self['See Also'] = self._parse_see_also(content)
            else:
                # Free-form sections are stored as raw lines.
                self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*','\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
    def _str_param_list(self, name):
        # Render a parameter-style section back into reST lines:
        # header, then 'name : type' with an indented description each.
        out = []
        if self[name]:
            out += self._str_header(name)
            for param,param_type,desc in self[name]:
                out += ['%s : %s' % (param, param_type)]
                out += self._str_indent(desc)
            out += ['']
        return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
    def _str_see_also(self, func_role):
        # Render 'See Also' entries as reST links; consecutive entries
        # without descriptions are folded onto one comma-separated line.
        if not self['See Also']:
            return []
        out = []
        out += self._str_header("See Also")
        last_had_desc = True
        for func, desc, role in self['See Also']:
            if role:
                # Entry carries its own role, e.g. ':meth:`name`'.
                link = ':%s:`%s`' % (role, func)
            elif func_role:
                link = ':%s:`%s`' % (func_role, func)
            else:
                # No role at all: plain reST reference target.
                link = "`%s`_" % func
            if desc or last_had_desc:
                out += ['']
                out += [link]
            else:
                out[-1] += ", %s" % link
            if desc:
                out += self._str_indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
        out += ['']
        return out
    def _str_index(self):
        # Render the '.. index::' directive; the 'default' entry goes on
        # the directive line, other entries become ':name: refs' fields.
        # NOTE(review): iteritems appears to be a py2/py3 compat helper
        # defined elsewhere in this module — confirm.
        idx = self['index']
        out = []
        out += ['.. index:: %s' % idx.get('default','')]
        for section, references in iteritems(idx):
            if section == 'default':
                continue
            out += [' :%s: %s' % (section, ', '.join(references))]
        return out
    def __str__(self, func_role=''):
        """Render the parsed docstring back into a single reST string."""
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters','Returns','Raises'):
            out += self._str_param_list(param_list)
        out += self._str_section('Warnings')
        out += self._str_see_also(func_role)
        for s in ('Notes','References','Examples'):
            out += self._str_section(s)
        out += self._str_index()
        return '\n'.join(out)
def indent(str,indent=4):
    """Indent every line of *str* by *indent* spaces.

    A ``None`` input yields a single indent-width run of spaces.
    (The first parameter shadows the builtin ``str``; the name is kept
    for backward compatibility with keyword callers.)
    """
    pad = ' '*indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
def dedent_lines(lines):
    """Remove the largest common leading whitespace from *lines*."""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
def header(text, style='-'):
    """Return *text* underlined reST-style with *style* characters."""
    underline = style * len(text)
    return '%s\n%s\n' % (text, underline)
class FunctionDoc(NumpyDocString):
    """Docstring parser for functions and methods.

    Extends `NumpyDocString` by deriving a 'Signature' section via
    introspection when the docstring itself does not provide one.
    """
    def __init__(self, func, role='func', doc=None):
        self._f = func
        self._role = role  # e.g. "func" or "meth"
        if doc is None:
            doc = inspect.getdoc(func) or ''
        try:
            NumpyDocString.__init__(self, doc)
        except ValueError as e:
            # Report parse failures but continue with whatever parsed.
            print('*'*78)
            print("ERROR: '%s' while parsing `%s`" % (e, self._f))
            print('*'*78)
        if not self['Signature']:
            func, func_name = self.get_func()
            try:
                # Try to read the signature by introspection.
                # BUG FIX: inspect.getargspec/formatargspec were removed in
                # Python 3.11; prefer inspect.signature and keep the old
                # path only as a fallback for very old interpreters.
                try:
                    argspec = str(inspect.signature(func))
                except AttributeError:
                    argspec = inspect.formatargspec(*inspect.getargspec(func))
                argspec = argspec.replace('*', r'\*')
                signature = '%s%s' % (func_name, argspec)
            except (TypeError, ValueError):
                # Builtins and some callables are not introspectable
                # (inspect.signature raises ValueError for those).
                signature = '%s()' % func_name
            self['Signature'] = signature
    def get_func(self):
        """Return (callable, name) for the wrapped object; for classes the
        callable is __call__ if present, else __init__."""
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if inspect.isclass(self._f):
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name
    def __str__(self):
        """Render as reST, wrapped in a '.. function::'-style directive."""
        out = ''
        func, func_name = self.get_func()
        roles = {'func': 'function',
                 'meth': 'method'}
        if self._role:
            if self._role not in roles:
                print("Warning: invalid role %s" % self._role)
            out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
                                          func_name)
        out += super(FunctionDoc, self).__str__(func_role=self._role)
        return out
class ClassDoc(NumpyDocString):
    """Docstring parser for classes.

    Parses the class docstring and exposes the class's public methods
    through the `methods` property.
    """
    def __init__(self,cls,modulename='',func_doc=FunctionDoc,doc=None):
        if not inspect.isclass(cls):
            raise ValueError("Initialise using a class. Got %r" % cls)
        self._cls = cls
        # Normalise the module prefix so it always ends with a dot.
        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename
        self._name = cls.__name__
        self._func_doc = func_doc
        if doc is None:
            doc = pydoc.getdoc(cls)
        NumpyDocString.__init__(self, doc)
    @property
    def methods(self):
        # Public callables only (names not starting with '_').
        return [name for name,func in inspect.getmembers(self._cls)
                if not name.startswith('_') and callable(func)]
    def __str__(self):
        out = ''
        out += super(ClassDoc, self).__str__()
        out += "\n\n"
        # Per-method rendering is currently disabled:
        #for m in self.methods:
        #    print "Parsing `%s`" % m
        #    out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
        #    out += '.. index::\n single: %s; %s\n\n' % (self._name, m)
        return out
| |
import os
from typing import List, Optional
from qtpy import QtWidgets, QtCore
import pydicom
import numpy as np
from pydiq.dicom_data import DicomData
from pydiq.dicom_widget import DicomWidget
from pydiq.utils import dicom_files_in_dir
class Viewer(QtWidgets.QMainWindow):
    """Main pydiq window: a zoomable DICOM image view with file and
    series docks plus a status bar showing cursor/HU read-outs.

    Parameters
    ----------
    path : str, optional
        A DICOM file or a directory of DICOM files to load on start-up.
    """
    def __init__(self, path: Optional[str] = None):
        super(Viewer, self).__init__()
        self.setWindowTitle("pydiq - Python DICOM Viewer in Qt")
        self.file = None
        self._file_name = None
        # Display window defaults in Hounsfield units.
        self.high_hu = 2000
        self.low_hu = -1024
        self.pix_label = DicomWidget(self)
        scroll_area = QtWidgets.QScrollArea()
        scroll_area.setWidget(self.pix_label)
        self.setCentralWidget(scroll_area)
        self.series_dock = QtWidgets.QDockWidget("Series", self)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.series_dock)
        self.file_dock = QtWidgets.QDockWidget("Images", self)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.file_dock)
        self.file_list = QtWidgets.QListWidget()
        self.file_list.itemSelectionChanged.connect(self.on_file_item_change)
        self.file_dock.setWidget(self.file_list)
        self.series_list = QtWidgets.QListWidget()
        self.series_dock.setWidget(self.series_list)
        # Status-bar labels for cursor position and pixel value read-out.
        self.hu_label = QtWidgets.QLabel("No image")
        self.c_label = QtWidgets.QLabel("")
        self.cw_label = QtWidgets.QLabel("")
        self.x_label = QtWidgets.QLabel("")
        self.y_label = QtWidgets.QLabel("")
        self.z_label = QtWidgets.QLabel("")
        self.use_fractional_coordinates = True
        self.ij_label = QtWidgets.QLabel("")
        self._zoom_level = 1
        self.mouse_x = -1
        self.mouse_y = -1
        self.statusBar().addPermanentWidget(self.cw_label)
        self.statusBar().addPermanentWidget(self.ij_label)
        self.statusBar().addPermanentWidget(self.x_label)
        self.statusBar().addPermanentWidget(self.y_label)
        self.statusBar().addPermanentWidget(self.z_label)
        self.statusBar().addPermanentWidget(self.hu_label)
        self.data = np.ndarray((512, 512), np.int8)
        self.update_cw()
        # BUG FIX: ``path`` defaults to None, but os.path.isfile(None)
        # raises TypeError — only probe the path when one was given.
        if path:
            if os.path.isfile(path):
                self.load_files([path])
            elif os.path.isdir(path):
                self.load_files(dicom_files_in_dir(path))
        self.build_menu()

    def open_directory(self):
        """Ask the user for a directory and load every DICOM file in it."""
        dialog = QtWidgets.QFileDialog(self)
        dialog.setFileMode(QtWidgets.QFileDialog.DirectoryOnly)
        dialog.setViewMode(QtWidgets.QFileDialog.List)
        dialog.setOption(QtWidgets.QFileDialog.ShowDirsOnly, True)
        if dialog.exec_():
            directory = str(dialog.selectedFiles()[0])
            self.load_files(dicom_files_in_dir(directory))

    def export_image(self):
        """Save the currently rendered image as a PNG chosen by the user."""
        file_name, _ = QtWidgets.QFileDialog.getSaveFileName(
            self,
            "Save file",
            os.path.expanduser("~/dicom-export.png"),
            "PNG images (*.png)"
        )
        if file_name:
            # NOTE(review): reaches into DicomWidget's private _image —
            # consider a public accessor on DicomWidget.
            self.pix_label._image.save(file_name)

    def build_menu(self):
        """Create the File/View/Tools menus and their shortcuts."""
        self.file_menu = QtWidgets.QMenu('&File', self)
        self.file_menu.addAction('&Open directory', self.open_directory, QtCore.Qt.CTRL + QtCore.Qt.Key_O)
        self.file_menu.addAction('&Export image', self.export_image, QtCore.Qt.CTRL + QtCore.Qt.Key_S)
        self.file_menu.addAction('&Quit', self.close, QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
        self.view_menu = QtWidgets.QMenu('&View', self)
        self.view_menu.addAction('Zoom In', self.pix_label.increase_zoom, QtCore.Qt.CTRL + QtCore.Qt.Key_Plus)
        self.view_menu.addAction('Zoom Out', self.pix_label.decrease_zoom, QtCore.Qt.CTRL + QtCore.Qt.Key_Minus)
        self.view_menu.addAction('Zoom 1:1', self.pix_label.reset_zoom, QtCore.Qt.CTRL + QtCore.Qt.Key_0)
        fullscreen = QtWidgets.QAction('&Full Screen', self)
        fullscreen.setCheckable(True)
        fullscreen.setShortcut(QtCore.Qt.Key_F11)
        fullscreen.toggled.connect(self.toggle_full_screen)
        self.view_menu.addAction(fullscreen)
        self.tools_menu = QtWidgets.QMenu("&Tools", self)
        self.tools_menu.addAction('&Show DICOM structure', self.show_structure, QtCore.Qt.Key_F2)
        self.menuBar().addMenu(self.file_menu)
        self.menuBar().addMenu(self.view_menu)
        self.menuBar().addMenu(self.tools_menu)

    def show_structure(self):
        """Show the raw DICOM dataset of the current file in a label."""
        if self.file_name:
            f = pydicom.read_file(self.file_name)
            l = QtWidgets.QLabel(str(f))
            l.show()

    def toggle_full_screen(self, toggled):
        """Switch between full-screen and normal window state."""
        if toggled:
            self.setWindowState(QtCore.Qt.WindowFullScreen)
        else:
            self.setWindowState(QtCore.Qt.WindowNoState)

    def on_file_item_change(self):
        """Sync the displayed file with the file-list selection."""
        if not len(self.file_list.selectedItems()):
            self.file_name = None
        else:
            item = self.file_list.selectedItems()[0]
            # The tooltip holds the full path; the item text is basename only.
            self.file_name = str(item.toolTip())

    def load_files(self, files: List[str]):
        """Populate the file dock with *files* and show the first one."""
        self.series_list.clear()
        self.series = {}
        self.file_list.clear()
        self.files = files
        for file_name in self.files:
            item = QtWidgets.QListWidgetItem(os.path.basename(file_name))
            item.setToolTip(file_name)
            self.file_list.addItem(item)
        self.file_list.setMinimumWidth(self.file_list.sizeHintForColumn(0) + 20)
        if self.files:
            self.file_name = self.files[0]

    def get_coordinates(self, i, j):
        """Map pixel indices (i, j) to patient coordinates.

        NOTE(review): relies on self.image_position / self.pixel_spacing,
        whose initialisation is currently commented out in the file_name
        setter — verify before re-enabling coordinate display.
        """
        x = self.image_position[0] + self.pixel_spacing[0] * i
        y = self.image_position[1] + self.pixel_spacing[1] * j
        z = self.image_position[2]
        return x, y, z

    @property
    def mouse_ij(self):
        '''Mouse position as voxel index in current DICOM slice.'''
        return self.mouse_y // self.zoom_factor, self.mouse_x // self.zoom_factor

    @property
    def mouse_xyz(self):
        '''Mouse position in DICOM coordinates.'''
        if self.use_fractional_coordinates:
            # TODO: Fix for zoom out
            correction = (self.zoom_factor - 1.) / (2. * self.zoom_factor)  # To get center of left top pixel in a zoom grid
            return self.get_coordinates(self.mouse_x / self.zoom_factor - correction, self.mouse_y / self.zoom_factor - correction)
        else:
            return self.get_coordinates(self.mouse_x // self.zoom_factor, self.mouse_y // self.zoom_factor)

    def update_coordinates(self):
        """Refresh the status-bar coordinate/HU labels.

        The read-out branch is currently disabled ('and False') because
        the patient-coordinate attributes are not initialised; the labels
        fall through to the 'No image' state.
        """
        if self.pix_label.data and False:
            x, y, z = self.mouse_xyz
            i, j = self.mouse_ij
            self.z_label.setText("z: %.2f" % z)
            if i >= 0 and j >= 0 and i < self.data.shape[0] and j < self.data.shape[1]:
                self.x_label.setText("x: %.2f" % x)
                self.y_label.setText("y: %.2f" % y)
                self.ij_label.setText("Pos: (%d, %d)" % self.mouse_ij)
                self.hu_label.setText("HU: %d" % int(self.data[i, j]))
                return
            else:
                self.hu_label.setText("HU: ???")
        else:
            self.hu_label.setText("No image")
        self.ij_label.setText("")
        self.x_label.setText("")
        self.y_label.setText("")

    def update_cw(self):
        # Placeholder: center/width label refresh is currently disabled.
        pass

    @property
    def file_name(self):
        """Path of the currently displayed DICOM file (or None)."""
        return self._file_name

    @file_name.setter
    def file_name(self, value):
        try:
            self._file_name = value
            data = DicomData.from_files([self._file_name])
            self.pix_label.data = data
            self.setWindowTitle("pydiq: " + self._file_name)
        except Exception as exc:
            # Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # still propagate; any load failure clears the view.
            print(exc)
            self.pix_label.data = None
            self.setWindowTitle("pydiq: No image")
| |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake 'Build' implementation
#
# Core code for function execution and task handling in the
# BitBake build tools.
#
# Copyright (C) 2003, 2004 Chris Larson
#
# Based on Gentoo's portage.py.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#Based on functions from the base bb module, Copyright 2003 Holger Schurig
from bb import data, fetch, event, mkdirhier, utils
import bb, os
# events
class FuncFailed(Exception):
    """Executed function failed"""
class EventException(Exception):
    """Exception which is associated with an Event."""
    def __init__(self, msg, event):
        # Store both the message and the associated event in args so the
        # event survives pickling/str() of the exception.
        self.args = msg, event
class TaskBase(event.Event):
    """Base class for task events"""
    def __init__(self, t, d ):
        self._task = t
        event.Event.__init__(self, d)
    def getTask(self):
        return self._task
    def setTask(self, task):
        self._task = task
    # Accessor pair exposed as a property for convenience.
    task = property(getTask, setTask, None, "task property")
class TaskStarted(TaskBase):
    """Task execution started"""
class TaskSucceeded(TaskBase):
    """Task execution completed"""
class TaskFailed(TaskBase):
    """Task execution failed"""
class InvalidTask(TaskBase):
    """Invalid Task"""
# functions
def exec_func(func, d, dirs = None):
    """Execute a BB 'function'.

    Dispatches to exec_func_python or exec_func_shell depending on the
    function's 'python' flag, after handling cleandirs/dirs/lockfiles
    flags and the working directory.
    """
    body = data.getVar(func, d)
    if not body:
        return
    # Ensure every flag we consult below exists, defaulting to None.
    flags = data.getVarFlags(func, d)
    for item in ['deps', 'check', 'interactive', 'python', 'cleandirs', 'dirs', 'lockfiles', 'fakeroot']:
        if not item in flags:
            flags[item] = None
    ispython = flags['python']
    # Blow away any directories the function wants cleaned first.
    cleandirs = (data.expand(flags['cleandirs'], d) or "").split()
    for cdir in cleandirs:
        os.system("rm -rf %s" % cdir)
    # Create required directories; we chdir into the last one listed
    # (or ${B} when no dirs are given).
    if dirs:
        dirs = data.expand(dirs, d)
    else:
        dirs = (data.expand(flags['dirs'], d) or "").split()
    for adir in dirs:
        mkdirhier(adir)
    if len(dirs) > 0:
        adir = dirs[-1]
    else:
        adir = data.getVar('B', d, 1)
    # Remember where we were; fall back to TOPDIR if cwd has vanished.
    try:
        prevdir = os.getcwd()
    except OSError:
        prevdir = data.getVar('TOPDIR', d, True)
    if adir and os.access(adir, os.F_OK):
        os.chdir(adir)
    # Take any declared lockfiles for the duration of the run.
    locks = []
    lockfiles = (data.expand(flags['lockfiles'], d) or "").split()
    for lock in lockfiles:
        locks.append(bb.utils.lockfile(lock))
    if flags['python']:
        exec_func_python(func, d)
    else:
        exec_func_shell(func, d, flags)
    for lock in locks:
        bb.utils.unlockfile(lock)
    # Restore the original working directory if it still exists.
    if os.path.exists(prevdir):
        os.chdir(prevdir)
def exec_func_python(func, d):
    """Execute a python BB 'function'"""
    import re, os
    bbfile = bb.data.getVar('FILE', d, 1)
    # Wrap the stored body in a def and immediately call it, so 'return'
    # inside the metadata function behaves as expected.
    tmp = "def " + func + "():\n%s" % data.getVar(func, d)
    tmp += '\n' + func + '()'
    comp = utils.better_compile(tmp, func, bbfile)
    prevdir = os.getcwd()
    g = {} # globals made visible to the metadata code
    g['bb'] = bb
    g['os'] = os
    g['d'] = d
    utils.better_exec(comp, g, tmp, bbfile)
    # Restore the working directory if the function changed it.
    if os.path.exists(prevdir):
        os.chdir(prevdir)
def exec_func_shell(func, d, flags):
    """Execute a shell BB 'function' Returns true if execution was successful.
    For this, it creates a shell script in the tmp directory, writes the local
    data into it and finally executes. The output of the shell will end in a log file and stdout.
    Note on directory behavior.  The 'dirs' varflag should contain a list
    of the directories you need created prior to execution.  The last
    item in the list is where we will chdir/cd to.

    NOTE: this module is Python 2 code ('except E, e', print statements,
    octal 0775, file()) — keep edits Python 2 compatible.
    """
    import sys
    deps = flags['deps']
    check = flags['check']
    interact = flags['interactive']
    # Optional 'check' hook: a global predicate that can skip execution.
    if check in globals():
        if globals()[check](func, deps):
            return
    global logfile
    t = data.getVar('T', d, 1)
    if not t:
        return 0
    mkdirhier(t)
    # Per-invocation log and run scripts, keyed by pid.
    logfile = "%s/log.%s.%s" % (t, func, str(os.getpid()))
    runfile = "%s/run.%s.%s" % (t, func, str(os.getpid()))
    f = open(runfile, "w")
    f.write("#!/bin/sh -e\n")
    if bb.msg.debug_level['default'] > 0: f.write("set -x\n")
    data.emit_env(f, d)
    f.write("cd %s\n" % os.getcwd())
    if func: f.write("%s\n" % func)
    f.close()
    os.chmod(runfile, 0775)
    if not func:
        bb.msg.error(bb.msg.domain.Build, "Function not specified")
        raise FuncFailed()
    # open logs
    si = file('/dev/null', 'r')
    try:
        if bb.msg.debug_level['default'] > 0:
            # Tee output to both the log file and the console.
            so = os.popen("tee \"%s\"" % logfile, "w")
        else:
            so = file(logfile, 'w')
    except OSError, e:
        bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e)
        pass
    se = so
    if not interact:
        # dup the existing fds so we dont lose them
        osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
        oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
        ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
        # replace those fds with our own (stdin from /dev/null,
        # stdout/stderr into the log)
        os.dup2(si.fileno(), osi[1])
        os.dup2(so.fileno(), oso[1])
        os.dup2(se.fileno(), ose[1])
    # execute function
    prevdir = os.getcwd()
    if flags['fakeroot']:
        maybe_fakeroot = "PATH=\"%s\" fakeroot " % bb.data.getVar("PATH", d, 1)
    else:
        maybe_fakeroot = ''
    # Force the C locale so tool output is parseable.
    lang_environment = "LC_ALL=C "
    ret = os.system('%s%ssh -e %s' % (lang_environment, maybe_fakeroot, runfile))
    try:
        os.chdir(prevdir)
    except:
        pass
    if not interact:
        # restore the backups
        os.dup2(osi[0], osi[1])
        os.dup2(oso[0], oso[1])
        os.dup2(ose[0], ose[1])
        # close our logs
        si.close()
        so.close()
        se.close()
        # close the backup fds
        os.close(osi[0])
        os.close(oso[0])
        os.close(ose[0])
    if ret==0:
        # Success: drop the run script when not debugging.
        if bb.msg.debug_level['default'] > 0:
            os.remove(runfile)
            # os.remove(logfile)
        return
    else:
        bb.msg.error(bb.msg.domain.Build, "function %s failed" % func)
        # Optionally echo (part of) the log so failures are visible inline.
        if data.getVar("BBINCLUDELOGS", d):
            bb.msg.error(bb.msg.domain.Build, "log data follows (%s)" % logfile)
            number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d)
            if number_of_lines:
                os.system('tail -n%s %s' % (number_of_lines, logfile))
            else:
                f = open(logfile, "r")
                while True:
                    l = f.readline()
                    if l == '':
                        break
                    l = l.rstrip()
                    print '| %s' % l
                f.close()
        else:
            bb.msg.error(bb.msg.domain.Build, "see log in %s" % logfile)
        raise FuncFailed( logfile )
def exec_task(task, d):
    """Execute an BB 'task'
    The primary difference between executing a task versus executing
    a function is that a task exists in the task digraph, and therefore
    has dependencies amongst other tasks."""
    # Check whether this is a valid task
    if not data.getVarFlag(task, 'task', d):
        raise EventException("No such task", InvalidTask(task, d))
    try:
        bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % task)
        old_overrides = data.getVar('OVERRIDES', d, 0)
        # Run on a copy of the datastore with a task-specific override
        # prepended (task[3:] presumably strips the 'do_' prefix).
        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', 'task-%s:%s' % (task[3:], old_overrides), localdata)
        data.update_data(localdata)
        data.expandKeys(localdata)
        event.fire(TaskStarted(task, localdata))
        exec_func(task, localdata)
        event.fire(TaskSucceeded(task, localdata))
    except FuncFailed, reason:
        bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % reason )
        failedevent = TaskFailed(task, d)
        event.fire(failedevent)
        raise EventException("Function failed in task: %s" % reason, failedevent)
    # make stamp, or cause event and raise exception
    if not data.getVarFlag(task, 'nostamp', d) and not data.getVarFlag(task, 'selfstamp', d):
        make_stamp(task, d)
def extract_stamp(d, fn):
    """
    Return the stamp base path, from either a data dictionary
    (fn unset) or a dataCache entry (fn set).
    """
    if not fn:
        # No filename: 'd' is a plain data dictionary.
        return data.getVar('STAMP', d, 1)
    # A filename means 'd' is a dataCache instance.
    return d.stamp[fn]
def stamp_internal(task, d, file_name):
    """
    Internal stamp helper function
    Removes any stamp for the given task
    Makes sure the stamp directory exists
    Returns the stamp path+filename
    """
    stamp = extract_stamp(d, file_name)
    if not stamp:
        return
    stamp = "%s.%s" % (stamp, task)
    mkdirhier(os.path.dirname(stamp))
    # Remove the file and recreate to force timestamp
    # change on broken NFS filesystems
    if os.access(stamp, os.F_OK):
        os.remove(stamp)
    return stamp
def make_stamp(task, d, file_name = None):
    """
    Creates/updates a stamp for a given task
    (d can be a data dict or dataCache)
    """
    stamp = stamp_internal(task, d, file_name)
    # stamp_internal already removed any old stamp; touching the file
    # recreates it with a fresh timestamp.
    if stamp:
        f = open(stamp, "w")
        f.close()
def del_stamp(task, d, file_name = None):
    """
    Removes a stamp for a given task
    (d can be a data dict or dataCache)
    """
    # stamp_internal removes the stamp file as a side effect; its return
    # value is intentionally unused here.
    stamp_internal(task, d, file_name)
def add_tasks(tasklist, d):
    """Register each task in *tasklist* in the '_task_deps' structure,
    recording its dependency-related flags and parent tasks."""
    task_deps = data.getVar('_task_deps', d)
    if not task_deps:
        task_deps = {}
    if not 'tasks' in task_deps:
        task_deps['tasks'] = []
    if not 'parents' in task_deps:
        task_deps['parents'] = {}
    for task in tasklist:
        task = data.expand(task, d)
        data.setVarFlag(task, 'task', 1, d)
        if not task in task_deps['tasks']:
            task_deps['tasks'].append(task)
        flags = data.getVarFlags(task, d)
        def getTask(name):
            # Record the named dependency flag for this task, if present
            # (closes over the current 'task' and 'flags').
            if not name in task_deps:
                task_deps[name] = {}
            if name in flags:
                deptask = data.expand(flags[name], d)
                task_deps[name][task] = deptask
        getTask('depends')
        getTask('deptask')
        getTask('rdeptask')
        getTask('recrdeptask')
        getTask('nostamp')
        task_deps['parents'][task] = []
        for dep in flags['deps']:
            dep = data.expand(dep, d)
            task_deps['parents'][task].append(dep)
    # don't assume holding a reference
    data.setVar('_task_deps', task_deps, d)
def remove_task(task, kill, d):
    """Remove an BB 'task'.
    If kill is 1, also remove tasks that depend on this task."""
    # NOTE(review): 'kill' is currently unused in this body — dependent
    # tasks are not actually removed; confirm against callers.
    data.delVarFlag(task, 'task', d)
| |
# -*- coding: utf-8 -*-
from datetime import timedelta
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.test import TestCase
from django.utils import timezone
from django_dynamic_fixture import G
from apps.authentication.models import OnlineUser as User
from apps.events.models import AttendanceEvent, Attendee, Event
from apps.payment.models import Payment, PaymentDelay, PaymentPrice, PaymentRelation
from apps.payment.mommy import PaymentDelayHandler, PaymentReminder
class PaymentTest(TestCase):
    """Tests for event payments: creation, receipts, refunds and the
    'mommy' reminder/delay handlers."""

    def setUp(self):
        self.event = G(Event, title='Sjakkturnering')
        # BUG FIX: the kwarg was misspelled 'unnatend_deadline', so the
        # deadline was never set on the fixture; the real model field
        # (used further down in these tests) is 'unattend_deadline'.
        self.attendance_event = G(
            AttendanceEvent,
            event=self.event,
            unattend_deadline=timezone.now() + timedelta(days=1)
        )
        self.user = G(
            User,
            username='ola123',
            ntnu_username='ola123ntnu',
            first_name="ola",
            last_name="nordmann"
        )
        self.event_payment = G(
            Payment,
            object_id=self.event.id,
            content_type=ContentType.objects.get_for_model(AttendanceEvent)
        )
        self.payment_price = G(PaymentPrice, price=200, payment=self.event_payment)

    def simulate_user_payment(self, user):
        # Create the relation and run the payment hook, as the payment
        # flow would.
        G(PaymentRelation, payment=self.event_payment,
          user=user, payment_price=self.payment_price)
        self.event_payment.handle_payment(user)

    def testPaymentCreation(self):
        PaymentRelation.objects.create(
            payment=self.event_payment,
            payment_price=self.payment_price,
            user=self.user
        )
        payment_relation = PaymentRelation.objects.all()[0]
        self.assertEqual(payment_relation.user, self.user)
        self.assertEqual(payment_relation.payment, self.event_payment)

    def testEventDescription(self):
        self.assertEqual(self.event_payment.description(), "Sjakkturnering")

    def testEventPostPaymentCreateAttendee(self):
        self.event_payment.handle_payment(self.user)
        attendee = Attendee.objects.all()[0]
        self.assertEqual(attendee.user, self.user)
        self.assertEqual(attendee.event, self.attendance_event)

    def testEventPaymentCompleteModifyAttendee(self):
        G(Attendee, event=self.attendance_event, user=self.user)
        self.event_payment.handle_payment(self.user)
        attendee = Attendee.objects.all()[0]
        self.assertTrue(attendee.paid)

    def testEventPaymentReceipt(self):
        G(Attendee, event=self.attendance_event, user=self.user)
        payment_relation = G(
            PaymentRelation,
            payment=self.event_payment,
            user=self.user,
            payment_price=self.payment_price
        )
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, "[Kvittering] " + payment_relation.payment.description())
        self.assertEqual(mail.outbox[0].to, [payment_relation.user.email])

    def testEventPaymentRefundCheckUnatendDeadlinePassed(self):
        G(Attendee, event=self.attendance_event, user=self.user)
        payment_relation = G(
            PaymentRelation,
            payment=self.event_payment,
            user=self.user,
            payment_price=self.payment_price
        )
        # Deadline in the past: refund must be rejected.
        self.attendance_event.unattend_deadline = timezone.now() - timedelta(days=1)
        self.attendance_event.save()
        self.assertFalse(self.event_payment.check_refund(payment_relation)[0])

    def testEventPaymentRefundCheckAtendeeExists(self):
        # No Attendee row: nothing to refund.
        payment_relation = G(
            PaymentRelation,
            payment=self.event_payment,
            user=self.user,
            payment_price=self.payment_price
        )
        self.assertFalse(self.event_payment.check_refund(payment_relation)[0])

    def testEventPaymentRefundCheckEventStarted(self):
        G(Attendee, event=self.attendance_event, user=self.user)
        payment_relation = G(
            PaymentRelation,
            payment=self.event_payment,
            user=self.user,
            payment_price=self.payment_price
        )
        # Event already started: refund must be rejected.
        self.event.event_start = timezone.now() - timedelta(days=1)
        self.event.save()
        self.assertFalse(self.event_payment.check_refund(payment_relation)[0])

    def testEventPaymentRefundCheck(self):
        G(Attendee, event=self.attendance_event, user=self.user)
        self.attendance_event.unattend_deadline = timezone.now() + timedelta(days=1)
        self.attendance_event.save()
        self.event.event_start = timezone.now() + timedelta(days=1)
        self.event.save()
        payment_relation = G(
            PaymentRelation,
            payment=self.event_payment,
            user=self.user,
            payment_price=self.payment_price
        )
        # (Removed stray debug print of check_refund() output.)
        self.assertTrue(self.event_payment.check_refund(payment_relation)[0])

    def testEventPaymentRefund(self):
        G(Attendee, event=self.attendance_event, user=self.user)
        self.event_payment.handle_payment(self.user)
        payment_relation = G(
            PaymentRelation,
            payment=self.event_payment,
            user=self.user,
            payment_price=self.payment_price
        )
        self.assertFalse(payment_relation.refunded)
        self.event_payment.handle_refund(payment_relation)
        attendees = Attendee.objects.all()
        self.assertTrue(payment_relation.refunded)
        self.assertEqual(set([]), set(attendees))

    # Mommy
    def testEventMommyNotPaid(self):
        user1 = G(User)
        user2 = G(User)
        G(Attendee, event=self.attendance_event, user=user1)
        G(Attendee, event=self.attendance_event, user=user2)
        self.simulate_user_payment(user1)
        not_paid = PaymentReminder.not_paid(self.event_payment)
        self.assertEqual([user2], not_paid)

    def testEventMommyPaid(self):
        user1 = G(User)
        user2 = G(User)
        G(Attendee, event=self.attendance_event, user=user1)
        G(Attendee, event=self.attendance_event, user=user2)
        self.simulate_user_payment(user1)
        self.simulate_user_payment(user2)
        not_paid = PaymentReminder.not_paid(self.event_payment)
        self.assertFalse(not_paid)

    def testEventMommyPaidWithDelays(self):
        user1 = G(User)
        user2 = G(User)
        user3 = G(User)
        G(Attendee, event=self.attendance_event, user=user1)
        G(Attendee, event=self.attendance_event, user=user2)
        G(Attendee, event=self.attendance_event, user=user3)
        # user2 has an active payment delay and must not be reported.
        G(PaymentDelay, payment=self.event_payment, user=user2,
          valid_to=timezone.now() + timedelta(days=1))
        self.simulate_user_payment(user3)
        not_paid = PaymentReminder.not_paid(self.event_payment)
        self.assertEqual([user1], not_paid)

    def testEventMommyNotPaidMailAddress(self):
        G(Attendee, event=self.attendance_event, user=self.user)
        not_paid_email = PaymentReminder.not_paid_mail_addresses(self.event_payment)
        self.assertEqual([self.user.email], not_paid_email)

    def testMommyPaymentDelay(self):
        G(Attendee, event=self.attendance_event, user=self.user)
        payment_delay = G(
            PaymentDelay,
            payment=self.event_payment,
            user=self.user,
            valid_to=timezone.now() + timedelta(days=1)
        )
        self.assertTrue(payment_delay.active)
        PaymentDelayHandler.handle_deadline_passed(payment_delay, False)
        self.assertFalse(payment_delay.active)

    def testMommyPaymentDelayExcluding(self):
        G(Attendee, event=self.attendance_event, user=self.user)
        not_paid = PaymentReminder.not_paid(self.event_payment)
        self.assertEqual([self.user], not_paid)
        G(
            PaymentDelay,
            payment=self.event_payment,
            user=self.user,
            valid_to=timezone.now() + timedelta(days=1)
        )
        not_paid = PaymentReminder.not_paid(self.event_payment)
        self.assertFalse(not_paid)

    # TODO: test waitlist bump
| |
#!/usr/bin/env python
# coding: utf-8
# ## 17 - AgriPV - Jack Solar Site Modeling
# Modeling Jack Solar AgriPV site in Longmont, CO, for the crop season May–September. The site has two configurations:
#
#
# <b> Configuration A: </b>
# * Under 6 ft panels : 1.8288m
# * Hub height: 6 ft : 1.8288m
#
#
# Configuration B:
# * 8 ft panels : 2.4384m
# * Hub height 8 ft : 2.4384m
#
# Other general parameters:
# * Module Size: 3ft x 6ft (portrait mode)
# * Row-to-row spacing: 17 ft --> 5.1816
# * Torquetube: square, diam 15 cm, zgap = 0
# * Albedo = green grass
#
#
# ### Steps in this Journal:
# <ol>
# <li> <a href='#step1'> Load Bifacial Radiance and other essential packages</a> </li>
# <li> <a href='#step2'> Define all the system variables </a> </li>
# <li> <a href='#step3'> Build Scene for a pretty Image </a> </li>
# </ol>
#
# #### More details
# There are three methods to perform the following analysis:
# <ul><li>A. Hourly with Fixed tilt, getTrackerAngle to update tilt of tracker </li>
# <li>B. Hourly with gendaylit1axis using the tracking dictionary </li>
# <li>C. Cumulatively with gencumsky1axis </li>
# </ul>
#
#
# The analysis itself is performed with the HPC with method A, and results are compared to GHI (equations below). The code below shows how to build the geometry and view it for accuracy, as well as evaluate monthly GHI, as well as how to model it with `gencumsky1axis` which is more suited for non-hpc environments.
#
#
#
# 
#
# <a id='step1'></a>
# ## 1. Load Bifacial Radiance and other essential packages
# In[1]:
import bifacial_radiance
import numpy as np
import os # this operative system to do the relative-path testfolder for this example.
import pprint # We will be pretty-printing the trackerdictionary throughout to show its structure.
from pathlib import Path
import pandas as pd
# <a id='step2'></a>
# ## 2. Define all the system variables
# In[2]:
# Output folder: bifacial_radiance/Tutorial_17 two levels above this file.
testfolder = str(Path().resolve().parent.parent / 'bifacial_radiance' / 'Tutorial_17')
if not os.path.exists(testfolder):
    os.makedirs(testfolder)
timestamp = 4020 # Noon, June 17th.
simulationName = 'tutorial_17'  # Optionally adding a simulation name when defining RadianceObj
# Location
lat = 40.1217  # Given for the project site at Colorado
lon = -105.1310  # Given for the project site at Colorado
# MakeModule Parameters
moduletype='test-module'
numpanels = 1  # This site has 1 module in Y-direction
x = 1
y = 2
#xgap = 0.15 # Leaving 15 centimeters between modules on x direction
#ygap = 0.10 # Leaving 10 centimeters between modules on y direction
zgap = 0 # no gap to torquetube.
sensorsy = 6  # this will give 6 sensors per module in y-direction
sensorsx = 3  # this will give 3 sensors per module in x-direction
torquetube = True
axisofrotationTorqueTube = True
diameter = 0.15  # 15 cm diameter for the torquetube
tubetype = 'square'  # Put the right keyword upon reading the documentation
material = 'black'  # Torque tube of this material (0% reflectivity)
# Scene variables
nMods = 20
nRows = 7
hub_height = 1.8  # meters
pitch = 5.1816  # meters # Pitch is the known parameter
albedo = 0.2  #'Grass'  # ground albedo
gcr = y/pitch
cumulativesky = False
limit_angle = 60  # tracker rotation limit angle
angledelta = 0.01  # hourly simulation: keep the tracker angle as close to true tracking as possible
backtrack = True
# In[3]:
# Per-hour working-folder name pattern (matches the HPC runs), e.g. Hour_0272.
test_folder_fmt = 'Hour_{}'
# <a id='step3'></a>
# # 3. Build Scene for a pretty Image
# In[4]:
idx = 272  # hour index into the weather data (coerced to year 2021 below)
test_folderinner = os.path.join(testfolder, test_folder_fmt.format(f'{idx:04}'))
if not os.path.exists(test_folderinner):
    os.makedirs(test_folderinner)

rad_obj = bifacial_radiance.RadianceObj(simulationName,path = test_folderinner) # Create a RadianceObj 'object'
rad_obj.setGround(albedo)
epwfile = rad_obj.getEPW(lat,lon)
metdata = rad_obj.readWeatherFile(epwfile, label='center', coerce_year=2021)
# Solar position for this hour; azimuth shifted by -180 into Radiance convention.
solpos = rad_obj.metdata.solpos.iloc[idx]
zen = float(solpos.zenith)
azm = float(solpos.azimuth) - 180
dni = rad_obj.metdata.dni[idx]
dhi = rad_obj.metdata.dhi[idx]
rad_obj.gendaylit(idx)
# rad_obj.gendaylit2manual(dni, dhi, 90 - zen, azm)
#print(rad_obj.metdata.datetime[idx])
# Tracker tilt for this timestamp.
# NOTE(review): limit_angle=65 is hard-coded here while the configuration
# section above sets limit_angle = 60 -- confirm which limit is intended.
tilt = round(rad_obj.getSingleTimestampTrackerAngle(rad_obj.metdata, idx, gcr, limit_angle=65),1)
sceneDict = {'pitch': pitch, 'tilt': tilt, 'azimuth': 90, 'hub_height':hub_height, 'nMods':nMods, 'nRows': nRows}
scene = rad_obj.makeScene(module=moduletype,sceneDict=sceneDict)
octfile = rad_obj.makeOct()
# #### The scene generated can be viewed by navigating on the terminal to the testfolder and typing
#
# > rvu -vf views\front.vp -e .0265652 -vp 2 -21 2.5 -vd 0 1 0 tutorial_17.oct
#
# #### OR Comment the ! line below to run rvu from the Jupyter notebook instead of your terminal.
#
# In[5]:
## Comment the ! line below to run rvu from the Jupyter notebook instead of your terminal.
## Simulation will stop until you close the rvu window
#!rvu -vf views\front.vp -e .0265652 -vp 2 -21 2.5 -vd 0 1 0 tutorial_17.oct
# <a id='step4'></a>
# # GHI Calculations
#
# ### From Weather File
# In[6]:
# BOULDER
# Simple method where I know the index where the month starts and collect the monthly values this way.
# In 8760 TMY, this were the indexes:
# Hard-coded 8760-TMY month-boundary indexes, kept for reference only --
# both lists are immediately overwritten by the datetime lookups below.
starts = [2881, 3626, 4346, 5090, 5835]
ends = [3621, 4341, 5085, 5829, 6550]
# Index of the first (6:00) and last (18:00) sample of each month, May-Sep,
# located by timestamp (UTC-7) in the coerced-2021 weather data.
starts = [metdata.datetime.index(pd.to_datetime('2021-05-01 6:0:0 -7')),
          metdata.datetime.index(pd.to_datetime('2021-06-01 6:0:0 -7')),
          metdata.datetime.index(pd.to_datetime('2021-07-01 6:0:0 -7')),
          metdata.datetime.index(pd.to_datetime('2021-08-01 6:0:0 -7')),
          metdata.datetime.index(pd.to_datetime('2021-09-01 6:0:0 -7'))]
ends = [metdata.datetime.index(pd.to_datetime('2021-05-31 18:0:0 -7')),
        metdata.datetime.index(pd.to_datetime('2021-06-30 18:0:0 -7')),
        metdata.datetime.index(pd.to_datetime('2021-07-31 18:0:0 -7')),
        metdata.datetime.index(pd.to_datetime('2021-08-31 18:0:0 -7')),
        metdata.datetime.index(pd.to_datetime('2021-09-30 18:0:0 -7'))]
ghi_Boulder = []
for ii in range(0, len(starts)):
    start = starts[ii]
    end = ends[ii]
    # NOTE(review): Python slicing excludes `end`, so the 18:00 sample of the
    # last day of each month is not included in the sum -- confirm intended.
    ghi_Boulder.append(metdata.ghi[start:end].sum())
print(" GHI Boulder Monthly May to September Wh/m2:", ghi_Boulder)
# ### With raytrace
# In[15]:
# Cumulative-sky (gencumsky) estimate of total May irradiance on an empty
# field: a 1x1 "scene" with hub_height = -0.2 m (below grade) and a front
# scan point raised to 0.5 m above the ground -- presumably so the sensor
# sees only sky and ground, not the module; TODO confirm.
simulationName = 'EMPTYFIELD_MAY'
starttime = pd.to_datetime('2021-05-01 6:0:0')
endtime = pd.to_datetime('2021-05-31 18:0:0')
rad_obj = bifacial_radiance.RadianceObj(simulationName)
rad_obj.setGround(albedo)
metdata = rad_obj.readWeatherFile(epwfile, label='center', coerce_year=2021, starttime=starttime, endtime=endtime)
rad_obj.genCumSky()
#print(rad_obj.metdata.datetime[idx])
sceneDict = {'pitch': pitch, 'tilt': 0, 'azimuth': 90, 'hub_height':-0.2, 'nMods':1, 'nRows': 1}
scene = rad_obj.makeScene(module=moduletype,sceneDict=sceneDict)
octfile = rad_obj.makeOct()
analysis = bifacial_radiance.AnalysisObj()
frontscan, backscan = analysis.moduleAnalysis(scene, sensorsy=1)
frontscan['zstart'] = 0.5  # raise the scan point to 0.5 m above ground level
frontdict, backdict = analysis.analysis(octfile = octfile, name='FIELDTotal', frontscan=frontscan, backscan=backscan)
print("FIELD TOTAL MAY:", analysis.Wm2Front[0])
# # Next STEPS: Raytrace Every hour of the Month on the HPC -- Check HPC Scripts for Jack Solar
| |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import uuid
import functools
from devtools_testutils import recorded_by_proxy
from azure.core.exceptions import HttpResponseError
from azure.ai.formrecognizer._generated.v2022_01_30_preview.models import GetOperationResponse, ModelInfo
from azure.ai.formrecognizer import DocumentModel
from azure.ai.formrecognizer import DocumentModelAdministrationClient
from testcase import FormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
from preparers import FormRecognizerPreparer
DocumentModelAdministrationClientPreparer = functools.partial(_GlobalClientPreparer, DocumentModelAdministrationClient)
@pytest.mark.skip()
class TestCopyModel(FormRecognizerTest):
    """Tests for DocumentModelAdministrationClient copy-model operations
    (begin_copy_model / get_copy_authorization), 2022-01-30-preview API.

    NOTE(review): in the @recorded_by_proxy tests the test proxy replays
    recorded requests in sequence, so the order of service calls inside each
    test is behavior -- do not reorder them.
    """

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    def test_copy_model_none_model_id(self, client):
        """model_id=None must be rejected client-side before any request."""
        with pytest.raises(ValueError):
            client.begin_copy_model(model_id=None, target={})

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    def test_copy_model_empty_model_id(self, client):
        """An empty model_id must be rejected client-side."""
        with pytest.raises(ValueError):
            client.begin_copy_model(model_id="", target={})

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @pytest.mark.skip()
    @recorded_by_proxy
    def test_copy_model_successful(self, client, formrecognizer_storage_container_sas_url, **kwargs):
        """Build a model, copy it, then verify the copy's metadata and schema."""
        poller = client.begin_build_model(formrecognizer_storage_container_sas_url)
        model = poller.result()
        target = client.get_copy_authorization(tags={"testkey": "testvalue"})
        poller = client.begin_copy_model(model.model_id, target=target)
        copy = poller.result()
        assert copy.model_id == target["targetModelId"]
        assert copy.description is None
        assert copy.created_on
        assert copy.tags == {"testkey": "testvalue"}
        for name, doc_type in copy.doc_types.items():
            # A freshly built model exposes one doc type named after the model id.
            assert name == target["targetModelId"]
            for key, field in doc_type.field_schema.items():
                assert key
                assert field["type"]
                assert doc_type.field_confidence[key] is not None

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @pytest.mark.skip()
    @recorded_by_proxy
    def test_copy_model_with_model_id_and_desc(self, client, formrecognizer_storage_container_sas_url, **kwargs):
        """Copy using a caller-chosen target model id and description."""
        poller = client.begin_build_model(formrecognizer_storage_container_sas_url)
        model = poller.result()
        model_id = str(uuid.uuid4())
        description = "this is my copied model"
        target = client.get_copy_authorization(model_id=model_id, description=description)
        poller = client.begin_copy_model(model.model_id, target=target)
        copy = poller.result()
        # The chosen id is only honored live; recordings may differ.
        if self.is_live:
            assert copy.model_id == model_id
        assert copy.model_id
        # assert copy.description == "this is my copied model" TODO not showing up?
        assert copy.created_on
        for name, doc_type in copy.doc_types.items():
            if self.is_live:
                assert name == target["targetModelId"]
            for key, field in doc_type.field_schema.items():
                assert key
                assert field["type"]
                assert doc_type.field_confidence[key] is not None

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @recorded_by_proxy
    def test_copy_model_fail_bad_model_id(self, client, formrecognizer_storage_container_sas_url, **kwargs):
        """Copying a nonexistent model id must raise HttpResponseError."""
        poller = client.begin_build_model(formrecognizer_storage_container_sas_url)
        model = poller.result()
        target = client.get_copy_authorization()
        with pytest.raises(HttpResponseError):
            # give bad model_id
            poller = client.begin_copy_model("00000000-0000-0000-0000-000000000000", target=target)
            copy = poller.result()

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @pytest.mark.skip()
    @recorded_by_proxy
    def test_copy_model_transform(self, client, formrecognizer_storage_container_sas_url, **kwargs):
        """Check the raw-to-public model transform via a response callback."""
        poller = client.begin_build_model(formrecognizer_storage_container_sas_url)
        model = poller.result()
        target = client.get_copy_authorization()
        raw_response = []

        def callback(response, _, headers):
            # Capture both the generated (wire) model and the public model.
            op_response = client._deserialize(GetOperationResponse, response)
            model_info = client._deserialize(ModelInfo, op_response.result)
            document_model = DocumentModel._from_generated(model_info)
            raw_response.append(model_info)
            raw_response.append(document_model)

        poller = client.begin_copy_model(model.model_id, target=target, cls=callback)
        copy = poller.result()
        generated = raw_response[0]
        copy = raw_response[1]
        self.assertModelTransformCorrect(copy, generated)

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @recorded_by_proxy
    def test_copy_authorization(self, client, formrecognizer_region, formrecognizer_resource_id, **kwargs):
        """The copy authorization must carry all fields begin_copy_model needs."""
        target = client.get_copy_authorization()
        assert target["targetResourceId"] == formrecognizer_resource_id
        assert target["targetResourceRegion"] == formrecognizer_region
        assert target["targetModelId"]
        assert target["accessToken"]
        assert target["expirationDateTime"]
        assert target["targetModelLocation"]

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @pytest.mark.skip()
    @recorded_by_proxy
    def test_copy_model_with_composed_model(self, client, formrecognizer_storage_container_sas_url, **kwargs):
        """Copy a composed model; doc types keep the component model ids."""
        poller_1 = client.begin_build_model(formrecognizer_storage_container_sas_url)
        model_1 = poller_1.result()
        poller_2 = client.begin_build_model(formrecognizer_storage_container_sas_url)
        model_2 = poller_2.result()
        composed_poller = client.begin_create_composed_model([model_1.model_id, model_2.model_id])
        composed_model = composed_poller.result()
        target = client.get_copy_authorization()
        poller = client.begin_copy_model(composed_model.model_id, target=target)
        copy = poller.result()
        assert target["targetModelId"] == copy.model_id
        assert target["targetModelId"] != composed_model.model_id
        assert copy.model_id
        assert copy.description is None
        assert copy.created_on
        for name, doc_type in copy.doc_types.items():
            assert name in [model_1.model_id, model_2.model_id]
            for key, field in doc_type.field_schema.items():
                assert key
                assert field["type"]
                assert doc_type.field_confidence[key] is not None

    @pytest.mark.live_test_only
    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @pytest.mark.skip()
    def test_copy_continuation_token(self, **kwargs):
        """A copy poller can be rehydrated from its continuation token."""
        client = kwargs.pop("client")
        formrecognizer_storage_container_sas_url = kwargs.pop("formrecognizer_storage_container_sas_url")
        poller = client.begin_build_model(formrecognizer_storage_container_sas_url)
        model = poller.result()
        target = client.get_copy_authorization()
        initial_poller = client.begin_copy_model(model.model_id, target=target)
        cont_token = initial_poller.continuation_token()
        poller = client.begin_copy_model(model.model_id, None, continuation_token=cont_token)
        result = poller.result()
        assert result
        initial_poller.wait()  # necessary so azure-devtools doesn't throw assertion error

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @pytest.mark.skip()
    @recorded_by_proxy
    def test_poller_metadata(self, client, formrecognizer_storage_container_sas_url, **kwargs):
        """Operation metadata must be exposed on the copy poller."""
        poller = client.begin_build_model(formrecognizer_storage_container_sas_url)
        model = poller.result()
        target = client.get_copy_authorization()
        poller = client.begin_copy_model(model.model_id, target=target)
        assert poller.operation_id
        assert poller.percent_completed is not None
        poller.result()
        assert poller.operation_kind == "documentModelCopyTo"
        assert poller.percent_completed == 100
        assert poller.resource_location_url
        assert poller.created_on
        assert poller.last_updated_on
| |
import os
from dolfin import MPI, XDMFFile, HDF5File, Mesh
import dolfin as df
from .cmd import info_red, info_cyan, MPI_rank, MPI_size, info_on_red
import simplejson as json
from xml.etree import cElementTree as ET
import mpi4py
__author__ = "Gaute Linga"
__date__ = "2017-05-26"
__copyright__ = "Copyright (C) 2017 " + __author__
__license__ = "MIT"
__all__ = ["mpi_is_root", "makedirs_safe", "load_parameters",
"dump_parameters", "create_initial_folders",
"save_solution", "save_checkpoint", "load_checkpoint",
"load_mesh", "remove_safe", "parse_xdmf",
"get_mesh_max", "get_mesh_min"]
def mpi_is_root():
    """Return True when executing on the MPI root process (rank 0)."""
    rank_is_zero = MPI_rank == 0
    return rank_is_zero
def mpi_barrier():
    """Block until every rank reaches this point (mpi4py world barrier)."""
    world = mpi4py.MPI.COMM_WORLD
    world.Barrier()
def mpi_comm():
    """Return the global dolfin MPI communicator across dolfin API versions."""
    major_version = int(df.__version__.split(".")[0])
    if major_version < 2018:
        # Legacy dolfin exposed the world communicator as a function.
        # Consider removing support for earlier versions.
        return df.mpi_comm_world()
    return MPI.comm_world
def makedirs_safe(folder):
    """Create *folder* (and missing parents) on the MPI root rank only.

    Uses EAFP instead of the original exists()/makedirs() pair, which had a
    TOCTOU race: a directory created concurrently (e.g. by another job on a
    shared filesystem) between the check and the mkdir raised OSError.
    """
    if mpi_is_root():
        try:
            os.makedirs(folder)
        except OSError:
            # Swallow only the "already exists" case; re-raise real
            # failures such as permission errors or a file in the way.
            if not os.path.isdir(folder):
                raise
def remove_safe(path):
    """Remove *path* on the MPI root rank only.

    Uses EAFP instead of the original exists()/remove() pair, which had a
    TOCTOU race: a file removed concurrently between the check and the
    os.remove() call raised OSError.
    """
    if mpi_is_root():
        try:
            os.remove(path)
        except OSError:
            # Ignore only "did not exist"; re-raise anything else
            # (e.g. permission denied on a file that is still there).
            if os.path.exists(path):
                raise
def dump_parameters(parameters, settingsfilename):
    """Serialize the parameter dictionary to a JSON settings file.

    Keys are sorted and the output indented so successive dumps diff cleanly.
    """
    serialized = json.dumps(parameters, indent=4*' ', sort_keys=True)
    with open(settingsfilename, "w") as outfile:
        outfile.write(serialized)
def load_parameters(parameters, settingsfilename):
    """Update *parameters* in place from a JSON settings file.

    Aborts the whole program if the settings file does not exist.
    """
    if not os.path.exists(settingsfilename):
        info_on_red("File " + settingsfilename + " does not exist.")
        exit()
    with open(settingsfilename, "r") as infile:
        loaded = json.load(infile)
    parameters.update(loaded)
def create_initial_folders(folder, restart_folder, fields, tstep,
                           parameters):
    """ Create the run folder hierarchy and open XDMF timeseries files.

    Returns (newfolder, tstepfiles) where tstepfiles maps each field name
    to an open XDMFFile under <newfolder>/Timeseries.
    """
    info_cyan("Creating folders.")
    # Only the root rank creates directories (makedirs_safe); the barriers
    # keep other ranks from racing ahead and using folders that do not
    # exist yet.
    makedirs_safe(folder)
    mpi_barrier()
    if restart_folder:
        # Restarting: reuse the run folder the checkpoint came from.
        newfolder = restart_folder.split("Checkpoint")[0]
    else:
        # Fresh run: pick the next integer-numbered subfolder (1, 2, 3, ...).
        previous_list = os.listdir(folder)
        if len(previous_list) == 0:
            newfolder = os.path.join(folder, "1")
        else:
            previous = max([int(entry) if entry.isdigit() else 0
                            for entry in previous_list])
            newfolder = os.path.join(folder, str(previous+1))

    mpi_barrier()
    tstepfolder = os.path.join(newfolder, "Timeseries")
    makedirs_safe(tstepfolder)
    makedirs_safe(os.path.join(newfolder, "Statistics"))
    settingsfolder = os.path.join(newfolder, "Settings")
    makedirs_safe(settingsfolder)
    makedirs_safe(os.path.join(newfolder, "Checkpoint"))

    # Initialize timestep files
    tstepfiles = dict()
    for field in fields:
        filename = os.path.join(tstepfolder,
                                field + "_from_tstep_{}.xdmf".format(tstep))
        tstepfiles[field] = XDMFFile(mpi_comm(), filename)
        # Mesh is static between timesteps; flush so the files are readable
        # while the simulation is still running.
        tstepfiles[field].parameters["rewrite_function_mesh"] = False
        tstepfiles[field].parameters["flush_output"] = True

    # Dump settings (root rank only -- plain file write, not collective).
    if mpi_is_root():
        dump_parameters(parameters, os.path.join(
            settingsfolder, "parameters_from_tstep_{}.dat".format(tstep)))
    return newfolder, tstepfiles
def save_solution(tstep, t, T, mesh, w_, w_1, folder, newfolder,
                  save_intv, checkpoint_intv,
                  parameters, tstepfiles, subproblems,
                  **namespace):
    """Save visualization output and/or a checkpoint for this timestep.

    Returns True when the simulation should stop, i.e. when a kill file was
    found or the end time T has been reached.
    """
    if tstep % save_intv == 0:
        # Periodic lightweight snapshot for visualization.
        save_xdmf(t, w_, subproblems, tstepfiles)
    # check_if_kill must run on every call: it is collective and also
    # removes the kill file when one is found.
    stop = check_if_kill(folder) or t >= T
    if stop or tstep % checkpoint_intv == 0:
        # Full restartable state.
        save_checkpoint(tstep, t, mesh, w_, w_1, newfolder, parameters)
    return stop
def check_if_kill(folder):
    """Return True if the user placed a 'kill' file in *folder*.

    The check is collective: if any rank sees the file, all ranks agree to
    stop, and the kill file is removed so a later run is not affected.
    """
    found_here = 1 if "kill" in os.listdir(folder) else 0
    found_anywhere = MPI.sum(mpi_comm(), found_here)
    if found_anywhere > 0:
        remove_safe(os.path.join(folder, "kill"))
        info_red("Stopping simulation.")
        return True
    return False
def save_xdmf(t, w_, subproblems, tstepfiles):
    """Write the current fields of every subproblem to their XDMF files.

    Only fields that have an open file in *tstepfiles* are written.
    """
    for name, subproblem in subproblems.items():
        components = w_[name].split()
        if len(subproblem) > 1:
            # Mixed space: write each component under its own field name.
            for spec, component in zip(subproblem, components):
                field = spec["name"]
                if field in tstepfiles:
                    component.rename(field, "tmp")
                    tstepfiles[field].write(component, float(t))
        else:
            # Single-field subproblem: write the whole function directly.
            field = subproblem[0]["name"]
            if field in tstepfiles:
                func = w_[name]
                func.rename(field, "tmp")
                tstepfiles[field].write(func, float(t))
def save_checkpoint(tstep, t, mesh, w_, w_1, newfolder, parameters):
    """ Save checkpoint files.

    A part of this is taken from the Oasis code.

    Writes parameters.dat and fields.h5 under <newfolder>/Checkpoint.
    The previous versions are first renamed to *.old so that a crash during
    the write does not destroy the last good checkpoint; the .old files are
    removed only after the new files are complete.
    """
    checkpointfolder = os.path.join(newfolder, "Checkpoint")
    # Record run state needed to resume: process count, time, step number.
    parameters["num_processes"] = MPI_size
    parameters["t_0"] = t
    parameters["tstep"] = tstep
    parametersfile = os.path.join(checkpointfolder, "parameters.dat")
    parametersfile_old = parametersfile + ".old"
    if mpi_is_root():
        # In case of failure, keep old file.
        if os.path.exists(parametersfile):
            os.system("mv {0} {1}".format(parametersfile,
                                          parametersfile_old))
        dump_parameters(parameters, parametersfile)
    mpi_barrier()
    h5filename = os.path.join(checkpointfolder, "fields.h5")
    h5filename_old = h5filename + ".old"
    # In case of failure, keep old file.
    if mpi_is_root() and os.path.exists(h5filename):
        os.system("mv {0} {1}".format(h5filename, h5filename_old))
    # Collective HDF5 write: every rank must participate, hence the barriers
    # around the per-field writes below.
    h5file = HDF5File(mpi_comm(), h5filename, "w")
    h5file.flush()
    info_red("Storing mesh")
    h5file.write(mesh, "mesh")
    for field in w_:
        info_red("Storing subproblem: " + field)
        mpi_barrier()
        h5file.write(w_[field], "{}/current".format(field))
        if field in w_1:
            h5file.write(w_1[field], "{}/previous".format(field))
        mpi_barrier()
    h5file.close()
    # Since program is still running, delete the old files.
    remove_safe(h5filename_old)
    mpi_barrier()
    remove_safe(parametersfile_old)
def load_checkpoint(checkpointfolder, w_, w_1):
    """Restore current (w_) and previous (w_1) solutions from a checkpoint.

    Does nothing when *checkpointfolder* is falsy.
    """
    if not checkpointfolder:
        return
    h5filename = os.path.join(checkpointfolder, "fields.h5")
    h5file = HDF5File(mpi_comm(), h5filename, "r")
    for field in w_:
        info_red("Loading subproblem: {}".format(field))
        h5file.read(w_[field], "{}/current".format(field))
        h5file.read(w_1[field], "{}/previous".format(field))
    h5file.close()
def load_mesh(filename, subdir="mesh",
              use_partition_from_file=False):
    """Read and return the Mesh stored under *subdir* in HDF5 file *filename*."""
    info_cyan("Loading mesh: " + filename)
    loaded_mesh = Mesh()
    h5 = HDF5File(loaded_mesh.mpi_comm(), filename, "r")
    h5.read(loaded_mesh, subdir, use_partition_from_file)
    h5.close()
    return loaded_mesh
def parse_xdmf(xml_file, get_mesh_address=False):
    """Parse a dolfin XDMF index file.

    Returns a list of (timestamp, dataset_address) tuples, or -- when
    get_mesh_address is True -- the tuple (datasets, topology_address,
    geometry_address) as soon as both mesh addresses have been located.
    """
    root = ET.parse(xml_file).getroot()
    datasets = []
    times_from_header = []
    # When mesh addresses are not requested, mark them found up front so the
    # Topology/Geometry branches below are skipped entirely.
    topology_found = not get_mesh_address
    geometry_found = not get_mesh_address
    for i, node in enumerate(root[0][0]):
        if node.tag == "Time":
            # Support for earlier dolfin formats: one header element listing
            # every timestamp, used as a fallback for grids without one.
            times_from_header = [float(t) for t in
                                 node[0].text.strip().split(" ")]
        elif node.tag == "Grid":
            stamp = None
            dset_address = None
            for child in node:
                if child.tag == "Time":
                    stamp = float(child.attrib["Value"])
                elif child.tag == "Attribute":
                    dset_address = child[0].text.split(":")[1]
                elif not topology_found and child.tag == "Topology":
                    topology_address = child[0].text.split(":")
                    topology_address[0] = os.path.join(
                        os.path.dirname(xml_file), topology_address[0])
                    topology_found = True
                elif not geometry_found and child.tag == "Geometry":
                    geometry_address = child[0].text.split(":")
                    geometry_address[0] = os.path.join(
                        os.path.dirname(xml_file), geometry_address[0])
                    geometry_found = True
            if stamp is None:
                stamp = times_from_header[i-1]
            datasets.append((stamp, dset_address))
            if get_mesh_address and topology_found and geometry_found:
                return (datasets, topology_address, geometry_address)
    return datasets
def get_mesh_max(mesh, dim):
    """Return the global maximum coordinate of *mesh* along axis *dim*.

    The reduction runs across all MPI ranks and the result is broadcast
    back, so every rank receives the same value.

    FIX: the original sliced ``mesh.coordinates()[:]``, making a full copy
    of the coordinate array before the read-only column reduction.
    """
    coords = mesh.coordinates()
    comm = mpi4py.MPI.COMM_WORLD
    local_max = coords[:, dim].max()
    global_max = comm.reduce(local_max, op=mpi4py.MPI.MAX, root=0)
    # Broadcast so non-root ranks do not end up with reduce()'s None.
    return comm.bcast(global_max, root=0)
def get_mesh_min(mesh, dim):
    """Return the global minimum coordinate of *mesh* along axis *dim*.

    The reduction runs across all MPI ranks and the result is broadcast
    back, so every rank receives the same value.

    FIX: the original sliced ``mesh.coordinates()[:]``, making a full copy
    of the coordinate array before the read-only column reduction.
    """
    coords = mesh.coordinates()
    comm = mpi4py.MPI.COMM_WORLD
    local_min = coords[:, dim].min()
    global_min = comm.reduce(local_min, op=mpi4py.MPI.MIN, root=0)
    # Broadcast so non-root ranks do not end up with reduce()'s None.
    return comm.bcast(global_min, root=0)
| |
#!/usr/bin/env python
###
## Walk Root Directory and Return a List of all Files in all Subdirs too
def recursive_dirlist(rootdir):
    """Walk *rootdir* recursively and return a sorted, de-duplicated list of
    absolute paths of every regular file found.

    BUGFIX: the original returned ``list(set(sorted(walkedlist)))`` -- the
    sort happened *before* the set, so its ordering was destroyed and the
    result came back in arbitrary order.  Deduplicate first, then sort.
    """
    import os
    walkedlist = []
    for dirname, subdirnames, filenames in os.walk(rootdir):
        # append path of all filenames to walkedlist
        for filename in filenames:
            file_path = os.path.abspath(os.path.join(dirname, filename))
            if os.path.isfile(file_path):
                walkedlist.append(file_path)
        # Advanced usage:
        # editing the 'dirnames' list will stop os.walk() from recursing into there.
        #if '.git' in dirnames:
        # don't go into any .git directories.
        #    dirnames.remove('.git')
    return sorted(set(walkedlist))
###
## Extract All Metadata from Image File as Dict
def get_exif(file_path):
    """Return all EXIF metadata of the image at *file_path* as a dict,
    with numeric EXIF tag ids translated to their human-readable names."""
    from PIL import Image
    from PIL.ExifTags import TAGS
    image = Image.open(file_path)
    raw_tags = image._getexif()
    exifdata = {}
    for tag_id, value in raw_tags.items():
        # Fall back to the numeric id when the tag name is unknown.
        exifdata[TAGS.get(tag_id, tag_id)] = value
    return exifdata
###
## Make Lowres Thumbnails from Image Files or a Directory Full of Image Files
def make_lowres_thumbnails_dir_or_singlefile(pathname):
    """Create low-resolution JPEG thumbnails for a single image file or for
    every *.jpg in a directory.

    NOTE(review): the thumbnail is saved to the path *without* its extension
    (the os.path.splitext base), and the bare excepts swallow every PIL/IO
    error and only print -- confirm both behaviors are intended.
    """
    from PIL import Image
    import glob, os, re
    size = 600, 720  # max width, max height; thumbnail() keeps aspect ratio
    regex_jpeg = re.compile(r'.+?\.[jpgJPG]{3}$')
    # regex_jpeg_colorstyle = re.compile(r'.+?[0-9]{9}_[1-6][.jpg|.JPG]$')
    if re.findall(regex_jpeg, pathname):
        ## If input variable is a single file, create one thumbnail
        if os.path.isfile(pathname):
            try:
                infile = os.path.abspath(pathname)
                filename, ext = os.path.splitext(infile)
                im = Image.open(infile)
                im.thumbnail(size, Image.ANTIALIAS)
                im.save(filename , "JPEG")
            except:
                print "Error Creating Single File Thumbnail for {0}".format(infile)
        ## If input variable is a directory, descend into it and create thumbnails for all jpgs
        elif os.path.isdir(pathname):
            dirname = os.path.abspath(pathname)
            for infile in glob.glob(os.path.join(dirname, "*.jpg")):
                try:
                    filename, ext = os.path.splitext(infile)
                    im = Image.open(infile)
                    im.thumbnail(size, Image.ANTIALIAS)
                    im.save(filename, "JPEG")
                except:
                    print "Error Creating Thumbnail for {0}".format(infile)
###
## Write Rows to Dated CSV in Users Home Dir If Desired
def csv_write_datedOutfile(lines):
    """Append *lines* to a dated stylestrings CSV in the user's home dir.

    BUGFIX: the original looped ``for line in lines`` but ignored ``line``
    and re-opened the file and re-wrote the *entire* payload on every
    iteration, duplicating the output len(lines) times.  The payload is now
    written exactly once.
    """
    import csv,datetime,os
    dt = str(datetime.datetime.now())
    today = dt.split(' ')[0]
    f = os.path.join(os.path.expanduser('~'), today + '_stylestrings.csv')
    with open(f, 'ab+') as csvwritefile:
        writer = csv.writer(csvwritefile, delimiter='\n')
        writer.writerows([lines])
############################ Run ######################################################## Run ############################
############################ Run ######################################################## Run ############################
from PIL import Image
import os,sys,re
rootdir = sys.argv[1]
walkedout = recursive_dirlist(rootdir)
regex = re.compile(r'.*?[0-9]{9}_[1-6]\.[jpgJPG]{3}$')
#regex = re.compile(r'.+?\.[jpgJPG]{3}$')
stylestrings = []
stylestringsdict = {}
for line in walkedout:
stylestringsdict_tmp = {}
if re.findall(regex,line):
try:
file_path = line
filename = file_path.split('/')[-1]
colorstyle = filename.split('_')[0]
alt_ext = file_path.split('_')[-1]
alt = alt_ext.split('.')[0]
ext = alt_ext.split('.')[-1]
try:
photo_date = get_exif(file_path)['DateTimeOriginal'][:10]
except KeyError:
try:
photo_date = get_exif(file_path)['DateTime'][:10]
except KeyError:
photo_date = 0000-00-00
photo_date = photo_date.replace(':','-')
stylestringsdict_tmp['colorstyle'] = colorstyle
stylestringsdict_tmp['photo_date'] = photo_date
stylestringsdict_tmp['file_path'] = file_path
stylestringsdict_tmp['alt'] = alt
stylestringsdict[file_path] = stylestringsdict_tmp
file_path_reletive = file_path.replace('/mnt/Post_Ready/zImages_1/', '/zImages/')
file_path_reletive = file_path.replace('JPG', 'jpg')
## Format CSV Rows
row = "{0},{1},{2},{3}".format(colorstyle,photo_date,file_path_reletive,alt)
print row
stylestrings.append(row)
except IOError:
print "IOError on {0}".format(line)
except AttributeError:
print "AttributeError on {0}".format(line)
## Write CSV List to dated file for Import to MySQL
#csv_write_datedOutfile(stylestrings)
## Create Dir Struct under ZIMAGES_1 if dir doesnt Exist and copy files to it
for k,v in stylestringsdict.iteritems():
import os,sys,shutil, re
regex_zimages = re.compile(r'^.*?/zImages.*?[0-9]{9}_[1-6]\.[jpgJPG]{3}$')
src = k
## Dont Move or Resize Files found in zImages copy everything else to zImages
if re.findall(regex_zimages, src):
pass
else:
destdir = os.path.join('/mnt/Post_Ready/zImages_1', v['colorstyle'][:4])
destfilename = src.split('/')[-1]
destpath = os.path.join(destdir,destfilename)
## Test if File Exists in zimage Directory else copy it and resize
if os.path.isfile(destpath):
#print "Not Copying Over File {0}".format(destpath)
pass
else:
## Mkdir if not there
try:
os.mkdirs(destdir)
except:
pass
## Mk Thumbs Then move thumbs to Destdir
try:
make_lowres_thumbnails_dir_or_singlefile(src)
successthumb = "Created Thumbnail --> {0}".format(src)
csv_write_datedOutfile(successthumb)
## Success on Thumb Creation Now Move to Dest Dir
try:
src = os.replace('.jpg', '.jpeg')
os.rename(src,destpath)
success = "Success Moving {0} --> {1}".format(src,destpath)
#print success
csv_write_datedOutfile(success)
except:
errthumb = "Error Moving {0} --> {0}".format(src,destpath)
print errthumb
csv_write_datedOutfile(errthumb)
except:
errthumb = "Error Creating Thumbnail for {0}".format(src)
print errthumb
csv_write_datedOutfile(errthumb)
#Iterate through Dict of Walked Directory, then Import to MySql DB
import sqlalchemy
## First compile the SQL Fields as key value pairs
fulldict = {}
for k,v in stylestringsdict.iteritems():
dfill = {}
dfill['colorstyle'] = v['colorstyle']
dfill['photo_date'] = v['photo_date']
file_path = k
file_path = file_path.replace('/mnt/Post_Ready/zImages_1/', '/zImages/')
file_path = file_path.replace('/mnt/Post_Ready/Retouch_', '/Retouch_')
dfill['file_path'] = file_path
dfill['alt'] = v['alt']
fulldict[k] = dfill
## Take the compiled k/v pairs and Format + Insert into MySQL DB
for k,v in fulldict.iteritems():
try:
mysql_engine = sqlalchemy.create_engine('mysql+mysqldb://root:mysql@prodimages.ny.bluefly.com:3301/data_imagepaths')
connection = mysql_engine.connect()
## Test File path String to Determine which Table needs to be Updated Then Insert SQL statement
sqlinsert_choose_test = v['file_path']
regex_photoselects = re.compile(r'^/.+?/Post_Ready/.+?Push/.*?[0-9]{9}_[1-6]\.[jpgJPG]{3}$')
regex_postreadyoriginal = re.compile(r'^/Retouch_.+?/.*?[0-9]{9}_[1-6]\.[jpgJPG]{3}$')
regex_zimages = re.compile(r'^/zImages.*?[0-9]{9}_[1-6]\.[jpgJPG]{3}$')
if re.findall(regex_photoselects, sqlinsert_choose_test):
connection.execute("""INSERT INTO push_photoselects (colorstyle, photo_date, file_path, alt) VALUES (%s, %s, %s, %s)""", v['colorstyle'], v['photo_date'], v['file_path'], v['alt'])
print "Successful Insert Push_Photoselecs --> {0}".format(k)
elif re.findall(regex_postreadyoriginal, sqlinsert_choose_test):
connection.execute("""INSERT INTO post_ready_original (colorstyle, photo_date, file_path, alt) VALUES (%s, %s, %s, %s)""", v['colorstyle'], v['photo_date'], v['file_path'], v['alt'])
print "Successful Insert to Post_Ready_Originals --> {0}".format(k)
elif re.findall(regex_zimages, sqlinsert_choose_test):
connection.execute("""INSERT INTO zimages1_photoselects (colorstyle, photo_date, file_path, alt) VALUES (%s, %s, %s, %s)""", v['colorstyle'], v['photo_date'], v['file_path'], v['alt'])
print "Successful Insert to Zimages --> {0}".format(k)
else:
print "Database Table not Found for Inserting {0}".format(k)
except sqlalchemy.exc.IntegrityError:
print "Duplicate Entry {0}".format(k)
#for vals in v:
# print v[vals]
#push_photoselects = Table('push_photoselects', mysql_engine)
#i = push_photoselects.insert()
#sql = "INSERT INTO data_imagepaths.push_photoselects (colorstyle, photo_date, file_path, alt) VALUES (%('" + colorstyle + "')s,%('" + photo_date + "')d,%('" + file_path + "')s,%(('" + alt + "'))s"
#print sql
#
#
#class PhotoMetaData(f):
#
# def __init__(self, f):
# #self.files_list = []
## self.recursivefilelist = []
# self.MetaDict = {}
# self.f = f
## self.__update(directory)
#
#
# def get_exif(self,f):
# from PIL import Image
# from PIL.ExifTags import TAGS
# i = Image.open(f)
# info = i._getexif()
# exifdict = {}
# for tag, value in info.items():
# decoded = TAGS.get(tag, tag)
# exifdict[decoded] = value
# return exifdict
#
#
# def get_photodate_dict(self,f):
# self.MetaDict = {}
# for f in rcrsedir:
# MetaDict = {}
# try:
# dtod = {}
# dto = get_exif(f)['DateTimeOriginal'][0:10]
# f.split('/')[-1]
# #dtod['ext'] = fn.split('.')[0]
# dtod['colorstyle'] = f.split('_')[0]
# dtod['photo_Date'] = dto
# dtod['file_path'] = f
# dtod['alt'] = f.split('_')[-1]
#
# self.MetaDict[f] = dtod
#
# except AttributeError:
# print 'End -- None Type'
# except IOError:
# print 'IO Identity Error'
# except KeyError:
# print "No Date Time Field"
| |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state
from ..utils.validation import NotFittedError, check_is_fitted
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
# Public estimators exported by this module.
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]

# =============================================================================
# Types and constants
# =============================================================================

# Dtypes expected by the Cython tree code (re-exported from _tree).
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Lookup tables mapping the user-supplied `criterion` string to the
# corresponding Cython Criterion class, for classification and regression.
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}

# Lookup tables mapping the `splitter` string to the Cython Splitter class,
# kept separately for dense and sparse input.
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
                   "presort-best": _tree.PresortBestSplitter,
                   "random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
                    "random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state):
        """Store the hyper-parameters verbatim.

        No validation or conversion happens here; the parameters are
        interpreted when ``fit`` is called -- presumably the scikit-learn
        convention so get_params/set_params round-trip cleanly (TODO confirm
        against the surrounding codebase).
        """
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes

        # Fitted attributes, populated by fit(); None marks "not fitted yet".
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in range(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_samples, n_features = X.shape
if self.tree_ is None:
raise NotFittedError("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        Split quality measure: "gini" for the Gini impurity or "entropy"
        for the information gain.
    splitter : string, optional (default="best")
        Node split strategy: "best" picks the best split, "random" the
        best random split.
    max_features : int, float, string or None, optional (default=None)
        Number of features considered per split: an int, a fraction of
        n_features (float), "auto"/"sqrt" (sqrt(n_features)), "log2"
        (log2(n_features)), or None (all features). The search does not
        stop until at least one valid partition is found, even if that
        means inspecting more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        Maximum tree depth; None grows nodes until leaves are pure or
        hold fewer than min_samples_split samples. Ignored if
        ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        Minimum samples needed to split an internal node.
    min_samples_leaf : int, optional (default=1)
        Minimum samples required at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a leaf.
    max_leaf_nodes : int or None, optional (default=None)
        Grow at most this many leaves in best-first fashion (best = largest
        relative impurity reduction); None means unlimited, and
        ``max_depth`` applies instead.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the estimator's randomness.

    Attributes
    ----------
    tree_ : Tree object
        The underlying Tree object.
    max_features_ : int
        The inferred value of max_features.
    classes_ : array of shape = [n_classes] or a list of such arrays
        Class labels (one array per output for multi-output problems).
    n_classes_ : int or list
        Number of classes per output.
    feature_importances_ : array of shape = [n_features]
        Normalized total criterion reduction per feature, also known as
        the Gini importance.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    """

    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None):
        # Pure delegation; classification behaviour comes from the
        # ClassifierMixin and the "gini" criterion default.
        super(DecisionTreeClassifier, self).__init__(
            splitter=splitter,
            criterion=criterion,
            max_depth=max_depth,
            max_leaf_nodes=max_leaf_nodes,
            max_features=max_features,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            random_state=random_state)

    @staticmethod
    def _normalize_rows(counts):
        # Convert per-leaf class counts to probabilities. Rows that sum to
        # zero are divided by 1 instead, leaving them as all-zero rows.
        totals = counts.sum(axis=1)[:, np.newaxis]
        totals[totals == 0.0] = 1.0
        return counts / totals

    def predict_proba(self, X):
        """Predict class probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            Input samples; converted internally to ``dtype=np.float32``
            and, if sparse, to a ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            Class probabilities, ordered as in the attribute `classes_`.
        """
        check_is_fitted(self, 'n_outputs_')
        X = check_array(X, dtype=DTYPE, accept_sparse="csr")

        if issparse(X) and (X.indices.dtype != np.intc or
                            X.indptr.dtype != np.intc):
            raise ValueError("No support for np.int64 index based "
                             "sparse matrices")

        n_samples, n_features = X.shape

        if self.tree_ is None:
            raise NotFittedError("Tree not initialized. Perform a fit first.")

        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        raw = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            return self._normalize_rows(raw[:, :self.n_classes_])

        return [self._normalize_rows(raw[:, k, :self.n_classes_[k]])
                for k in range(self.n_outputs_)]

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            Input samples; converted internally to ``dtype=np.float32``
            and, if sparse, to a ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            Class log-probabilities, ordered as in the attribute `classes_`.
        """
        probas = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(probas)

        return [np.log(p) for p in probas]
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        Split quality measure; "mse" (mean squared error) is the only
        supported criterion.
    splitter : string, optional (default="best")
        Node split strategy: "best" picks the best split, "random" the
        best random split.
    max_features : int, float, string or None, optional (default=None)
        Number of features considered per split: an int, a fraction of
        n_features (float), "auto" (all features for regression), "sqrt",
        "log2", or None (all features). The search does not stop until at
        least one valid partition is found, even if that means inspecting
        more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        Maximum tree depth; None grows nodes until leaves are pure or
        hold fewer than min_samples_split samples. Ignored if
        ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        Minimum samples needed to split an internal node.
    min_samples_leaf : int, optional (default=1)
        Minimum samples required at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a leaf.
    max_leaf_nodes : int or None, optional (default=None)
        Grow at most this many leaves in best-first fashion (best = largest
        relative impurity reduction); None means unlimited, and
        ``max_depth`` applies instead.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the estimator's randomness.

    Attributes
    ----------
    tree_ : Tree object
        The underlying Tree object.
    max_features_ : int
        The inferred value of max_features.
    feature_importances_ : array of shape = [n_features]
        Normalized total criterion reduction per feature, also known as
        the Gini importance.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    """

    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None):
        # Pure delegation; regression behaviour comes from the
        # RegressorMixin and the "mse" criterion default.
        super(DecisionTreeRegressor, self).__init__(
            splitter=splitter,
            criterion=criterion,
            max_depth=max_depth,
            max_leaf_nodes=max_leaf_nodes,
            max_features=max_features,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees are built differently from classic decision trees: at each
    node, one random split is drawn for each of the `max_features` randomly
    selected features and the best of those random splits is kept. Setting
    `max_features` to 1 therefore yields a totally random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Identical machinery to DecisionTreeClassifier; only the defaults
        # differ (random splitter, max_features="auto").
        super(ExtraTreeClassifier, self).__init__(
            splitter=splitter,
            criterion=criterion,
            max_depth=max_depth,
            max_leaf_nodes=max_leaf_nodes,
            max_features=max_features,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees are built differently from classic decision trees: at each
    node, one random split is drawn for each of the `max_features` randomly
    selected features and the best of those random splits is kept. Setting
    `max_features` to 1 therefore yields a totally random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Identical machinery to DecisionTreeRegressor; only the defaults
        # differ (random splitter, max_features="auto").
        super(ExtraTreeRegressor, self).__init__(
            splitter=splitter,
            criterion=criterion,
            max_depth=max_depth,
            max_leaf_nodes=max_leaf_nodes,
            max_features=max_features,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            random_state=random_state)
| |
#!/usr/bin/env python
from cryptography import x509
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.backends import default_backend
from cryptography.x509.oid import NameOID
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
from acme_lite.utils import b64,long2hex, long2b64, thumbprint, linkurl
from acme_lite.agent import send_request
from acme_lite.error import ACMEError, ACMEGetNonceError, ACMEPollingTimeOutError
import dns.resolver
import json
import copy
import time
import os, sys, io
import signal
# Module metadata.
__author__ = 'holly'
__version__ = '1.0'
class ACMELite(object):
    """Minimal ACME (Let's Encrypt v1-style) client.

    Wraps account-key handling, JWS signing, registration, authorization,
    challenge triggering/polling, certificate issuance and revocation
    against either the staging or production Let's Encrypt endpoint.
    """

    # genrsa: defaults used when generating a new RSA account key.
    KEY_SIZE = 4096
    PUBLIC_EXPONENT = 65537

    # polling: seconds between challenge status checks, and the maximum
    # number of checks before giving up.
    POLLING_DELAY = 1
    POLLING_MAX_TIMES = 10

    # GET_ : URL path templates relative to the ACME API host.
    REG_URL_FORMAT = "/acme/reg/{0}"
    AUTHZ_URL_FORMAT = "/acme/authz/{0}"
    CHALLENGE_URL_FORMAT = "/acme/challenge/{0}/{1}"
    CERT_URL_FORMAT = "/acme/cert/{0}"

    def __init__(self, **kwargs):
        """Supported kwargs: staging (bool, default True), verbose (bool,
        default False), key_size (int), account_key (path to a PEM key)."""
        self._staging = kwargs["staging"] if "staging" in kwargs else True
        self._verbose = kwargs["verbose"] if "verbose" in kwargs else False
        self._account_key = None
        self._header = None
        self._thumbprint = None
        self._api_host = None
        # for genrsa
        self._key_size = kwargs["key_size"] if "key_size" in kwargs else __class__.KEY_SIZE
        if "account_key" in kwargs:
            self.set_account_key(kwargs["account_key"])
            self.set_header_and_thumbprint()
        # NOTE(review): endpoint selection and directory fetch assumed
        # unconditional (the `staging` setter repeats them) — confirm
        # against the original, whitespace-mangled layout.
        self.set_api_host()
        _, directory = self.get_nonce_and_directory()
        self._directory = directory

    def set_account_key(self, account_key):
        """Load the account private key from a PEM file path."""
        with open(account_key, "r") as f:
            self.set_account_key_from_key_data(f.read())

    def set_account_key_from_key_data(self, key_data):
        """Load the account private key from PEM-encoded text."""
        self._account_key = serialization.load_pem_private_key(key_data.encode("utf-8"), password=None, backend=default_backend())

    def set_header_and_thumbprint(self):
        """Derive the JWS header (RS256 + RSA JWK) and the JWK thumbprint
        from the current account key."""
        public_numbers = self.account_key.public_key().public_numbers()
        jwk = {
            "kty": "RSA",
            "e": long2b64(public_numbers.e),
            "n": long2b64(public_numbers.n),
        }
        self._header = { "alg": "RS256", "jwk": jwk }
        # Canonical (sorted, no whitespace) JSON is hashed by the
        # thumbprint helper.
        self._thumbprint = thumbprint(json.dumps(jwk, sort_keys=True, separators=(',', ':')).encode('utf-8'))

    def set_api_host(self):
        """Select the staging or production endpoint from self.staging."""
        if self.staging:
            self._api_host = "https://acme-staging.api.letsencrypt.org"
        else:
            self._api_host = "https://acme-v01.api.letsencrypt.org"
        self.logging("acme endpoint: {0}".format(self.api_host))

    def get_nonce_and_directory(self):
        """Fetch the directory resource.

        Returns (replay nonce, directory dict). Raises ACMEError on
        transport failure and ACMEGetNonceError when the Replay-Nonce
        header is missing.
        """
        url = self.api_host + "/" + "directory"
        res = send_request(url)
        if res.is_error():
            raise ACMEError(res.error)
        if 'Replay-Nonce' not in res.headers:
            raise ACMEGetNonceError("nonce field is not exists in response headers")
        nonce = res.headers['Replay-Nonce']
        return nonce, res.json

    def initial_account_key(self):
        """Generate a fresh RSA account key; the account_key setter also
        refreshes the JWS header and thumbprint."""
        key = rsa.generate_private_key(public_exponent=__class__.PUBLIC_EXPONENT, key_size=self.key_size, backend=default_backend())
        self.account_key = key

    def make_signed_payload(self, payload):
        """Wrap *payload* in a signed JWS object using a fresh nonce."""
        nonce, _ = self.get_nonce_and_directory()
        payload64 = b64(json.dumps(payload).encode('utf-8'))
        header = self.header
        # deepcopy so the nonce is not written into the shared header.
        protected = copy.deepcopy(header)
        protected["nonce"] = nonce
        protected64 = b64(json.dumps(protected).encode('utf-8'))
        signature = b64(self.sign("{0}.{1}".format(protected64, payload64)))
        payload = {
            "header": header,
            "protected": protected64,
            "payload": payload64,
            "signature": signature,
        }
        return payload

    def csr2domains(self, csr):
        """Return the domains named in a CSR file (CN plus SAN entries)."""
        with open(csr, "r") as f:
            csr_data = f.read()
        return self.csr_data2domains(csr_data)

    def csr_data2domains(self, csr_data):
        """Return the domains named in PEM CSR text (CN plus SAN entries)."""
        domains = []
        req = self.validate_csr_from_csr_data(csr_data)
        for attr in req.subject.get_attributes_for_oid(NameOID.COMMON_NAME):
            domains.append(attr.value)
        for ext in req.extensions:
            sans = ext.value
            for dns_name in sans:
                domains.append(dns_name.value)
        return domains

    def sign(self, data):
        """Sign *data* with the account key (PKCS#1 v1.5, SHA-256)."""
        # echo -n $data | openssl dgst -sha256 -sign /path/to/account.key
        sign = self.account_key.sign(data.encode("utf-8"), padding.PKCS1v15(), hashes.SHA256())
        return sign

    def request(self, payload):
        """Sign *payload* and POST it to the directory URL of its resource."""
        resource = payload["resource"]
        signed_payload = self.make_signed_payload(payload)
        signed_payload_json = json.dumps(signed_payload).encode("utf-8")
        self.logging("debug request payload")
        self.logging("===================================")
        self.logging("payload: {0}".format(json.dumps(payload, indent=2)))
        self.logging("===================================")
        return self.single_request(self.directory[resource], resource=resource, payload=signed_payload_json)

    def single_request(self, url, resource=None, payload=None):
        """Perform one HTTP request, logging the response when verbose."""
        res = send_request(url, resource=resource, payload=payload)
        self.logging("debug response contents")
        self.logging("===================================")
        self.logging("url: {0}".format(res.url))
        self.logging("code: {0}".format(res.code))
        self.logging("")
        self.logging(res.headers)
        self.logging("")
        self.logging(res.text)
        self.logging("===================================")
        return res

    def register(self):
        """Register the account key, agreeing to the current ToS."""
        payload = {
            "resource": "new-reg",
            "agreement": self.directory["meta"]["terms-of-service"]
        }
        return self.request(payload=payload)

    def key_change(self, new_account_key=None, account_url=None):
        """Roll over to a new account key stored in a PEM file."""
        with open(new_account_key, "r") as f:
            return self.key_change_from_key_data(f.read(), account_url)

    def key_change_from_key_data(self, key_data=None, account_url=None):
        """Roll over to a new account key given as PEM text.

        On success the instance switches to the new key.
        """
        new_account_key = serialization.load_pem_private_key(key_data.encode("utf-8"), password=None, backend=default_backend())
        public_numbers = new_account_key.public_key().public_numbers()
        jwk = {
            "kty": "RSA",
            "e": long2b64(public_numbers.e),
            "n": long2b64(public_numbers.n),
        }
        header = { "alg": "RS256", "jwk": jwk }
        new_payload = { "account": account_url, "newKey": jwk }
        protected64 = b64(json.dumps(header).encode('utf-8'))
        payload64 = b64(json.dumps(new_payload).encode('utf-8'))
        # The inner JWS is signed with the *new* key to prove possession.
        sign = new_account_key.sign("{0}.{1}".format(protected64, payload64).encode("utf-8"), padding.PKCS1v15(), hashes.SHA256())
        signature = b64(sign)
        payload = {
            "resource": "key-change",
            "protected": protected64,
            "payload": payload64,
            "signature": signature
        }
        res = self.request(payload=payload)
        if res.is_success():
            self.account_key = new_account_key
        return res

    def new_authz(self, domain):
        """Request a new dns-identifier authorization for *domain*."""
        payload = {
            "resource": "new-authz",
            "identifier": {"type": "dns", "value": domain },
        }
        res = self.request(payload=payload)
        self.logging("code:{0}".format(res.code))
        self.logging("body:{0}".format(res.text))
        return res

    def authz(self, authz_token):
        """Fetch an existing authorization by its URL token."""
        authz_url = self.api_host + __class__.AUTHZ_URL_FORMAT.format(authz_token)
        res = self.single_request(authz_url)
        if res.is_success():
            res.resource = "new-authz"
        return res

    def validate_real_challenge(self, challenge):
        """Pre-flight check that the challenge response is actually deployed
        (HTTP/TLS-SNI resource or DNS TXT record) before asking the CA.

        Returns True when the deployed value matches; raises ACMEError
        otherwise.
        """
        challenge_type = challenge["type"]
        setting_location = challenge["setting_location"]
        auth_key = challenge["auth_key"]
        if challenge["status"] == "valid":
            return True
        if challenge["status"] != "pending":
            raise ACMEError("{0} status is not pending".format(challenge_type))
        if challenge_type == "http-01" or challenge_type == "tls-sni-01":
            res = self.single_request(setting_location)
            real_auth_key = res.text
            if res.code != 200:
                raise ACMEError("setting_location:{0} is unreachable".format(setting_location))
            if auth_key != real_auth_key:
                # BUG FIX: previously formatted with the undefined name
                # ``real_key``, so a mismatch raised NameError instead of
                # the intended ACMEError.
                raise ACMEError("auth.key({0}) and remote.key({1}) are mismatch".format(auth_key, real_auth_key))
            return True
        elif challenge_type == "dns-01":
            txt = dns.resolver.query(setting_location, 'TXT')
            for i in txt.response.answer:
                for item in i.items:
                    if item.to_text() == auth_key:
                        return True
            raise ACMEError("txt records({0}) are mismatch".format(setting_location))

    def handle_challenge(self, challenge, polling_max_times=POLLING_MAX_TIMES):
        """Trigger *challenge* and poll until the CA reports it valid.

        Raises ACMEError on request failure or when *polling_max_times*
        polls pass without the status becoming "valid".
        """
        res = self.new_challenge(challenge)
        if res.is_error():
            raise ACMEError(res.error)
        times = 0
        while True:
            res = self.challenge(challenge)
            if res.is_error():
                raise ACMEError(res.error)
            if res.code == 202 and res.json["status"] == "valid":
                return res
            else:
                times += 1
                if times > polling_max_times:
                    raise ACMEError("polling runtime error. over {0} times".format(times))
                else:
                    self.logging("challenge status still is not valid...{0} times".format(times))
                    time.sleep(__class__.POLLING_DELAY)

    def new_challenge(self, challenge):
        """Tell the CA the challenge response is in place."""
        payload = {
            "resource": "challenge",
            "keyAuthorization": challenge["auth_key"]
        }
        signed_payload = self.make_signed_payload(payload)
        signed_payload_json = json.dumps(signed_payload).encode("utf-8")
        return self.single_request(challenge["uri"], resource="challenge", payload=signed_payload_json)

    def challenge(self, challenge):
        """Fetch the current status of *challenge*."""
        res = self.single_request(challenge["uri"])
        if res.is_success():
            res.resource = "challenge"
        return res

    def new_cert(self, csr):
        """Request a certificate for the CSR stored in a PEM file."""
        with open(csr, "r") as f:
            csr_data = f.read()
        return self.new_cert_from_csr_data(csr_data)

    def new_cert_from_csr_data(self, csr_data):
        """Request a certificate for PEM CSR text (sent DER-encoded)."""
        req = x509.load_pem_x509_csr(csr_data.encode("utf-8"), default_backend())
        csr_der = req.public_bytes(serialization.Encoding.DER)
        payload = {
            "resource": "new-cert",
            "csr": b64(csr_der)
        }
        res = self.request(payload=payload)
        return res

    def cert(self, cert_id):
        """Download an issued certificate by its id."""
        cert_url = self.api_host + __class__.CERT_URL_FORMAT.format(cert_id)
        res = self.single_request(cert_url)
        if res.is_success():
            res.resource = "new-cert"
        self.logging("code:{0}".format(res.code))
        self.logging("body:{0}".format(res.text))
        return res

    def revoke(self, cert):
        """Revoke the certificate stored in a PEM file."""
        with open(cert, "r") as f:
            cert_data = f.read()
        return self.revoke_from_cert_data(cert_data)

    def revoke_from_cert_data(self, cert_data):
        """Revoke a certificate given as PEM text (sent DER-encoded)."""
        x509_cert = x509.load_pem_x509_certificate(cert_data.encode("utf-8"), default_backend())
        cert_der = x509_cert.public_bytes(serialization.Encoding.DER)
        payload = {
            "resource": "revoke-cert",
            "certificate": b64(cert_der)
        }
        res = self.request(payload=payload)
        return res

    def validate_csr(self, csr):
        """Parse (and thereby validate) the CSR stored in a PEM file."""
        with open(csr, "r") as f:
            return self.validate_csr_from_csr_data(f.read())

    def validate_csr_from_csr_data(self, csr_data):
        """Parse (and thereby validate) PEM CSR text."""
        return x509.load_pem_x509_csr(csr_data.encode("utf-8"), default_backend())

    def logging(self, message):
        """Write *message* to stderr when verbose mode is on."""
        if self.verbose is False:
            return
        print(message, file=sys.stderr)

    @property
    def account_key(self):
        return self._account_key

    @property
    def api_host(self):
        return self._api_host

    @property
    def verbose(self):
        return self._verbose

    @property
    def directory(self):
        return self._directory

    @property
    def header(self):
        return self._header

    @property
    def staging(self):
        return self._staging

    @property
    def thumbprint(self):
        return self._thumbprint

    @property
    def key_size(self):
        return self._key_size

    @api_host.setter
    def api_host(self, api_host):
        self._api_host = api_host

    @account_key.setter
    def account_key(self, account_key):
        # Only accept real RSA keys; refresh the derived JWS header and
        # thumbprint alongside the key.
        if isinstance(account_key, (rsa.RSAPrivateKey)):
            self._account_key = account_key
            self.set_header_and_thumbprint()

    @verbose.setter
    def verbose(self, verbose):
        self._verbose = verbose

    @staging.setter
    def staging(self, flag):
        # Switching environments re-selects the endpoint and re-fetches
        # the directory.
        if isinstance(flag, (bool)):
            self._staging = flag
            self.set_api_host()
            _, directory = self.get_nonce_and_directory()
            self._directory = directory

    @key_size.setter
    def key_size(self, key_size):
        self._key_size = key_size
if __name__ == "__main__":
    # This module is intended to be imported; there is no CLI entry point.
    pass
| |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds two rule-based remarketing user lists.
Adds two rule-based remarketing user lists; one with no site visit date
restrictions and another that will only include users who visit your site in
the next six months.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import calendar
from datetime import date
from datetime import datetime
from datetime import timedelta
from googleads import adwords
def main(client):
  """Add three rule-based remarketing user lists to the account.

  Creates an expression-based list with no site-visit date restriction, a
  date-specific list limited to the next six months, and a combined list of
  users who visited two specific pages.

  Args:
    client: An initialized adwords.AdWordsClient instance.
  """
  # Initialize appropriate service.
  adwords_user_list_service = client.GetService(
      'AdwordsUserListService', version='v201809')
  # First rule item group - users who visited the checkout page and had more
  # than one item in their shopping cart.
  checkout_rule_item = {
      'StringRuleItem': {
          'key': {
              'name': 'ecomm_pagetype'
          },
          'op': 'EQUALS',
          'value': 'checkout'
      }
  }
  cart_size_rule_item = {
      'NumberRuleItem': {
          'key': {
              'name': 'cartsize'
          },
          'op': 'GREATER_THAN',
          'value': '1.0'
      }
  }
  # Combine the two rule items into a RuleItemGroup so AdWords will logically
  # AND the rules together.
  checkout_multiple_item_group = {
      'items': [checkout_rule_item, cart_size_rule_item]
  }
  # Second rule item group - users who checked out within the next 3 months.
  today = date.today()
  start_date_rule_item = {
      'DateRuleItem': {
          'key': {
              'name': 'checkoutdate'
          },
          'op': 'AFTER',
          'value': today.strftime('%Y%m%d')
      }
  }
  three_months_later = AddMonths(today, 3)
  three_months_later_rule_item = {
      'DateRuleItem': {
          'key': {
              'name': 'checkoutdate'
          },
          'op': 'BEFORE',
          'value': three_months_later.strftime('%Y%m%d')
      }
  }
  # Combine the date rule items into a RuleItemGroup.
  checked_out_date_range_item_group = {
      'items': [start_date_rule_item, three_months_later_rule_item]
  }
  # Combine the rule item groups into a Rule so AdWords knows how to apply the
  # rules.
  rule = {
      'groups': [
          checkout_multiple_item_group,
          checked_out_date_range_item_group
      ],
      # ExpressionRuleUserLists can use either CNF or DNF for matching. CNF
      # means 'at least one item in each rule item group must match', and DNF
      # means 'at least one entire rule item group must match'.
      # DateSpecificRuleUserList only supports DNF. You can also omit the rule
      # type altogether to default to DNF.
      'ruleType': 'DNF'
  }
  # Third and fourth rule item groups.
  # Visitors of a page who visited another page.
  site1_rule_item = {
      'StringRuleItem': {
          'key': {'name': 'url__'},
          'op': 'EQUALS',
          'value': 'example.com/example1'
      }
  }
  site2_rule_item = {
      'StringRuleItem': {
          'key': {'name': 'url__'},
          'op': 'EQUALS',
          'value': 'example.com/example2'
      }
  }
  # Create two rules to show that a visitor browsed two sites.
  user_visited_site1_rule = {
      'groups': [{
          'items': [site1_rule_item]
      }]
  }
  user_visited_site2_rule = {
      'groups': [{
          'items': [site2_rule_item]
      }]
  }
  # Create the user list with no restrictions on site visit date.
  expression_user_list = {
      'xsi_type': 'ExpressionRuleUserList',
      'name': 'Expression-based user list created at %s'
              % datetime.today().strftime('%Y%m%d %H:%M:%S'),
      'description': 'Users who checked out in three month window OR visited'
                     ' the checkout page with more than one item in their'
                     ' cart.',
      'rule': rule,
      # Optional: Set the populationStatus to REQUESTED to include past users
      # in the user list.
      'prepopulationStatus': 'REQUESTED'
  }
  # Create the user list restricted to users who visit your site within the
  # next six months.
  end_date = AddMonths(today, 6)
  date_user_list = {
      'xsi_type': 'DateSpecificRuleUserList',
      'name': 'Date rule user list created at %s'
              % datetime.today().strftime('%Y%m%d %H:%M:%S'),
      'description': 'Users who visited the site between %s and %s and checked'
                     ' out in three month window OR visited the checkout page'
                     ' with more than one item in their cart.'
                     % (today.strftime('%Y%m%d'), end_date.strftime('%Y%m%d')),
      'rule': rule,
      'startDate': today.strftime('%Y%m%d'),
      'endDate': end_date.strftime('%Y%m%d')
  }
  # Create the user list for "Visitors of a page who did visit another page".
  # To create a user list for "Visitors of a page who did not visit another
  # page", change the ruleOperator from AND to AND_NOT.
  # Bug fix: the name previously contained a literal, never-substituted
  # '${creation_time}' placeholder (and a 'lst' typo); format the timestamp
  # the same way as the other lists above.
  combined_user_list = {
      'xsi_type': 'CombinedRuleUserList',
      'name': 'Combined rule user list created at %s'
              % datetime.today().strftime('%Y%m%d %H:%M:%S'),
      'description': 'Users who visited two sites.',
      'leftOperand': user_visited_site1_rule,
      'rightOperand': user_visited_site2_rule,
      'ruleOperator': 'AND'
  }
  # Create operations to add the user lists.
  operations = [
      {
          'operand': user_list,
          'operator': 'ADD',
      } for user_list in [expression_user_list, date_user_list,
                          combined_user_list]
  ]
  # Submit the operations.
  user_lists = adwords_user_list_service.mutate(operations)
  # Display results.
  for user_list in user_lists['value']:
    print(('User list added with ID %d, name "%s", status "%s", list type'
           ' "%s", accountUserListStatus "%s", description "%s".') %
          (user_list['id'], user_list['name'],
           user_list['status'], user_list['listType'],
           user_list['accountUserListStatus'], user_list['description']))
def AddMonths(start_date, months):
  """A simple convenience utility for adding months to a given start date.

  Each step adds the number of days in the then-current month, so e.g.
  Jan 31 plus one month lands in early March rather than on Feb 28/29.

  Args:
    start_date: date The date months are being added to.
    months: int The number of months to add.

  Returns:
    A date equal to the start date incremented by the given number of months.
  """
  result = start_date
  for _ in range(months):
    days_in_month = calendar.monthrange(result.year, result.month)[1]
    result = result + timedelta(days=days_in_month)
  return result
if __name__ == '__main__':
  # Initialize client object.
  # Credentials and properties are pulled from the googleads.yaml storage
  # file (see the module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client)
| |
from bleach import clean
from crispy_forms.bootstrap import Tab, TabHolder
from crispy_forms.helper import FormHelper
from crispy_forms.layout import ButtonHolder, Layout, Submit
from django import forms
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.functions import Lower
from django.forms import ModelChoiceField
from django.utils.html import format_html
from django.utils.text import format_lazy
from django_select2.forms import Select2Widget
from django_summernote.widgets import SummernoteInplaceWidget
from guardian.shortcuts import get_objects_for_user
from grandchallenge.algorithms.models import Algorithm
from grandchallenge.core.forms import SaveFormInitMixin
from grandchallenge.core.templatetags.remove_whitespace import oxford_comma
from grandchallenge.core.validators import (
ExtensionValidator,
MimeTypeValidator,
)
from grandchallenge.core.widgets import JSONEditorWidget
from grandchallenge.evaluation.models import (
EXTRA_RESULT_COLUMNS_SCHEMA,
Method,
Phase,
Submission,
)
from grandchallenge.jqfileupload.widgets import uploader
from grandchallenge.jqfileupload.widgets.uploader import UploadedAjaxFileList
from grandchallenge.subdomains.utils import reverse, reverse_lazy
# Field-name groups reused for the tabs in PhaseUpdateForm and its Meta.fields.
# Fields shown on the "Phase" tab.
phase_options = ("title",)
# Fields shown on the "Submission" tab.
submission_options = (
    "submission_page_html",
    "creator_must_be_verified",
    "submission_limit",
    "submission_limit_period",
    "allow_submission_comments",
    "supplementary_file_choice",
    "supplementary_file_label",
    "supplementary_file_help_text",
    "supplementary_url_choice",
    "supplementary_url_label",
    "supplementary_url_help_text",
)
# Fields shown on the "Scoring" tab.
scoring_options = (
    "score_title",
    "score_jsonpath",
    "score_error_jsonpath",
    "score_default_sort",
    "score_decimal_places",
    "extra_results_columns",
    "scoring_method_choice",
    "auto_publish_new_results",
    "result_display_choice",
)
# Fields shown on the "Leaderboard" tab.
leaderboard_options = (
    "display_submission_comments",
    "show_supplementary_file_link",
    "show_supplementary_url",
    "evaluation_comparison_observable_url",
)
# Fields shown on the "Result Detail" tab.
result_detail_options = (
    "display_all_metrics",
    "evaluation_detail_observable_url",
)
class PhaseTitleMixin:
    """Form mixin that enforces a case-insensitively unique phase title
    within the challenge supplied at construction time."""

    def __init__(self, *args, challenge, **kwargs):
        self.challenge = challenge
        super().__init__(*args, **kwargs)

    def clean_title(self):
        cleaned = self.cleaned_data["title"].strip()
        duplicates = self.challenge.phase_set.filter(title__iexact=cleaned)
        if self.instance:
            # Do not count the phase currently being edited.
            duplicates = duplicates.exclude(pk=self.instance.pk)
        if duplicates.exists():
            raise ValidationError(
                "This challenge already has a phase with this title"
            )
        return cleaned
class PhaseCreateForm(PhaseTitleMixin, SaveFormInitMixin, forms.ModelForm):
    # Creation only asks for a title; all other phase settings are edited
    # afterwards via PhaseUpdateForm.
    class Meta:
        model = Phase
        fields = ("title",)
class PhaseUpdateForm(PhaseTitleMixin, forms.ModelForm):
    """Tabbed crispy-forms layout for editing all configurable phase options
    (phase, submission, scoring, leaderboard, result detail)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One tab per concern; the option tuples are defined at module level
        # and reused below in Meta.fields.
        self.helper = FormHelper(self)
        self.helper.layout = Layout(
            TabHolder(
                Tab("Phase", *phase_options),
                Tab("Submission", *submission_options),
                Tab("Scoring", *scoring_options),
                Tab("Leaderboard", *leaderboard_options),
                Tab("Result Detail", *result_detail_options),
            ),
            ButtonHolder(Submit("save", "Save")),
        )

    class Meta:
        model = Phase
        fields = (
            *phase_options,
            *submission_options,
            *scoring_options,
            *leaderboard_options,
            *result_detail_options,
        )
        widgets = {
            # Rich-text editor for the submission page content.
            "submission_page_html": SummernoteInplaceWidget(),
            # Schema-validated JSON editor for the extra results columns.
            "extra_results_columns": JSONEditorWidget(
                schema=EXTRA_RESULT_COLUMNS_SCHEMA
            ),
        }
class MethodForm(SaveFormInitMixin, forms.ModelForm):
    """Upload form for a new evaluation method container image.

    The container archive is uploaded in chunks and validated against a
    total-size limit and an allowed set of tar extensions.
    """

    phase = ModelChoiceField(
        queryset=None,
        help_text="Which phase is this evaluation container for?",
    )
    chunked_upload = UploadedAjaxFileList(
        widget=uploader.AjaxUploadWidget(multifile=False, auto_commit=False),
        label="Evaluation Method Container",
        validators=[
            ExtensionValidator(
                allowed_extensions=(".tar", ".tar.gz", ".tar.xz")
            )
        ],
        help_text=(
            ".tar.xz archive of the container image produced from the command "
            "'docker save IMAGE | xz -c > IMAGE.tar.xz'. See "
            "https://docs.docker.com/engine/reference/commandline/save/"
        ),
    )

    def __init__(self, *args, user, challenge, **kwargs):
        super().__init__(*args, **kwargs)
        # The upload widget needs the user, and only phases of this
        # challenge are valid targets for the method.
        self.fields["chunked_upload"].widget.user = user
        self.fields["phase"].queryset = challenge.phase_set.all()

    def clean_chunked_upload(self):
        """Reject uploads whose combined size exceeds the configured limit."""
        files = self.cleaned_data["chunked_upload"]
        # Generator expression avoids building an intermediate list.
        if sum(f.size for f in files) > settings.COMPONENTS_MAXIMUM_IMAGE_SIZE:
            raise ValidationError("File size limit exceeded")
        return files

    class Meta:
        model = Method
        fields = ["phase", "chunked_upload"]
# Model fields exposed on the submission forms; some of these are removed
# per-phase at form construction time.
submission_fields = (
    "creator",
    "phase",
    "comment",
    "supplementary_file",
    "supplementary_url",
    "chunked_upload",
)
class SubmissionForm(forms.ModelForm):
    """Create a Submission for one fixed Phase.

    The visible fields depend on the phase configuration: the comment and
    supplementary file/URL fields are shown, made required, or removed per
    phase settings, and either an algorithm selection or a predictions-file
    upload is used depending on the phase's submission kind.
    """
    chunked_upload = UploadedAjaxFileList(
        widget=uploader.AjaxUploadWidget(multifile=False, auto_commit=False),
        label="Predictions File",
        validators=[
            MimeTypeValidator(allowed_types=("application/zip", "text/plain")),
            ExtensionValidator(allowed_extensions=(".zip", ".csv")),
        ],
    )
    algorithm = ModelChoiceField(
        queryset=None,
        help_text=format_lazy(
            "Select one of your algorithms to submit as a solution to this "
            "challenge. If you have not created your algorithm yet you can "
            "do so <a href={}>on this page</a>.",
            reverse_lazy("algorithms:create"),
        ),
    )
    def __init__(  # noqa: C901
        self, *args, user, phase: Phase, **kwargs,
    ):
        super().__init__(*args, **kwargs)
        # Creator is locked to the requesting user (rendered as a hidden
        # input; see Meta.widgets).
        self.fields["creator"].queryset = get_user_model().objects.filter(
            pk=user.pk
        )
        self.fields["creator"].initial = user
        # Note that the validation of creator and algorithm require
        # access to the phase properties, so those validations
        # would need to be updated if phase selections are allowed.
        self._phase = phase
        self.fields["phase"].queryset = Phase.objects.filter(pk=phase.pk)
        self.fields["phase"].initial = phase
        if not self._phase.allow_submission_comments:
            del self.fields["comment"]
        # Supplementary file: apply per-phase label/help text, then make
        # the field required or remove it entirely per the phase choice.
        if self._phase.supplementary_file_label:
            self.fields[
                "supplementary_file"
            ].label = self._phase.supplementary_file_label
        if self._phase.supplementary_file_help_text:
            # bleach.clean sanitizes the stored help-text HTML.
            self.fields["supplementary_file"].help_text = clean(
                self._phase.supplementary_file_help_text
            )
        if self._phase.supplementary_file_choice == Phase.REQUIRED:
            self.fields["supplementary_file"].required = True
        elif self._phase.supplementary_file_choice == Phase.OFF:
            del self.fields["supplementary_file"]
        # Supplementary URL: same treatment as the supplementary file.
        if self._phase.supplementary_url_label:
            self.fields[
                "supplementary_url"
            ].label = self._phase.supplementary_url_label
        if self._phase.supplementary_url_help_text:
            self.fields["supplementary_url"].help_text = clean(
                self._phase.supplementary_url_help_text
            )
        if self._phase.supplementary_url_choice == Phase.REQUIRED:
            self.fields["supplementary_url"].required = True
        elif self._phase.supplementary_url_choice == Phase.OFF:
            del self.fields["supplementary_url"]
        if self._phase.submission_kind == self._phase.SubmissionKind.ALGORITHM:
            # Algorithm submissions: drop the file upload, offer only
            # algorithms the user may change, and remember the phase's
            # expected inputs/outputs for clean_algorithm().
            del self.fields["chunked_upload"]
            self.fields["algorithm"].queryset = get_objects_for_user(
                user, "algorithms.change_algorithm", Algorithm,
            ).order_by("title")
            self._algorithm_inputs = self._phase.algorithm_inputs.all()
            self._algorithm_outputs = self._phase.algorithm_outputs.all()
        else:
            # Predictions-file submissions: no algorithm selection.
            del self.fields["algorithm"]
            self.fields["chunked_upload"].widget.user = user
        self.helper = FormHelper(self)
        self.helper.layout.append(Submit("save", "Save"))
    def clean_chunked_upload(self):
        """Reject predictions files larger than the configured maximum."""
        chunked_upload = self.cleaned_data["chunked_upload"]
        if (
            sum([f.size for f in chunked_upload])
            > settings.PREDICTIONS_FILE_MAX_BYTES
        ):
            raise ValidationError("Predictions file is too large.")
        return chunked_upload
    def clean_algorithm(self):
        """Validate the selected algorithm against the phase requirements."""
        algorithm = self.cleaned_data["algorithm"]
        if set(self._algorithm_inputs) != set(algorithm.inputs.all()):
            raise ValidationError(
                "The inputs for your algorithm do not match the ones "
                "required by this phase, please update your algorithm "
                "to work with: "
                f"{oxford_comma(self._algorithm_inputs)}. "
            )
        if set(self._algorithm_outputs) != set(algorithm.outputs.all()):
            raise ValidationError(
                "The outputs from your algorithm do not match the ones "
                "required by this phase, please update your algorithm "
                "to produce: "
                f"{oxford_comma(self._algorithm_outputs)}. "
            )
        if algorithm.latest_ready_image is None:
            raise ValidationError(
                "This algorithm does not have a usable container image. "
                "Please add one and try again."
            )
        # Only one submission per container image per phase is allowed.
        if Submission.objects.filter(
            algorithm_image=algorithm.latest_ready_image, phase=self._phase,
        ).exists():
            raise ValidationError(
                "A submission for this algorithm container image "
                "for this phase already exists."
            )
        return algorithm
    def clean_creator(self):
        """Check that this user is currently allowed to submit to the phase."""
        creator = self.cleaned_data["creator"]
        try:
            user_is_verified = creator.verification.is_verified
        except ObjectDoesNotExist:
            user_is_verified = False
        if self._phase.creator_must_be_verified and not user_is_verified:
            error_message = format_html(
                "You must verify your account before you can make a "
                "submission to this phase. Please "
                '<a href="{}"> request verification here</a>.',
                reverse("verifications:create"),
            )
            # Add this to the non-field errors as we use a HiddenInput
            self.add_error(None, error_message)
            raise ValidationError(error_message)
        # Challenge admins may always submit; everyone else needs remaining
        # quota and no evaluations still pending.
        is_challenge_admin = self._phase.challenge.is_admin(user=creator)
        has_remaining_submissions = (
            self._phase.get_next_submission(user=creator)[
                "remaining_submissions"
            ]
            >= 1
        )
        has_pending_evaluations = self._phase.has_pending_evaluations(
            user=creator
        )
        can_submit = is_challenge_admin or (
            has_remaining_submissions and not has_pending_evaluations
        )
        if not can_submit:
            error_message = "A new submission cannot be created for this user"
            self.add_error(None, error_message)
            raise ValidationError(error_message)
        return creator
    class Meta:
        model = Submission
        fields = submission_fields
        widgets = {"creator": forms.HiddenInput, "phase": forms.HiddenInput}
class LegacySubmissionForm(SubmissionForm):
    """Submission form variant where any participant of the challenge can
    be selected as the creator (via a Select2 widget)."""

    def __init__(self, *args, challenge, **kwargs):
        super().__init__(*args, **kwargs)
        participants = challenge.participants_group.user_set.all()
        self.fields["creator"].queryset = participants.order_by(
            Lower("username")
        )

    class Meta(SubmissionForm.Meta):
        widgets = {"creator": Select2Widget, "phase": forms.HiddenInput}
| |
#!/usr/bin/python -u
import sys
import os
import subprocess
import time
from datetime import datetime
import shutil
import tempfile
import hashlib
import re
import logging
import argparse
################
#### Telegraf Variables
################
# Packaging variables
PACKAGE_NAME = "telegraf"
# Filesystem layout inside the built packages.
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/telegraf"
SCRIPT_DIR = "/usr/lib/telegraf/scripts"
CONFIG_DIR = "/etc/telegraf"
LOGROTATE_DIR = "/etc/logrotate.d"
# Source paths (relative to the repository root) of files shipped in packages.
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/telegraf.service"
LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
DEFAULT_CONFIG = "etc/telegraf.conf"
DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf"
# Package maintainer scripts run by deb/rpm on install/remove.
POSTINST_SCRIPT = "scripts/post-install.sh"
PREINST_SCRIPT = "scripts/pre-install.sh"
POSTREMOVE_SCRIPT = "scripts/post-remove.sh"
PREREMOVE_SCRIPT = "scripts/pre-remove.sh"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "dl.influxdata.com/telegraf/artifacts"
# Files marked as configuration so that package upgrades keep local edits.
CONFIGURATION_FILES = [
    CONFIG_DIR + '/telegraf.conf',
    LOGROTATE_DIR + '/telegraf',
]
# META-PACKAGE VARIABLES
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/telegraf"
MAINTAINER = "support@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."
# SCRIPT START
# Binaries that must be on PATH for a build to proceed (see check_prereqs).
prereqs = [ 'git', 'go' ]
go_vet_command = "go tool vet -composites=true ./"
# Binaries that enable optional functionality when present.
optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]
# Arguments shared by every fpm packaging invocation.
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--license {} \
--maintainer {} \
--config-files {} \
--config-files {} \
--after-install {} \
--before-install {} \
--after-remove {} \
--before-remove {} \
--description \"{}\"".format(
    VENDOR,
    PACKAGE_URL,
    PACKAGE_LICENSE,
    MAINTAINER,
    CONFIG_DIR + '/telegraf.conf',
    LOGROTATE_DIR + '/telegraf',
    POSTINST_SCRIPT,
    PREINST_SCRIPT,
    POSTREMOVE_SCRIPT,
    PREREMOVE_SCRIPT,
    DESCRIPTION)
# Build targets: binary name -> Go package path.
targets = {
    'telegraf' : './cmd/telegraf',
}
# Platform -> architectures this script can build for.
supported_builds = {
    "windows": [ "amd64" ],
    "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64" ],
    "freebsd": [ "amd64" ]
}
# Platform -> package formats produced.
supported_packages = {
    "linux": [ "deb", "rpm", "tar" ],
    "windows": [ "zip" ],
    "freebsd": [ "tar" ]
}
################
#### Telegraf Functions
################
def print_banner():
    """Log the Telegraf ASCII-art build banner at INFO level."""
    logging.info("""
 _____ _ __
/__ \\___| | ___ __ _ _ __ __ _ / _|
 / /\\/ _ \\ |/ _ \\/ _` | '__/ _` | |_
/ / | __/ | __/ (_| | | | (_| | _|
\\/ \\___|_|\\___|\\__, |_| \\__,_|_|
 |___/
 Build Script
""")
def create_package_fs(build_root):
    """Create a filesystem structure to mimic the package filesystem."""
    logging.debug("Creating a filesystem hierarchy from directory: {}".format(build_root))
    # Using [1:] for the path names due to them being absolute
    # (will overwrite previous paths, per 'os.path.join' documentation)
    for subdir in (INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:],
                   CONFIG_DIR[1:], LOGROTATE_DIR[1:]):
        target = os.path.join(build_root, subdir)
        os.makedirs(target)
        os.chmod(target, 0o755)
def package_scripts(build_root, config_only=False, windows=False):
    """Copy the necessary scripts and configuration files to the package
    filesystem.

    Args:
        build_root: Root directory of the package tree being assembled.
        config_only: When True, copy only the default config to the root.
        windows: When True, use the Windows config and copy it to the root.
    """
    if config_only or windows:
        logging.info("Copying configuration to build directory")
        source_config = DEFAULT_WINDOWS_CONFIG if windows else DEFAULT_CONFIG
        shutil.copyfile(source_config, os.path.join(build_root, "telegraf.conf"))
        os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644)
    else:
        logging.info("Copying scripts and configuration to build directory")
        # os.path.basename is more robust than the previous split('/')[1]
        # and behaves identically for the current one-level script paths.
        init_name = os.path.basename(INIT_SCRIPT)
        shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], init_name))
        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], init_name), 0o644)
        systemd_name = os.path.basename(SYSTEMD_SCRIPT)
        shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], systemd_name))
        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], systemd_name), 0o644)
        shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
        os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
        os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
def run_generate():
    """Code generation step; Telegraf has none, so this always succeeds."""
    # NOOP for Telegraf
    return True
def go_get(branch, update=False, no_uncommitted=False):
    """Retrieve build dependencies or restore pinned dependencies.

    Returns False if uncommitted changes are present and disallowed,
    True once the `gdm` restore commands have been issued.
    """
    if local_changes() and no_uncommitted:
        logging.error("There are uncommitted changes in the current directory.")
        return False
    if not check_path_for("gdm"):
        logging.info("Downloading `gdm`...")
        run("go get github.com/sparrc/gdm")
    logging.info("Retrieving dependencies with `gdm`...")
    gopath = os.environ.get("GOPATH")
    run("{}/bin/gdm restore -v -f Godeps_windows".format(gopath))
    run("{}/bin/gdm restore -v".format(gopath))
    return True
def run_tests(race, parallel, timeout, no_vet):
    """Test step; currently a no-op for Telegraf that always succeeds."""
    # Currently a NOOP for Telegraf
    return True
################
#### All Telegraf-specific content above this line
################
def run(command, allow_failure=False, shell=False):
    """Run shell command (convenience wrapper around subprocess).

    Args:
        command: Command string; split on whitespace unless shell=True.
        allow_failure: When True, log failures and return None (or the
            output gathered so far) instead of exiting the process.
        shell: When True, execute the command through the shell.

    Returns:
        The command's decoded, stripped output, or None on an allowed
        failure. Exits the process on a disallowed failure.
    """
    out = None
    logging.debug("{}".format(command))
    try:
        if shell:
            out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
        else:
            out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
        out = out.decode('utf-8').strip()
    except subprocess.CalledProcessError as e:
        # Fix: logging.warn is a deprecated alias of logging.warning.
        if allow_failure:
            logging.warning("Command '{}' failed with error: {}".format(command, e.output))
            return None
        else:
            logging.error("Command '{}' failed with error: {}".format(command, e.output))
            sys.exit(1)
    except OSError as e:
        if allow_failure:
            logging.warning("Command '{}' failed with error: {}".format(command, e))
            return out
        else:
            logging.error("Command '{}' failed with error: {}".format(command, e))
            sys.exit(1)
    else:
        return out
def create_temp_dir(prefix=None):
    """Create temporary directory with optional prefix.

    Falls back to a '<package>-build.' prefix when none is given.
    """
    chosen = "{}-build.".format(PACKAGE_NAME) if prefix is None else prefix
    return tempfile.mkdtemp(prefix=chosen)
def increment_minor_version(version):
    """Return the version with the minor version incremented and patch
    version set to zero.

    Versions not in 'major.minor.patch' form are returned unchanged
    (with a warning).
    """
    ver_list = version.split('.')
    if len(ver_list) != 3:
        # Fix: logging.warn is a deprecated alias of logging.warning.
        logging.warning("Could not determine how to increment version '{}', will just use provided version.".format(version))
        return version
    ver_list[1] = str(int(ver_list[1]) + 1)
    ver_list[2] = str(0)
    inc_version = '.'.join(ver_list)
    logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version))
    return inc_version
def get_current_version_tag():
    """Retrieve the raw git version tag."""
    return run("git describe --always --tags --abbrev=0")
def get_current_version():
    """Parse version information from git tag output.

    Strips a leading 'v' and maps '-'/'_' separators to '~' (the sort
    convention used by package versioning).
    """
    version_tag = get_current_version_tag()
    # Remove leading 'v'. startswith() is safe for an empty tag, unlike
    # the previous version_tag[0] indexing which raised IndexError.
    if version_tag.startswith('v'):
        version_tag = version_tag[1:]
    # Replace any '-'/'_' with '~'; replace() is already a no-op when the
    # character is absent, so no membership check is needed.
    return version_tag.replace("-", "~").replace("_", "~")
def get_current_commit(short=False):
    """Retrieve the current git commit (abbreviated when *short* is True)."""
    if short:
        cmd = "git log --pretty=format:'%h' -n 1"
    else:
        cmd = "git rev-parse HEAD"
    # Strip quotes and trailing whitespace from the git output.
    return run(cmd).strip('\'\n\r ')
def get_current_branch():
    """Retrieve the current git branch."""
    return run("git rev-parse --abbrev-ref HEAD").strip()
def local_changes():
    """Return True if there are local un-committed changes."""
    diff = run("git diff-files --ignore-submodules --").strip()
    return bool(diff)
def get_system_arch():
    """Retrieve current system architecture, normalized to Go naming."""
    machine = os.uname()[4]
    if machine == "x86_64":
        return "amd64"
    if machine == "386":
        return "i386"
    if 'arm' in machine:
        # Prevent uname from reporting full ARM arch (eg 'armv7l')
        return "arm"
    return machine
def get_system_platform():
    """Retrieve current system platform ('linux2' etc. collapse to 'linux')."""
    return "linux" if sys.platform.startswith("linux") else sys.platform
def get_go_version():
    """Retrieve version information for Go.

    Returns the version string (e.g. '1.17.3'), or None when it cannot be
    parsed from `go version` output.
    """
    out = run("go version")
    # Raw string avoids an invalid escape sequence warning for '\S'.
    matches = re.search(r'go version go(\S+)', out)
    if matches is not None:
        return matches.groups()[0].strip()
    return None
def check_path_for(b):
    """Check the user's PATH for the provided binary.

    Returns the full path of the first executable match, or None when the
    binary is not found.
    """
    def is_exe(fpath):
        # Regular file with the execute bit set for the current user.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    for path in os.environ["PATH"].split(os.pathsep):
        path = path.strip('"')
        full_path = os.path.join(path, b)
        # Fix: is_exe was defined but the same condition was duplicated
        # inline; use the helper.
        if is_exe(full_path):
            return full_path
def check_environ(build_dir=None):
    """Check environment for common Go variables.

    Logs the values of GOPATH/GOBIN/GOROOT and warns when the current
    directory is outside GOPATH. Always returns True.
    """
    logging.info("Checking environment...")
    for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
        logging.debug("Using '{}' for {}".format(os.environ.get(v), v))
    cwd = os.getcwd()
    if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
        # Fix: logging.warn is a deprecated alias of logging.warning.
        logging.warning("Your current directory is not under your GOPATH. This may lead to build failures.")
    return True
def check_prereqs():
    """Check user path for required dependencies.

    Logs an error and returns False at the first missing dependency;
    returns True when everything is available.
    """
    logging.info("Checking for dependencies...")
    missing = next(
        (req for req in prereqs if not check_path_for(req)), None
    )
    if missing is not None:
        logging.error("Could not find dependency: {}".format(missing))
        return False
    return True
def upload_packages(packages, bucket_name=None, overwrite=False):
    """Upload provided package output to AWS S3.

    Args:
        packages: Iterable of local file paths to upload.
        bucket_name: Target bucket, optionally with a nested 'bucket/folder'
            path; defaults to DEFAULT_BUCKET when None.
        overwrite: Replace existing keys when True.

    Returns:
        True on completion, False when the 'boto' library is unavailable.
    """
    logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
    try:
        # boto is optional; importing lazily keeps it a soft dependency.
        import boto
        from boto.s3.key import Key
        from boto.s3.connection import OrdinaryCallingFormat
        logging.getLogger("boto").setLevel(logging.WARNING)
    except ImportError:
        logging.warn("Cannot upload packages without 'boto' Python library!")
        return False
    logging.info("Connecting to AWS S3...")
    # Up the number of attempts to 10 from default of 1
    boto.config.add_section("Boto")
    boto.config.set("Boto", "metadata_service_num_attempts", "10")
    c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
    if bucket_name is None:
        bucket_name = DEFAULT_BUCKET
    bucket = c.get_bucket(bucket_name.split('/')[0])
    for p in packages:
        if '/' in bucket_name:
            # Allow for nested paths within the bucket name (ex:
            # bucket/folder). Assuming forward-slashes as path
            # delimiter.
            name = os.path.join('/'.join(bucket_name.split('/')[1:]),
                                os.path.basename(p))
        else:
            name = os.path.basename(p)
        logging.debug("Using key: {}".format(name))
        # Skip keys that already exist unless overwrite was requested.
        if bucket.get_key(name) is None or overwrite:
            logging.info("Uploading file {}".format(name))
            k = Key(bucket)
            k.key = name
            if overwrite:
                n = k.set_contents_from_filename(p, replace=True)
            else:
                n = k.set_contents_from_filename(p, replace=False)
            # Published artifacts are world-readable downloads.
            k.make_public()
        else:
            logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
    return True
def go_list(vendor=False, relative=False):
    """
    Return a list of packages
    If vendor is False vendor package are not included
    If relative is True the package prefix defined by PACKAGE_URL is stripped
    """
    p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # Bug fix: communicate() returns bytes on Python 3; decode before
    # splitting (bytes.split('\n') raises TypeError).
    packages = out.decode('utf-8').split('\n')
    if packages[-1] == '':
        packages = packages[:-1]
    if not vendor:
        # Drop vendored packages.
        packages = [pkg for pkg in packages if '/vendor/' not in pkg]
    if relative:
        relative_pkgs = []
        for pkg in packages:
            r = pkg.replace(PACKAGE_URL, '.')
            # The root package itself ('.') is excluded from the result.
            if r != '.':
                relative_pkgs.append(r)
        packages = relative_pkgs
    return packages
def build(version=None,
          platform=None,
          arch=None,
          nightly=False,
          race=False,
          clean=False,
          outdir=".",
          tags=None,
          static=False):
    """Build each target for the specified architecture and platform.

    Args:
        version: Version string embedded via linker flags.
        platform: GOOS value (e.g. 'linux', 'windows', 'freebsd').
        arch: GOARCH value; a 'static_' prefix forces a static build.
        nightly: Accepted for interface compatibility; not used here.
        race: Enable the Go race detector.
        clean: Remove the output directory before building.
        outdir: Directory that receives the built binaries.
        tags: Optional list of Go build tags.
        static: Build statically-linked binaries (CGO disabled).

    Returns:
        True on success, False for an invalid ARM architecture.
    """
    # Bug fix: the default was a mutable list ([]), which is shared across
    # calls; use None and create a fresh list per call instead.
    if tags is None:
        tags = []
    logging.info("Starting build for {}/{}...".format(platform, arch))
    logging.info("Using Go version: {}".format(get_go_version()))
    logging.info("Using git branch: {}".format(get_current_branch()))
    logging.info("Using git commit: {}".format(get_current_commit()))
    if static:
        logging.info("Using statically-compiled output.")
    if race:
        logging.info("Race is enabled.")
    if len(tags) > 0:
        logging.info("Using build tags: {}".format(','.join(tags)))
    logging.info("Sending build output to: {}".format(outdir))
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif clean and outdir != '/' and outdir != ".":
        logging.info("Cleaning build directory '{}' before building.".format(outdir))
        shutil.rmtree(outdir)
        os.makedirs(outdir)
    logging.info("Using version '{}' for build.".format(version))
    tmp_build_dir = create_temp_dir()
    for target, path in targets.items():
        logging.info("Building target: {}".format(target))
        build_command = ""
        # Handle static binary output
        if static is True or "static_" in arch:
            if "static_" in arch:
                # NOTE: 'static' and 'arch' are rebound here and persist
                # across loop iterations; harmless with a single target but
                # worth knowing if more targets are ever added.
                static = True
                arch = arch.replace("static_", "")
            build_command += "CGO_ENABLED=0 "
        # Handle variations in architecture output
        if arch == "i386" or arch == "i686":
            arch = "386"
        elif "arm" in arch:
            arch = "arm"
        build_command += "GOOS={} GOARCH={} ".format(platform, arch)
        if "arm" in arch:
            if arch == "armel":
                build_command += "GOARM=5 "
            elif arch == "armhf" or arch == "arm":
                build_command += "GOARM=6 "
            elif arch == "arm64":
                # TODO(rossmcdonald) - Verify this is the correct setting for arm64
                build_command += "GOARM=7 "
            else:
                logging.error("Invalid ARM architecture specified: {}".format(arch))
                logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
                return False
        if platform == 'windows':
            target = target + '.exe'
        build_command += "go build -o {} ".format(os.path.join(outdir, target))
        if race:
            build_command += "-race "
        if len(tags) > 0:
            build_command += "-tags {} ".format(','.join(tags))
        # NOTE(review): get_go_version() can return None, which would make
        # this membership test raise — confirm Go is a hard prerequisite.
        if "1.4" in get_go_version():
            # Go 1.4 uses the 'name value' linker flag syntax.
            if static:
                build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(
                    version, get_current_branch(), get_current_commit())
            else:
                build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(
                    version, get_current_branch(), get_current_commit())
        else:
            # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'
            if static:
                build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(
                    version, get_current_branch(), get_current_commit())
            else:
                build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(
                    version, get_current_branch(), get_current_commit())
        if static:
            build_command += "-a -installsuffix cgo "
        build_command += path
        start_time = datetime.utcnow()
        run(build_command, shell=True)
        end_time = datetime.utcnow()
        logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
    return True
def generate_md5_from_file(path):
    """Generate MD5 signature based on the contents of the file at path."""
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        # Read in 4 KiB chunks so large files do not load into memory at once.
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def generate_sig_from_file(path):
    """Write a detached, ASCII-armored GPG signature for the file at *path*.

    Returns False (after logging a warning) when no ``gpg`` binary is
    found on the PATH, otherwise returns True.  When the GNUPG_HOME
    environment variable is set it is passed to gpg as the homedir.
    """
    logging.debug("Generating GPG signature for file: {}".format(path))
    if check_path_for('gpg') is None:
        logging.warn("gpg binary not found on path! Skipping signature creation.")
        return False
    gnupg_home = os.environ.get("GNUPG_HOME")
    if gnupg_home is not None:
        run('gpg --homedir {} --armor --yes --detach-sign {}'.format(gnupg_home, path))
    else:
        run('gpg --armor --detach-sign --yes {}'.format(path))
    return True
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
    """Package the output of the build process.

    build_output -- nested dict ``{platform: {arch: output_dir}}``.
    pkg_name -- base name used for the produced package files.
    version -- version string baked into package names; for builds that
        are neither release nor nightly, the short commit hash is
        appended to it.
    nightly -- name packages as nightlies instead of versioned releases.
    iteration -- package iteration passed to fpm (forced to "0" for
        non-release, non-nightly builds).
    static -- treat the binaries as static builds (tar/zip only; other
        package types are skipped).
    release -- mark this as a release build.

    Returns a list of paths to the produced package files.  All staging
    happens in a temporary directory that is removed on exit.
    """
    outfiles = []
    tmp_build_dir = create_temp_dir()
    logging.debug("Packaging for build output: {}".format(build_output))
    logging.info("Using temporary directory: {}".format(tmp_build_dir))
    try:
        for platform in build_output:
            # Create top-level folder displaying which platform (linux, etc)
            os.makedirs(os.path.join(tmp_build_dir, platform))
            for arch in build_output[platform]:
                logging.info("Creating packages for {}/{}".format(platform, arch))
                # Create second-level directory displaying the architecture (amd64, etc)
                current_location = build_output[platform][arch]

                # Create directory tree to mimic file system of package
                build_root = os.path.join(tmp_build_dir,
                                          platform,
                                          arch,
                                          PACKAGE_NAME)
                os.makedirs(build_root)

                # Copy packaging scripts to build directory
                if platform == "windows":
                    # For windows and static builds, just copy
                    # binaries to root of package (no other scripts or
                    # directories)
                    package_scripts(build_root, config_only=True, windows=True)
                elif static or "static_" in arch:
                    package_scripts(build_root, config_only=True)
                else:
                    create_package_fs(build_root)
                    package_scripts(build_root)

                for binary in targets:
                    # Copy newly-built binaries to packaging directory
                    if platform == 'windows':
                        binary = binary + '.exe'
                    if platform == 'windows' or static or "static_" in arch:
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, binary)
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                    else:
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
                    shutil.copy(fr, to)

                for package_type in supported_packages[platform]:
                    # Package the directory structure for each package type for the platform
                    logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
                    name = pkg_name
                    # Reset version, iteration, and current location on each run
                    # since they may be modified below.
                    package_version = version
                    package_iteration = iteration
                    if "static_" in arch:
                        # Remove the "static_" from the displayed arch on the package
                        package_arch = arch.replace("static_", "")
                    elif package_type == "rpm" and arch == 'armhf':
                        # rpm uses a different spelling for the ARMv6 hard-float arch
                        package_arch = 'armv6hl'
                    else:
                        package_arch = arch
                    if not release and not nightly:
                        # For non-release builds, just use the commit hash as the version
                        package_version = "{}~{}".format(version,
                                                         get_current_commit(short=True))
                        package_iteration = "0"
                    package_build_root = build_root
                    current_location = build_output[platform][arch]

                    if package_type in ['zip', 'tar']:
                        # For tars and zips, start the packaging one folder above
                        # the build root (to include the package name)
                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
                        if nightly:
                            if static or "static_" in arch:
                                name = '{}-static-nightly_{}_{}'.format(name,
                                                                        platform,
                                                                        package_arch)
                            else:
                                name = '{}-nightly_{}_{}'.format(name,
                                                                 platform,
                                                                 package_arch)
                        else:
                            if static or "static_" in arch:
                                name = '{}-{}-static_{}_{}'.format(name,
                                                                   package_version,
                                                                   platform,
                                                                   package_arch)
                            else:
                                name = '{}-{}_{}_{}'.format(name,
                                                            package_version,
                                                            platform,
                                                            package_arch)
                        current_location = os.path.join(os.getcwd(), current_location)

                    if package_type == 'tar':
                        tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name)
                        run(tar_command, shell=True)
                        run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                        outfile = os.path.join(current_location, name + ".tar.gz")
                        outfiles.append(outfile)
                    elif package_type == 'zip':
                        zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
                        run(zip_command, shell=True)
                        run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                        outfile = os.path.join(current_location, name + ".zip")
                        outfiles.append(outfile)
                    # NOTE(review): 'A and B or C' parses as '(A and B) or C'.
                    # Harmless here because 'zip'/'tar' were consumed by the
                    # branches above (so A is always True), but the intent is
                    # 'static or "static_" in arch' — confirm before editing.
                    elif package_type not in ['zip', 'tar'] and static or "static_" in arch:
                        logging.info("Skipping package type '{}' for static builds.".format(package_type))
                    else:
                        # Everything else (deb/rpm/...) is delegated to fpm.
                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
                            fpm_common_args,
                            name,
                            package_arch,
                            package_type,
                            package_version,
                            package_iteration,
                            package_build_root,
                            current_location)
                        if package_type == "rpm":
                            fpm_command += "--depends coreutils --rpm-posttrans {}".format(POSTINST_SCRIPT)
                        out = run(fpm_command, shell=True)
                        # fpm reports the produced file as :path=>"..." on stdout.
                        matches = re.search(':path=>"(.*)"', out)
                        outfile = None
                        if matches is not None:
                            outfile = matches.groups()[0]
                        if outfile is None:
                            logging.warn("Could not determine output from packaging output!")
                        else:
                            if nightly:
                                # Strip nightly version from package name
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            else:
                                if package_type == 'rpm':
                                    # rpm's convert any dashes to underscores
                                    package_version = package_version.replace("-", "_")
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version)
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            outfiles.append(os.path.join(os.getcwd(), outfile))
        logging.debug("Produced package files: {}".format(outfiles))
        return outfiles
    finally:
        # Cleanup
        shutil.rmtree(tmp_build_dir)
def main(args):
    """Drive the full build / package / sign / upload pipeline.

    args -- parsed argparse namespace (see the parser definition in the
        ``__main__`` block).

    Returns a process exit code: 0 on success, 1 on any failure.  May
    check out a different git branch/commit while working; restores the
    original branch before returning.
    """
    global PACKAGE_NAME
    if args.release and args.nightly:
        logging.error("Cannot be both a nightly and a release.")
        return 1

    if args.nightly:
        # Nightlies get the *next* minor version plus a timestamp suffix.
        args.version = increment_minor_version(args.version)
        args.version = "{}~n{}".format(args.version,
                                       datetime.utcnow().strftime("%Y%m%d%H%M"))
        args.iteration = 0

    # Pre-build checks
    check_environ()
    if not check_prereqs():
        return 1

    if args.build_tags is None:
        args.build_tags = []
    else:
        args.build_tags = args.build_tags.split(',')

    orig_commit = get_current_commit(short=True)
    orig_branch = get_current_branch()

    if args.platform not in supported_builds and args.platform != 'all':
        # BUG FIX: previously formatted an undefined name 'target_platform',
        # which raised a NameError instead of reporting the bad platform.
        logging.error("Invalid build platform: {}".format(args.platform))
        return 1

    build_output = {}

    if args.branch != orig_branch and args.commit != orig_commit:
        logging.error("Can only specify one branch or commit to build from.")
        return 1
    elif args.branch != orig_branch:
        logging.info("Moving to git branch: {}".format(args.branch))
        run("git checkout {}".format(args.branch))
    elif args.commit != orig_commit:
        logging.info("Moving to git commit: {}".format(args.commit))
        run("git checkout {}".format(args.commit))

    if not args.no_get:
        if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
            return 1

    if args.generate:
        if not run_generate():
            return 1

    if args.test:
        if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
            return 1

    platforms = []
    single_build = True
    if args.platform == 'all':
        platforms = supported_builds.keys()
        single_build = False
    else:
        platforms = [args.platform]

    for platform in platforms:
        build_output.update( { platform : {} } )
        archs = []
        if args.arch == "all":
            single_build = False
            archs = supported_builds.get(platform)
        else:
            archs = [args.arch]
        for arch in archs:
            od = args.outdir
            if not single_build:
                # Multi-target builds get per-platform/arch subdirectories.
                od = os.path.join(args.outdir, platform, arch)
            if not build(version=args.version,
                         platform=platform,
                         arch=arch,
                         nightly=args.nightly,
                         race=args.race,
                         clean=args.clean,
                         outdir=od,
                         tags=args.build_tags,
                         static=args.static):
                return 1
            build_output.get(platform).update( { arch : od } )

    # Build packages
    if args.package:
        if not check_path_for("fpm"):
            logging.error("FPM ruby gem required for packaging. Stopping.")
            return 1
        packages = package(build_output,
                           args.name,
                           args.version,
                           nightly=args.nightly,
                           iteration=args.iteration,
                           static=args.static,
                           release=args.release)
        if args.sign:
            logging.debug("Generating GPG signatures for packages: {}".format(packages))
            sigs = [] # retain signatures so they can be uploaded with packages
            for p in packages:
                if generate_sig_from_file(p):
                    sigs.append(p + '.asc')
                else:
                    logging.error("Creation of signature for package [{}] failed!".format(p))
                    return 1
            packages += sigs
        if args.upload:
            logging.debug("Files staged for upload: {}".format(packages))
            if args.nightly:
                # Nightlies always overwrite the previous nightly artifacts.
                args.upload_overwrite = True
            if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
                return 1
        logging.info("Packages created:")
        for p in packages:
            logging.info("{} (MD5={})".format(p.split('/')[-1:][0],
                                              generate_md5_from_file(p)))

    if orig_branch != get_current_branch():
        # BUG FIX: log the branch we actually check out (orig_branch),
        # not args.branch.
        logging.info("Moving back to original git branch: {}".format(orig_branch))
        run("git checkout {}".format(orig_branch))

    return 0
if __name__ == '__main__':
    # Logging is configured before argparse runs so that messages emitted
    # while evaluating argument defaults (git subprocess calls below) are
    # already at the chosen level.  '--debug' is therefore checked by hand
    # on sys.argv.
    # NOTE(review): the '--verbose'/'-v' aliases registered below set
    # args.verbose, but that attribute is never consulted — only the
    # literal '--debug' string here changes the log level; confirm intent.
    LOG_LEVEL = logging.INFO
    if '--debug' in sys.argv[1:]:
        LOG_LEVEL = logging.DEBUG
    log_format = '[%(levelname)s] %(funcName)s: %(message)s'
    logging.basicConfig(level=LOG_LEVEL,
                        format=log_format)

    parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
    parser.add_argument('--verbose','-v','--debug',
                        action='store_true',
                        help='Use debug output')
    parser.add_argument('--outdir', '-o',
                        metavar='<output directory>',
                        default='./build/',
                        type=os.path.abspath,
                        help='Output directory')
    parser.add_argument('--name', '-n',
                        metavar='<name>',
                        default=PACKAGE_NAME,
                        type=str,
                        help='Name to use for package name (when package is specified)')
    parser.add_argument('--arch',
                        metavar='<amd64|i386|armhf|arm64|armel|all>',
                        type=str,
                        default=get_system_arch(),
                        help='Target architecture for build output')
    parser.add_argument('--platform',
                        metavar='<linux|darwin|windows|all>',
                        type=str,
                        default=get_system_platform(),
                        help='Target platform for build output')
    # The defaults for --branch/--commit/--version shell out to git at
    # parse time, so a git checkout must be present even to run --help.
    parser.add_argument('--branch',
                        metavar='<branch>',
                        type=str,
                        default=get_current_branch(),
                        help='Build from a specific branch')
    parser.add_argument('--commit',
                        metavar='<commit>',
                        type=str,
                        default=get_current_commit(short=True),
                        help='Build from a specific commit')
    parser.add_argument('--version',
                        metavar='<version>',
                        type=str,
                        default=get_current_version(),
                        help='Version information to apply to build output (ex: 0.12.0)')
    parser.add_argument('--iteration',
                        metavar='<package iteration>',
                        type=str,
                        default="1",
                        help='Package iteration to apply to build output (defaults to 1)')
    parser.add_argument('--stats',
                        action='store_true',
                        help='Emit build metrics (requires InfluxDB Python client)')
    parser.add_argument('--stats-server',
                        metavar='<hostname:port>',
                        type=str,
                        help='Send build stats to InfluxDB using provided hostname and port')
    parser.add_argument('--stats-db',
                        metavar='<database name>',
                        type=str,
                        help='Send build stats to InfluxDB using provided database name')
    parser.add_argument('--nightly',
                        action='store_true',
                        help='Mark build output as nightly build (will incremement the minor version)')
    parser.add_argument('--update',
                        action='store_true',
                        help='Update build dependencies prior to building')
    parser.add_argument('--package',
                        action='store_true',
                        help='Package binary output')
    parser.add_argument('--release',
                        action='store_true',
                        help='Mark build output as release')
    parser.add_argument('--clean',
                        action='store_true',
                        help='Clean output directory before building')
    parser.add_argument('--no-get',
                        action='store_true',
                        help='Do not retrieve pinned dependencies when building')
    parser.add_argument('--no-uncommitted',
                        action='store_true',
                        help='Fail if uncommitted changes exist in the working directory')
    parser.add_argument('--upload',
                        action='store_true',
                        help='Upload output packages to AWS S3')
    # NOTE(review): help text below duplicates --upload's; it presumably
    # should describe overwriting existing S3 objects — confirm and reword.
    parser.add_argument('--upload-overwrite','-w',
                        action='store_true',
                        help='Upload output packages to AWS S3')
    parser.add_argument('--bucket',
                        metavar='<S3 bucket name>',
                        type=str,
                        default=DEFAULT_BUCKET,
                        help='Destination bucket for uploads')
    parser.add_argument('--generate',
                        action='store_true',
                        help='Run "go generate" before building')
    parser.add_argument('--build-tags',
                        metavar='<tags>',
                        help='Optional build tags to use for compilation')
    parser.add_argument('--static',
                        action='store_true',
                        help='Create statically-compiled binary output')
    parser.add_argument('--sign',
                        action='store_true',
                        help='Create GPG detached signatures for packages (when package is specified)')
    parser.add_argument('--test',
                        action='store_true',
                        help='Run tests (does not produce build output)')
    parser.add_argument('--no-vet',
                        action='store_true',
                        help='Do not run "go vet" when running tests')
    parser.add_argument('--race',
                        action='store_true',
                        help='Enable race flag for build output')
    parser.add_argument('--parallel',
                        metavar='<num threads>',
                        type=int,
                        help='Number of tests to run simultaneously')
    parser.add_argument('--timeout',
                        metavar='<timeout>',
                        type=str,
                        help='Timeout for tests before failing')
    args = parser.parse_args()
    print_banner()
    # main() returns a process exit code (0 success, 1 failure).
    sys.exit(main(args))
| |
"""Test the arraymodule.
Roger E. Masse
"""
import unittest
import warnings
from test import test_support
from weakref import proxy
import array, cStringIO
from cPickle import loads, dumps, HIGHEST_PROTOCOL
import sys
class ArraySubclass(array.array):
    # Minimal subclass; used by the pickle/copy tests to verify that the
    # subclass type and instance attributes survive round-tripping.
    pass
class ArraySubclassWithKwargs(array.array):
    # Subclass whose __init__ takes an extra keyword argument (ignored);
    # used by test_subclass_with_kwargs to exercise SF bug #1486663.
    def __init__(self, typecode, newarg=None):
        array.array.__init__(self, typecode)
tests = [] # list to accumulate all tests
# All array typecodes exercised by this module (Python 2 set, incl. 'c'/'u').
typecodes = "cubBhHiIlLfd"
class BadConstructorTest(unittest.TestCase):
    """The constructor must reject missing, unknown, and malformed typecodes."""

    def test_constructor(self):
        with self.assertRaises(TypeError):
            array.array()
        with self.assertRaises(TypeError):
            array.array(spam=42)
        with self.assertRaises(TypeError):
            array.array('xx')
        with self.assertRaises(ValueError):
            array.array('x')
tests.append(BadConstructorTest)
class BaseTest(unittest.TestCase):
# Required class attributes (provided by subclasses
# typecode: the typecode to test
# example: an initializer usable in the constructor for this type
# smallerexample: the same length as example, but smaller
# biggerexample: the same length as example, but bigger
# outside: An entry that is not in example
# minitemsize: the minimum guaranteed itemsize
def assertEntryEqual(self, entry1, entry2):
self.assertEqual(entry1, entry2)
def badtypecode(self):
# Return a typecode that is different from our own
return typecodes[(typecodes.index(self.typecode)+1) % len(typecodes)]
def test_constructor(self):
a = array.array(self.typecode)
self.assertEqual(a.typecode, self.typecode)
self.assertGreaterEqual(a.itemsize, self.minitemsize)
self.assertRaises(TypeError, array.array, self.typecode, None)
def test_len(self):
a = array.array(self.typecode)
a.append(self.example[0])
self.assertEqual(len(a), 1)
a = array.array(self.typecode, self.example)
self.assertEqual(len(a), len(self.example))
def test_buffer_info(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.buffer_info, 42)
bi = a.buffer_info()
self.assertIsInstance(bi, tuple)
self.assertEqual(len(bi), 2)
self.assertIsInstance(bi[0], (int, long))
self.assertIsInstance(bi[1], int)
self.assertEqual(bi[1], len(a))
def test_byteswap(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.byteswap, 42)
if a.itemsize in (1, 2, 4, 8):
b = array.array(self.typecode, self.example)
b.byteswap()
if a.itemsize==1:
self.assertEqual(a, b)
else:
self.assertNotEqual(a, b)
b.byteswap()
self.assertEqual(a, b)
def test_copy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.copy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_deepcopy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.deepcopy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_pickle(self):
for protocol in range(HIGHEST_PROTOCOL + 1):
a = array.array(self.typecode, self.example)
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode, self.example)
a.x = 10
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
def test_pickle_for_empty_array(self):
for protocol in range(HIGHEST_PROTOCOL + 1):
a = array.array(self.typecode)
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode)
a.x = 10
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
def test_insert(self):
a = array.array(self.typecode, self.example)
a.insert(0, self.example[0])
self.assertEqual(len(a), 1+len(self.example))
self.assertEqual(a[0], a[1])
self.assertRaises(TypeError, a.insert)
self.assertRaises(TypeError, a.insert, None)
self.assertRaises(TypeError, a.insert, 0, None)
a = array.array(self.typecode, self.example)
a.insert(-1, self.example[0])
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:-1] + self.example[:1] + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a.insert(-1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a.insert(1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[:1])
)
def test_tofromfile(self):
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.tofile)
self.assertRaises(TypeError, a.tofile, cStringIO.StringIO())
test_support.unlink(test_support.TESTFN)
f = open(test_support.TESTFN, 'wb')
try:
a.tofile(f)
f.close()
b = array.array(self.typecode)
f = open(test_support.TESTFN, 'rb')
self.assertRaises(TypeError, b.fromfile)
self.assertRaises(
TypeError,
b.fromfile,
cStringIO.StringIO(), len(self.example)
)
b.fromfile(f, len(self.example))
self.assertEqual(b, array.array(self.typecode, self.example))
self.assertNotEqual(a, b)
b.fromfile(f, len(self.example))
self.assertEqual(a, b)
self.assertRaises(EOFError, b.fromfile, f, 1)
f.close()
finally:
if not f.closed:
f.close()
test_support.unlink(test_support.TESTFN)
def test_fromfile_ioerror(self):
# Issue #5395: Check if fromfile raises a proper IOError
# instead of EOFError.
a = array.array(self.typecode)
f = open(test_support.TESTFN, 'wb')
try:
self.assertRaises(IOError, a.fromfile, f, len(self.example))
finally:
f.close()
test_support.unlink(test_support.TESTFN)
def test_filewrite(self):
a = array.array(self.typecode, 2*self.example)
f = open(test_support.TESTFN, 'wb')
try:
f.write(a)
f.close()
b = array.array(self.typecode)
f = open(test_support.TESTFN, 'rb')
b.fromfile(f, len(self.example))
self.assertEqual(b, array.array(self.typecode, self.example))
self.assertNotEqual(a, b)
b.fromfile(f, len(self.example))
self.assertEqual(a, b)
f.close()
finally:
if not f.closed:
f.close()
test_support.unlink(test_support.TESTFN)
def test_tofromlist(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tolist, 42)
self.assertRaises(TypeError, b.fromlist)
self.assertRaises(TypeError, b.fromlist, 42)
self.assertRaises(TypeError, b.fromlist, [None])
b.fromlist(a.tolist())
self.assertEqual(a, b)
def test_tofromstring(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tostring, 42)
self.assertRaises(TypeError, b.fromstring)
self.assertRaises(TypeError, b.fromstring, 42)
b.fromstring(a.tostring())
self.assertEqual(a, b)
if a.itemsize>1:
self.assertRaises(ValueError, b.fromstring, "x")
def test_repr(self):
a = array.array(self.typecode, 2*self.example)
self.assertEqual(a, eval(repr(a), {"array": array.array}))
a = array.array(self.typecode)
self.assertEqual(repr(a), "array('%s')" % self.typecode)
def test_str(self):
a = array.array(self.typecode, 2*self.example)
str(a)
def test_cmp(self):
a = array.array(self.typecode, self.example)
self.assertIs(a == 42, False)
self.assertIs(a != 42, True)
self.assertIs(a == a, True)
self.assertIs(a != a, False)
self.assertIs(a < a, False)
self.assertIs(a <= a, True)
self.assertIs(a > a, False)
self.assertIs(a >= a, True)
al = array.array(self.typecode, self.smallerexample)
ab = array.array(self.typecode, self.biggerexample)
self.assertIs(a == 2*a, False)
self.assertIs(a != 2*a, True)
self.assertIs(a < 2*a, True)
self.assertIs(a <= 2*a, True)
self.assertIs(a > 2*a, False)
self.assertIs(a >= 2*a, False)
self.assertIs(a == al, False)
self.assertIs(a != al, True)
self.assertIs(a < al, False)
self.assertIs(a <= al, False)
self.assertIs(a > al, True)
self.assertIs(a >= al, True)
self.assertIs(a == ab, False)
self.assertIs(a != ab, True)
self.assertIs(a < ab, True)
self.assertIs(a <= ab, True)
self.assertIs(a > ab, False)
self.assertIs(a >= ab, False)
def test_add(self):
a = array.array(self.typecode, self.example) \
+ array.array(self.typecode, self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[::-1])
)
b = array.array(self.badtypecode())
with self.assertRaises(TypeError):
a + b
with self.assertRaises(TypeError):
a + 'bad'
def test_iadd(self):
a = array.array(self.typecode, self.example[::-1])
b = a
a += array.array(self.typecode, 2*self.example)
self.assertIs(a, b)
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1]+2*self.example)
)
a = array.array(self.typecode, self.example)
a += a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example)
)
b = array.array(self.badtypecode())
with self.assertRaises(TypeError):
a += b
with self.assertRaises(TypeError):
a += 'bad'
def test_mul(self):
a = 5*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a = array.array(self.typecode, self.example)*5
self.assertEqual(
a,
array.array(self.typecode, self.example*5)
)
a = 0*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
a = (-1)*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
with self.assertRaises(TypeError):
a * 'bad'
def test_imul(self):
a = array.array(self.typecode, self.example)
b = a
a *= 5
self.assertIs(a, b)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a *= 0
self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a *= 1000
self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a *= -1
self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a = array.array(self.typecode, self.example)
a *= -1
self.assertEqual(a, array.array(self.typecode))
with self.assertRaises(TypeError):
a *= 'bad'
def test_getitem(self):
a = array.array(self.typecode, self.example)
self.assertEntryEqual(a[0], self.example[0])
self.assertEntryEqual(a[0L], self.example[0])
self.assertEntryEqual(a[-1], self.example[-1])
self.assertEntryEqual(a[-1L], self.example[-1])
self.assertEntryEqual(a[len(self.example)-1], self.example[-1])
self.assertEntryEqual(a[-len(self.example)], self.example[0])
self.assertRaises(TypeError, a.__getitem__)
self.assertRaises(IndexError, a.__getitem__, len(self.example))
self.assertRaises(IndexError, a.__getitem__, -len(self.example)-1)
def test_setitem(self):
a = array.array(self.typecode, self.example)
a[0] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[0L] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1L] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[len(self.example)-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-len(self.example)] = a[-1]
self.assertEntryEqual(a[0], a[-1])
self.assertRaises(TypeError, a.__setitem__)
self.assertRaises(TypeError, a.__setitem__, None)
self.assertRaises(TypeError, a.__setitem__, 0, None)
self.assertRaises(
IndexError,
a.__setitem__,
len(self.example), self.example[0]
)
self.assertRaises(
IndexError,
a.__setitem__,
-len(self.example)-1, self.example[0]
)
def test_delitem(self):
a = array.array(self.typecode, self.example)
del a[0]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
a = array.array(self.typecode, self.example)
del a[-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[len(self.example)-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[-len(self.example)]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
self.assertRaises(TypeError, a.__delitem__)
self.assertRaises(TypeError, a.__delitem__, None)
self.assertRaises(IndexError, a.__delitem__, len(self.example))
self.assertRaises(IndexError, a.__delitem__, -len(self.example)-1)
def test_getslice(self):
a = array.array(self.typecode, self.example)
self.assertEqual(a[:], a)
self.assertEqual(
a[1:],
array.array(self.typecode, self.example[1:])
)
self.assertEqual(
a[:1],
array.array(self.typecode, self.example[:1])
)
self.assertEqual(
a[:-1],
array.array(self.typecode, self.example[:-1])
)
self.assertEqual(
a[-1:],
array.array(self.typecode, self.example[-1:])
)
self.assertEqual(
a[-1:-1],
array.array(self.typecode)
)
self.assertEqual(
a[2:1],
array.array(self.typecode)
)
self.assertEqual(
a[1000:],
array.array(self.typecode)
)
self.assertEqual(a[-1000:], a)
self.assertEqual(a[:1000], a)
self.assertEqual(
a[:-1000],
array.array(self.typecode)
)
self.assertEqual(a[-1000:1000], a)
self.assertEqual(
a[2000:1000],
array.array(self.typecode)
)
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing
# (Assumes list conversion works correctly, too)
a = array.array(self.typecode, self.example)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
self.assertEqual(list(a[start:stop:step]),
list(a)[start:stop:step])
def test_setslice(self):
a = array.array(self.typecode, self.example)
a[:1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[:-1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[-1:])
)
a = array.array(self.typecode, self.example)
a[-1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:-1] = a
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:1] + self.example + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a[1000:] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[-1000:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:1000] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:-1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[1:0] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[2000:1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.__setslice__, 0, 0, None)
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), None)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), None)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__setslice__, 0, 0, b)
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), b)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), b)
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
a = array.array(self.typecode, self.example)
L = list(a)
# Make sure we have a slice of exactly the right length,
# but with (hopefully) different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
a[start:stop:step] = array.array(self.typecode, data)
self.assertEqual(a, array.array(self.typecode, L))
del L[start:stop:step]
del a[start:stop:step]
self.assertEqual(a, array.array(self.typecode, L))
def test_index(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.index)
for x in example:
self.assertEqual(a.index(x), example.index(x))
self.assertRaises(ValueError, a.index, None)
self.assertRaises(ValueError, a.index, self.outside)
def test_count(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.count)
for x in example:
self.assertEqual(a.count(x), example.count(x))
self.assertEqual(a.count(self.outside), 0)
self.assertEqual(a.count(None), 0)
def test_remove(self):
for x in self.example:
example = 2*self.example
a = array.array(self.typecode, example)
pos = example.index(x)
example2 = example[:pos] + example[pos+1:]
a.remove(x)
self.assertEqual(a, array.array(self.typecode, example2))
a = array.array(self.typecode, self.example)
self.assertRaises(ValueError, a.remove, self.outside)
self.assertRaises(ValueError, a.remove, None)
def test_pop(self):
a = array.array(self.typecode)
self.assertRaises(IndexError, a.pop)
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.pop, 42, 42)
self.assertRaises(TypeError, a.pop, None)
self.assertRaises(IndexError, a.pop, len(a))
self.assertRaises(IndexError, a.pop, -len(a)-1)
self.assertEntryEqual(a.pop(0), self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:]+self.example)
)
self.assertEntryEqual(a.pop(1), self.example[2])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:2]+self.example[3:]+self.example)
)
self.assertEntryEqual(a.pop(0), self.example[1])
self.assertEntryEqual(a.pop(), self.example[-1])
self.assertEqual(
a,
array.array(self.typecode, self.example[3:]+self.example[:-1])
)
def test_reverse(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.reverse, 42)
a.reverse()
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1])
)
def test_extend(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.extend)
a.extend(array.array(self.typecode, self.example[::-1]))
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
a = array.array(self.typecode, self.example)
a.extend(a)
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example)
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.extend, b)
a = array.array(self.typecode, self.example)
a.extend(self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
def test_constructor_with_iterable_argument(self):
a = array.array(self.typecode, iter(self.example))
b = array.array(self.typecode, self.example)
self.assertEqual(a, b)
# non-iterable argument
self.assertRaises(TypeError, array.array, self.typecode, 10)
# pass through errors raised in __iter__
class A:
def __iter__(self):
raise UnicodeError
self.assertRaises(UnicodeError, array.array, self.typecode, A())
# pass through errors raised in next()
def B():
raise UnicodeError
yield None
self.assertRaises(UnicodeError, array.array, self.typecode, B())
def test_coveritertraverse(self):
    """The GC must cope with a reference cycle holding an array iterator."""
    try:
        import gc
    except ImportError:
        self.skipTest('gc module not available')
    cycle = [iter(array.array(self.typecode))]
    cycle.append(cycle)
    gc.collect()
def test_buffer(self):
    """buffer() over an array exposes the same bytes as tostring()."""
    arr = array.array(self.typecode, self.example)
    # buffer() emits a py3k deprecation warning; silence it for the test.
    with test_support.check_py3k_warnings():
        buf = buffer(arr)
    self.assertEqual(buf[0], arr.tostring()[0])
def test_weakref(self):
    """A weak proxy to an array goes dead once the array is collected."""
    arr = array.array(self.typecode, self.example)
    ref = proxy(arr)
    self.assertEqual(ref.tostring(), arr.tostring())
    arr = None
    test_support.gc_collect()
    self.assertRaises(ReferenceError, len, ref)
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
                     'test needs sys.getrefcount()')
def test_bug_782369(self):
    # Regression check for SF bug #782369 — presumably a refcount leak
    # when building arrays from range() results (TODO confirm against the
    # original bug report).  Warm up, snapshot the refcount of a small
    # int, repeat the construction, and verify the count did not drift.
    for i in range(10):
        b = array.array('B', range(64))
    rc = sys.getrefcount(10)
    for i in range(10):
        b = array.array('B', range(64))
    self.assertEqual(rc, sys.getrefcount(10))
def test_subclass_with_kwargs(self):
    """Instantiating an array subclass with kwargs must not raise.

    SF bug #1486663 -- this used to erroneously raise a TypeError.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", '', DeprecationWarning)
        ArraySubclassWithKwargs('b', newarg=1)
class StringTest(BaseTest):
    """Checks shared by the character-based typecodes."""

    def test_setitem(self):
        super(StringTest, self).test_setitem()
        arr = array.array(self.typecode, self.example)
        # Assigning a multi-character string to one slot is an error.
        self.assertRaises(TypeError, arr.__setitem__, 0, self.example[:2])
class CharacterTest(StringTest):
    """Tests for the 'c' (byte character) typecode."""
    typecode = 'c'
    example = '\x01azAZ\x00\xfe'
    smallerexample = '\x01azAY\x00\xfe'
    biggerexample = '\x01azAZ\x00\xff'
    outside = '\x33'
    minitemsize = 1

    def test_subbclassing(self):
        """A 'c' array subclass can carry extra per-instance state."""
        class EditableString(array.array):
            def __new__(cls, s, *args, **kwargs):
                return array.array.__new__(cls, 'c', s)
            def __init__(self, s, color='blue'):
                self.color = color
            def strip(self):
                self[:] = array.array('c', self.tostring().strip())
            def __repr__(self):
                return 'EditableString(%r)' % self.tostring()

        text = EditableString("\ttest\r\n")
        text.strip()
        self.assertEqual(text.tostring(), "test")
        self.assertEqual(text.color, "blue")
        text.color = "red"
        self.assertEqual(text.color, "red")
        # Only the extra attribute lives in the instance dict.
        self.assertEqual(text.__dict__.keys(), ["color"])

    def test_nounicode(self):
        """'c' arrays reject the unicode conversion API."""
        arr = array.array(self.typecode, self.example)
        self.assertRaises(ValueError, arr.fromunicode, unicode(''))
        self.assertRaises(ValueError, arr.tounicode)

tests.append(CharacterTest)
# The 'u' typecode only exists on builds with unicode support.
if test_support.have_unicode:
    class UnicodeTest(StringTest):
        """Tests for the 'u' (unicode character) typecode."""
        typecode = 'u'
        example = unicode(r'\x01\u263a\x00\ufeff', 'unicode-escape')
        smallerexample = unicode(r'\x01\u263a\x00\ufefe', 'unicode-escape')
        biggerexample = unicode(r'\x01\u263a\x01\ufeff', 'unicode-escape')
        outside = unicode('\x33')
        minitemsize = 2

        def test_unicode(self):
            # A byte array must reject a unicode initializer.
            self.assertRaises(TypeError, array.array, 'b', unicode('foo', 'ascii'))

            a = array.array('u', unicode(r'\xa0\xc2\u1234', 'unicode-escape'))
            a.fromunicode(unicode(' ', 'ascii'))
            # Appending the empty string is a no-op.
            a.fromunicode(unicode('', 'ascii'))
            a.fromunicode(unicode('', 'ascii'))
            a.fromunicode(unicode(r'\x11abc\xff\u1234', 'unicode-escape'))
            s = a.tounicode()
            self.assertEqual(
                s,
                unicode(r'\xa0\xc2\u1234 \x11abc\xff\u1234', 'unicode-escape')
            )

            # repr() must round-trip quotes and escapes faithfully.
            s = unicode(r'\x00="\'a\\b\x80\xff\u0000\u0001\u1234', 'unicode-escape')
            a = array.array('u', s)
            self.assertEqual(
                repr(a),
                r"""array('u', u'\x00="\'a\\b\x80\xff\x00\x01\u1234')"""
            )

            self.assertRaises(TypeError, a.fromunicode)

    tests.append(UnicodeTest)
class NumberTest(BaseTest):
    """Checks shared by all numeric typecodes (integer and floating point)."""

    def test_extslice(self):
        # Extended (stepped) slicing must mirror list semantics, including
        # clamping of out-of-range bounds and long-integer indices.
        a = array.array(self.typecode, range(5))
        self.assertEqual(a[::], a)
        self.assertEqual(a[::2], array.array(self.typecode, [0,2,4]))
        self.assertEqual(a[1::2], array.array(self.typecode, [1,3]))
        self.assertEqual(a[::-1], array.array(self.typecode, [4,3,2,1,0]))
        self.assertEqual(a[::-2], array.array(self.typecode, [4,2,0]))
        self.assertEqual(a[3::-2], array.array(self.typecode, [3,1]))
        self.assertEqual(a[-100:100:], a)
        self.assertEqual(a[100:-100:-1], a[::-1])
        self.assertEqual(a[-100L:100L:2L], array.array(self.typecode, [0,2,4]))
        self.assertEqual(a[1000:2000:2], array.array(self.typecode, []))
        self.assertEqual(a[-1000:-2000:-2], array.array(self.typecode, []))

    def test_delslice(self):
        # Deleting stepped slices, including negative and huge steps.
        a = array.array(self.typecode, range(5))
        del a[::2]
        self.assertEqual(a, array.array(self.typecode, [1,3]))
        a = array.array(self.typecode, range(5))
        del a[1::2]
        self.assertEqual(a, array.array(self.typecode, [0,2,4]))
        a = array.array(self.typecode, range(5))
        del a[1::-2]
        self.assertEqual(a, array.array(self.typecode, [0,2,3,4]))
        a = array.array(self.typecode, range(10))
        del a[::1000]
        self.assertEqual(a, array.array(self.typecode, [1,2,3,4,5,6,7,8,9]))
        # test issue7788 -- a step too large for Py_ssize_t must not crash.
        a = array.array(self.typecode, range(10))
        del a[9::1<<333]

    def test_assignment(self):
        # Assigning to extended slices, including reversed overlap (a[::-1] = a).
        a = array.array(self.typecode, range(10))
        a[::2] = array.array(self.typecode, [42]*5)
        self.assertEqual(a, array.array(self.typecode, [42, 1, 42, 3, 42, 5, 42, 7, 42, 9]))
        a = array.array(self.typecode, range(10))
        a[::-4] = array.array(self.typecode, [10]*3)
        self.assertEqual(a, array.array(self.typecode, [0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
        a = array.array(self.typecode, range(4))
        a[::-1] = a
        self.assertEqual(a, array.array(self.typecode, [3, 2, 1, 0]))
        # All three spellings of a simple slice assignment must be accepted.
        a = array.array(self.typecode, range(10))
        b = a[:]
        c = a[:]
        ins = array.array(self.typecode, range(2))
        a[2:3] = ins
        b[slice(2,3)] = ins
        c[2:3:] = ins

    def test_iterationcontains(self):
        # Iteration yields the stored values; `in` uses value equality.
        a = array.array(self.typecode, range(10))
        self.assertEqual(list(a), range(10))
        b = array.array(self.typecode, [20])
        self.assertEqual(a[-1] in a, True)
        self.assertEqual(b[0] not in a, True)

    def check_overflow(self, lower, upper):
        # method to be used by subclasses
        # should not overflow assigning lower limit
        a = array.array(self.typecode, [lower])
        a[0] = lower
        # should overflow assigning less than lower limit
        self.assertRaises(OverflowError, array.array, self.typecode, [lower-1])
        self.assertRaises(OverflowError, a.__setitem__, 0, lower-1)
        # should not overflow assigning upper limit
        a = array.array(self.typecode, [upper])
        a[0] = upper
        # should overflow assigning more than upper limit
        self.assertRaises(OverflowError, array.array, self.typecode, [upper+1])
        self.assertRaises(OverflowError, a.__setitem__, 0, upper+1)

    def test_subclassing(self):
        # A __slots__ subclass works and rejects unknown attributes.
        typecode = self.typecode
        class ExaggeratingArray(array.array):
            __slots__ = ['offset']
            def __new__(cls, typecode, data, offset):
                return array.array.__new__(cls, typecode, data)
            def __init__(self, typecode, data, offset):
                self.offset = offset
            def __getitem__(self, i):
                return array.array.__getitem__(self, i) + self.offset
        a = ExaggeratingArray(self.typecode, [3, 6, 7, 11], 4)
        self.assertEntryEqual(a[0], 7)
        self.assertRaises(AttributeError, setattr, a, "color", "blue")
class SignedNumberTest(NumberTest):
    """Mixin for the signed integer typecodes."""
    example = [-1, 0, 1, 42, 0x7f]
    smallerexample = [-1, 0, 1, 42, 0x7e]
    biggerexample = [-1, 0, 1, 43, 0x7f]
    outside = 23

    def test_overflow(self):
        # A signed item of n bytes stores [-2**(8n-1), 2**(8n-1) - 1].
        a = array.array(self.typecode)
        lower = -1 * long(pow(2, a.itemsize * 8 - 1))
        upper = long(pow(2, a.itemsize * 8 - 1)) - 1L
        self.check_overflow(lower, upper)
class UnsignedNumberTest(NumberTest):
    """Mixin for the unsigned integer typecodes."""
    example = [0, 1, 17, 23, 42, 0xff]
    smallerexample = [0, 1, 17, 23, 42, 0xfe]
    biggerexample = [0, 1, 17, 23, 43, 0xff]
    outside = 0xaa

    def test_overflow(self):
        # An unsigned item of n bytes stores [0, 2**(8n) - 1].
        a = array.array(self.typecode)
        lower = 0
        upper = long(pow(2, a.itemsize * 8)) - 1L
        self.check_overflow(lower, upper)

    @test_support.cpython_only
    def test_sizeof_with_buffer(self):
        # sys.getsizeof must account for the item buffer.
        a = array.array(self.typecode, self.example)
        basesize = test_support.calcvobjsize('4P')
        buffer_size = a.buffer_info()[1] * a.itemsize
        test_support.check_sizeof(self, a, basesize + buffer_size)

    @test_support.cpython_only
    def test_sizeof_without_buffer(self):
        # An empty array has no buffer allocated.
        a = array.array(self.typecode)
        basesize = test_support.calcvobjsize('4P')
        test_support.check_sizeof(self, a, basesize)
# Concrete integer test classes, one per typecode.  `minitemsize` is the
# minimum C size guaranteed for the typecode; platforms may use wider types
# (e.g. 'i' is at least 2 bytes but usually 4).
class ByteTest(SignedNumberTest):
    typecode = 'b'
    minitemsize = 1
tests.append(ByteTest)

class UnsignedByteTest(UnsignedNumberTest):
    typecode = 'B'
    minitemsize = 1
tests.append(UnsignedByteTest)

class ShortTest(SignedNumberTest):
    typecode = 'h'
    minitemsize = 2
tests.append(ShortTest)

class UnsignedShortTest(UnsignedNumberTest):
    typecode = 'H'
    minitemsize = 2
tests.append(UnsignedShortTest)

class IntTest(SignedNumberTest):
    typecode = 'i'
    minitemsize = 2
tests.append(IntTest)

class UnsignedIntTest(UnsignedNumberTest):
    typecode = 'I'
    minitemsize = 2
tests.append(UnsignedIntTest)

class LongTest(SignedNumberTest):
    typecode = 'l'
    minitemsize = 4
tests.append(LongTest)

class UnsignedLongTest(UnsignedNumberTest):
    typecode = 'L'
    minitemsize = 4
tests.append(UnsignedLongTest)
class FPTest(NumberTest):
    """Checks shared by the floating point typecodes."""
    example = [-42.0, 0, 42, 1e5, -1e10]
    smallerexample = [-42.0, 0, 42, 1e5, -2e10]
    biggerexample = [-42.0, 0, 42, 1e5, 1e10]
    outside = 23

    def assertEntryEqual(self, entry1, entry2):
        # Values stored as C floats may come back with reduced precision.
        self.assertAlmostEqual(entry1, entry2)

    def test_byteswap(self):
        """byteswap() twice is the identity; once changes the raw bytes."""
        arr = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, arr.byteswap, 42)
        if arr.itemsize not in (1, 2, 4, 8):
            return
        swapped = array.array(self.typecode, self.example)
        swapped.byteswap()
        if arr.itemsize == 1:
            self.assertEqual(arr, swapped)
        else:
            # On alphas treating the byte swapped bit patters as
            # floats/doubles results in floating point exceptions
            # => compare the 8bit string values instead
            self.assertNotEqual(arr.tostring(), swapped.tostring())
        swapped.byteswap()
        self.assertEqual(arr, swapped)
class FloatTest(FPTest):
    # 'f': single precision, C float, at least 4 bytes.
    typecode = 'f'
    minitemsize = 4
tests.append(FloatTest)
class DoubleTest(FPTest):
    """Tests for the 'd' (double precision) typecode."""
    typecode = 'd'
    minitemsize = 8

    def test_alloc_overflow(self):
        """Growing an array beyond sys.maxsize items must raise MemoryError."""
        from sys import maxsize
        big = array.array('d', [-1]*65536)
        try:
            big *= maxsize//65536 + 1
        except MemoryError:
            pass
        else:
            self.fail("Array of size > maxsize created - MemoryError expected")
        small = array.array('d', [ 2.71828183, 3.14159265, -1])
        try:
            small * (maxsize//3 + 1)
        except MemoryError:
            pass
        else:
            self.fail("Array of size > maxsize created - MemoryError expected")

tests.append(DoubleTest)
def test_main(verbose=None):
    """Run the whole array test suite; optionally check for ref leaks.

    With verbose set on a debug build (which has sys.gettotalrefcount),
    the suite is re-run several times and total refcounts are printed so
    a leak shows up as a steadily growing sequence.
    """
    import sys
    test_support.run_unittest(*tests)
    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(*tests)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts

if __name__ == "__main__":
    test_main(verbose=True)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
class InverseGamma(distribution.Distribution):
  """The `InverseGamma` distribution with parameter alpha and beta.

  The parameters are the shape and scale parameters alpha, beta.

  The PDF of this distribution is:

  ```pdf(x) = (beta^alpha)/Gamma(alpha)(x^(-alpha-1))e^(-beta/x), x > 0```

  and the CDF of this distribution is:

  ```cdf(x) = GammaInc(alpha, beta / x) / Gamma(alpha), x > 0```

  where GammaInc is the upper incomplete Gamma function.

  Examples:

  ```python
  dist = InverseGamma(alpha=3.0, beta=2.0)
  dist2 = InverseGamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])
  ```

  """

  def __init__(self,
               alpha,
               beta,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGamma"):
    """Construct InverseGamma distributions with parameters `alpha` and `beta`.

    The parameters `alpha` and `beta` must be shaped in a way that supports
    broadcasting (e.g. `alpha + beta` is a valid operation).

    Args:
      alpha: Floating point tensor, the shape params of the
        distribution(s).
        alpha must contain only positive values.
      beta: Floating point tensor, the scale params of the distribution(s).
        beta must contain only positive values.
      validate_args: `Boolean`, default `False`. Whether to assert that
        `a > 0`, `b > 0`, and that `x > 0` in the methods `prob(x)` and
        `log_prob(x)`. If `validate_args` is `False` and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prepend to all ops created by this distribution.

    Raises:
      TypeError: if `alpha` and `beta` are different dtypes.
    """
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      # Positivity assertions are only wired in when requested; otherwise
      # invalid parameters silently yield undefined results.
      with ops.control_dependencies([
          check_ops.assert_positive(alpha),
          check_ops.assert_positive(beta),
      ] if validate_args else []):
        self._alpha = array_ops.identity(alpha, name="alpha")
        self._beta = array_ops.identity(beta, name="beta")
    super(InverseGamma, self).__init__(
        dtype=self._alpha.dtype,
        parameters={"alpha": self._alpha, "beta": self._beta},
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        is_continuous=True,
        is_reparameterized=False,
        name=ns)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters broadcast to the batch shape, so each takes the
    # requested sample shape.
    return dict(
        zip(("alpha", "beta"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def alpha(self):
    """Shape parameter."""
    return self._alpha

  @property
  def beta(self):
    """Scale parameter."""
    return self._beta

  def _batch_shape(self):
    # alpha + beta broadcasts the two parameter shapes together.
    return array_ops.shape(self.alpha + self.beta)

  def _get_batch_shape(self):
    return common_shapes.broadcast_shape(self.alpha.get_shape(),
                                         self.beta.get_shape())

  def _event_shape(self):
    # Scalar distribution: empty event shape.
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    """See the documentation for tf.random_gamma for more details."""
    # If X ~ Gamma(alpha, rate=beta) then 1/X ~ InverseGamma(alpha, beta).
    return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                        dtype=self.dtype, seed=seed)

  def _log_prob(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    # log pdf = alpha*log(beta) - lgamma(alpha) - (alpha+1)*log(x) - beta/x
    return (self.alpha * math_ops.log(self.beta) -
            math_ops.lgamma(self.alpha) -
            (self.alpha + 1.) * math_ops.log(x) - self.beta / x)

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

  def _cdf(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    # Note that igammac returns the upper regularized incomplete gamma
    # function Q(a, x), which is what we want for the CDF.
    return math_ops.igammac(self.alpha, self.beta / x)

  @distribution_util.AppendDocstring(
      """This is defined to be

      ```
      entropy = alpha + log(beta) + log(Gamma(alpha))
      - (1 + alpha) * digamma(alpha)
      ```

      where digamma(alpha) is the digamma function.""")
  def _entropy(self):
    # Matches the documented formula above (the earlier docstring had the
    # signs of log(beta) and the digamma coefficient wrong; the code was
    # already correct).
    return (self.alpha +
            math_ops.log(self.beta) +
            math_ops.lgamma(self.alpha) -
            (1. + self.alpha) * math_ops.digamma(self.alpha))

  @distribution_util.AppendDocstring(
      """The mean of an inverse gamma distribution is `beta / (alpha - 1)`,
      when `alpha > 1`, and `NaN` otherwise.  If `self.allow_nan_stats` is
      `False`, an exception will be raised rather than returning `NaN`""")
  def _mean(self):
    mean = self.beta / (self.alpha - 1.)
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return math_ops.select(
          self.alpha > 1., mean,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), self.dtype), self.alpha,
              message="mean not defined for components of self.alpha <= 1"),
      ], mean)

  @distribution_util.AppendDocstring(
      """Variance for inverse gamma is defined only for `alpha > 2`. If
      `self.allow_nan_stats` is `False`, an exception will be raised rather
      than returning `NaN`.""")
  def _variance(self):
    var = (math_ops.square(self.beta) /
           (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return math_ops.select(
          self.alpha > 2., var,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              constant_op.constant(2., dtype=self.dtype), self.alpha,
              message="variance not defined for components of alpha <= 2"),
      ], var)

  def _mode(self):
    """The mode of an inverse gamma distribution is `beta / (alpha + 1)`."""
    return self.beta / (self.alpha + 1.)
class InverseGammaWithSoftplusAlphaBeta(InverseGamma):
  """Inverse Gamma with softplus applied to `alpha` and `beta`.

  Softplus maps unconstrained real-valued inputs onto the positive domain
  required by both parameters, which is convenient when the raw parameters
  are learned.
  """

  def __init__(self,
               alpha,
               beta,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGammaWithSoftplusAlphaBeta"):
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      super(InverseGammaWithSoftplusAlphaBeta, self).__init__(
          alpha=nn.softplus(alpha),
          beta=nn.softplus(beta),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
| |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing and Mocking Framework.
To update the golden file:
googletest_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built googletest-output-test_ file.
googletest_output_test.py --gengolden
googletest_output_test.py
"""
import difflib
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
# The golden file is generated on Linux only; other platforms compare
# against a normalized version of it.
GOLDEN_NAME = 'googletest-output-test-golden-lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('googletest-output-test_')
# Each command below is an (extra_env_dict, argv) pair, the format consumed
# by GetShellCommandOutput()/GetCommandOutput().
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          'internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         'internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     'internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # Order matters: convert CRLF first so the bare-CR pass cannot split it.
  for ending in ('\r\n', '\r'):
    s = s.replace(ending, '\n')
  return s
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Both the POSIX style 'DIRECTORY/FILE_NAME:LINE_NUMBER: ' and the Windows
  style 'DIRECTORY\\FILE_NAME(LINE_NUMBER): ' are normalized to
  'FILE_NAME:#: '.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with all file location info replaced.
  """
  location_re = re.compile(
      r'.*[/\\]((googletest-output-test_|gtest).cc)(\:\d+|\(\d+\))\: ')
  return location_re.sub(r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Replaces the body of every stack trace with '(omitted)'."""
  # (.|\n)*? is a non-greedy match of any text including newlines, so each
  # trace is trimmed at the first blank line that ends it.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
  """Deletes every stack trace, header line included."""
  # Non-greedy match up to the blank line terminating each trace.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('', output)
def RemoveTime(output):
  """Masks elapsed-time figures like '(123 ms' as '(? ms'."""
  time_re = re.compile(r'\(\d+ ms')
  return time_re.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Some compilers spell the type 'unsigned int' where others print just
  'unsigned'; canonicalize to the short form.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with type information normalized to canonical form.
  """
  return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  if not IS_WINDOWS:
    # Nothing to normalize on POSIX platforms.
    return test_output
  # Removes the color information that is not present on Windows.
  test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
  # Changes failure message headers into the Windows format.
  test_output = re.sub(r': Failure\n', r': error: ', test_output)
  # Changes file(line_number) to file:line_number.
  test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  return test_output
def RemoveTestCounts(output):
  """Replaces concrete test counts in summary lines with '?' placeholders."""
  # Application order matters: the more specific patterns must run first.
  replacements = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in replacements:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  Strips the RUN/OK/FAILED banner lines of every matching test together
  with everything printed in between, then drops any remaining stray
  lines that mention the pattern.

  Args:
    test_output: A string containing the test output.
    pattern: A regex string that matches names of test cases or
             tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  banner_re = (r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n'
               % (pattern, pattern))
  test_output = re.sub(banner_re, '', test_output)
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
  """Normalizes output (the output of googletest-output-test_.exe)."""
  # Apply each normalization pass in turn.
  for normalize in (ToUnixLineEnding, RemoveLocations,
                    RemoveStackTraceDetails, RemoveTime):
    output = normalize(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  extra_env, cmdline = env_cmd
  # Run against a copy of the current environment extended with the
  # requested variables.
  environ = os.environ.copy()
  environ.update(extra_env)
  return gtest_test_utils.Subprocess(cmdline, env=environ).output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its normalized output.

  All file location info is stripped off and exception pop-ups are
  disabled on Windows before running.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.
  """
  extra_env, cmdline = env_cmd
  # Copy before patching so the caller's dict is untouched; the extra
  # variable disables exception pop-ups on Windows.
  patched_env = dict(extra_env)
  patched_env[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((patched_env, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  commands = (COMMAND_WITH_COLOR, COMMAND_WITH_TIME,
              COMMAND_WITH_DISABLED, COMMAND_WITH_SHARDING)
  return ''.join(GetCommandOutput(command) for command in commands)
# Probe the test binary once to learn which optional features it was built
# with; the golden-file comparison degrades gracefully when some are missing.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv
# Regenerating the golden file requires every feature to be present.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS and
                            SUPPORTS_STACK_TRACES)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares the test binary's output against the golden file."""

  def RemoveUnsupportedTests(self, test_output):
    # Drops the golden-file sections for features this build lacks so the
    # comparison in testOutput() stays meaningful.
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()

    golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read().decode())
    golden_file.close()

    # We want the test to pass regardless of certain features being
    # supported or not.

    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      # All features present: require an exact (normalized) match and show
      # a unified diff on failure.
      self.assertEqual(normalized_golden, normalized_actual,
                       '\n'.join(difflib.unified_diff(
                           normalized_golden.split('\n'),
                           normalized_actual.split('\n'),
                           'golden', 'actual')))
    else:
      # Some features missing: strip the unsupported sections and counts
      # from both sides before comparing.
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_googletest-output-test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_googletest-output-test_normalized_golden.txt'), 'wb').write(
                normalized_golden)

      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  if NO_STACKTRACE_SUPPORT_FLAG in sys.argv:
    # unittest.main() can't handle unknown flags
    sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG)

  if GENGOLDEN_FLAG in sys.argv:
    # Regenerate the golden file instead of running the comparison test.
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests,
typed tests, stack traces, and multiple threads).
Please build this test and generate the golden file using Blaze on Linux.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| |
import sublime
import sublime_plugin
import sys
import os
# We use several commands implemented in Vintange, so make it available here.
sys.path.append(os.path.join(sublime.packages_path(), 'Vintage'))
import re
import subprocess
from vintage import g_registers
from plat.windows import get_oem_cp
from plat.windows import get_startup_info
from vex import ex_error
from vex import ex_range
from vex import shell
from vex import parsers
# Line ranges produced by a preceding ExGlobal command; consumed (and
# cleared) by the next command via get_region_by_range().
GLOBAL_RANGES = []
# Default range equivalent to Vim's '.' — the current line only.
CURRENT_LINE_RANGE = {'left_ref': '.', 'left_offset': 0, 'left_search_offsets': [],
                      'right_ref': None, 'right_offset': 0, 'right_search_offsets': []}
class VintageExState(object):
    """Module-wide mutable state shared by the VintageEx commands."""
    # When repeating searches, determines which search term to use: the current
    # word or the latest search term.
    # Values: find_under, search_pattern
    search_buffer_type = 'find_under'
def is_any_buffer_dirty(window):
    """Return True if any view in `window` has unsaved changes, else False.

    (The previous version fell off the end and returned None in the
    all-clean case; callers only use the result in a boolean context, so
    returning an explicit False is compatible.)
    """
    return any(v.is_dirty() for v in window.views())
# TODO: this code must be shared with Vintage, not reimplemented here.
def set_register(text, register):
    """Stores text into a Vim-style register (state shared with Vintage)."""
    global g_registers
    if register in ('*', '+'):
        # '*' and '+' map to the system clipboard.
        sublime.set_clipboard(text)
        return
    if register == '%':
        # The file-name register is not writable.
        return
    reg = register.lower()
    if reg != register and reg in g_registers:
        # An upper-case register name means "append to the register".
        g_registers[reg] += text
    else:
        g_registers[reg] = text
def gather_buffer_info(v):
    """Returns [display_name, short_path] for view v, for :ls or :buffers."""
    path = v.file_name()
    if path:
        parent, leaf = os.path.split(path)
        # Show only the immediate parent directory, not the full path.
        path = os.path.join(os.path.basename(parent), leaf)
    else:
        path = v.name() or str(v.buffer_id())
        leaf = v.name() or 'untitled'

    # Status flags: t = transient (no file), * = dirty, r = read-only.
    status = [flag for flag, present in (("t", not v.file_name()),
                                         ("*", v.is_dirty()),
                                         ("r", v.is_read_only()))
              if present]
    if status:
        leaf += ' (%s)' % ', '.join(status)
    return [leaf, path]
def get_region_by_range(view, line_range=None, as_lines=False):
    """Resolves line_range (or pending :global results) to regions of view.

    If a preceding ExGlobal command left ranges in GLOBAL_RANGES, those
    take priority and are consumed.  Returns None when neither source
    provides any range information.
    """
    global GLOBAL_RANGES
    if GLOBAL_RANGES:
        pending, GLOBAL_RANGES = GLOBAL_RANGES[:], []
        return pending

    if line_range:
        vim_range = ex_range.VimRange(view, line_range)
        return vim_range.lines() if as_lines else vim_range.blocks()
class ExGoto(sublime_plugin.TextCommand):
    """Ex command: :<address> -- moves the caret to the addressed line."""

    def run(self, edit, line_range=None):
        if not line_range['text_range']:
            # No-op: user issued ":".
            return
        resolved, _ = ex_range.new_calculate_range(self.view, line_range)
        _, target_line = resolved[0]
        self.view.run_command('vi_goto_line', {'repeat': target_line})
        self.view.show(self.view.sel()[0])
class ExShellOut(sublime_plugin.TextCommand):
    """Ex command(s): :!cmd, :'<,>'!cmd

    Run cmd in a system's shell or filter selected regions through external
    command.
    """

    def run(self, edit, line_range=None, shell_cmd=''):
        try:
            if not line_range['text_range']:
                # No range given: just run the command.
                shell.run_and_wait(self.view, shell_cmd)
                return
            # A range was given: filter the addressed regions through the
            # external command.
            regions = get_region_by_range(self.view, line_range=line_range)
            shell.filter_thru_shell(view=self.view,
                                    regions=regions,
                                    cmd=shell_cmd)
        except NotImplementedError:
            ex_error.handle_not_implemented()
class ExShell(sublime_plugin.TextCommand):
    """Ex command(s): :shell

    Opens a shell at the current view's directory. Sublime Text keeps a virtual
    current directory that most of the time will be out of sync with the actual
    current directory. The virtual current directory is always set to the
    current view's directory, but it isn't accessible through the API.
    """

    def open_shell(self, command):
        """Launches `command` with cwd set to the current view's directory."""
        view_dir = os.path.dirname(self.view.file_name())
        return subprocess.Popen(command, cwd=view_dir)

    def _open_configured_terminal(self, setting_name):
        """Shared linux/osx path: resolve a terminal emulator, run bash in it.

        The terminal is taken from the given setting, falling back to the
        COLORTERM/TERM environment variables.
        """
        term = self.view.settings().get(setting_name)
        term = term or os.environ.get('COLORTERM') or os.environ.get("TERM")
        if not term:
            # (Message fixed: previously read "Not terminal name found.")
            sublime.status_message("VintageEx: No terminal name found.")
            return
        try:
            self.open_shell([term, '-e', 'bash']).wait()
        except Exception as e:
            print(e)
            sublime.status_message("VintageEx: Error while executing command through shell.")

    def run(self, edit):
        # The linux and osx branches only differ in the settings key; the
        # previous implementation duplicated the whole block.
        platform = sublime.platform()
        if platform == 'linux':
            self._open_configured_terminal('vintageex_linux_terminal')
        elif platform == 'osx':
            self._open_configured_terminal('vintageex_osx_terminal')
        elif platform == 'windows':
            self.open_shell(['cmd.exe', '/k']).wait()
        else:
            ex_error.handle_not_implemented()
class ExReadShellOut(sublime_plugin.TextCommand):
def run(self, edit, line_range=None, name='', plusplus_args='', forced=False):
target_line = self.view.line(self.view.sel()[0].begin())
if line_range['text_range']:
range = max(ex_range.calculate_range(self.view, line_range=line_range)[0])
target_line = self.view.line(self.view.text_point(range, 0))
target_point = min(target_line.b + 1, self.view.size())
# cheat a little bit to get the parsing right:
# - forced == True means we need to execute a command
if forced:
if sublime.platform() == 'linux':
for s in self.view.sel():
# TODO: make shell command configurable.
the_shell = self.view.settings().get('linux_shell')
the_shell = the_shell or os.path.expandvars("$SHELL")
if not the_shell:
sublime.status_message("VintageEx: No shell name found.")
return
try:
p = subprocess.Popen([the_shell, '-c', name],
stdout=subprocess.PIPE)
except Exception as e:
print e
sublime.status_message("VintageEx: Error while executing command through shell.")
return
self.view.insert(edit, s.begin(), p.communicate()[0][:-1])
elif sublime.platform() == 'windows':
for s in self.view.sel():
p = subprocess.Popen(['cmd.exe', '/C', name],
stdout=subprocess.PIPE,
startupinfo=get_startup_info()
)
cp = 'cp' + get_oem_cp()
rv = p.communicate()[0].decode(cp)[:-2].strip()
self.view.insert(edit, s.begin(), rv)
else:
ex_error.handle_not_implemented()
# Read a file into the current view.
else:
# According to Vim's help, :r should read the current file's content
# if no file name is given, but Vim doesn't do that.
# TODO: implement reading a file into the buffer.
ex_error.handle_not_implemented()
return
class ExPromptSelectOpenFile(sublime_plugin.TextCommand):
    """Ex command(s): :ls, :files
    Shows a quick panel listing the open files only. Provides concise
    information about the buffers's state: 'transient', 'unsaved'.
    """
    def run(self, edit):
        # Each entry is a [leaf, path] pair produced by gather_buffer_info().
        self.file_names = [gather_buffer_info(v)
                           for v in self.view.window().views()]
        self.view.window().show_quick_panel(self.file_names, self.on_done)
    def on_done(self, idx):
        # Quick-panel callback; idx == -1 means the panel was dismissed.
        if idx == -1: return
        sought_fname = self.file_names[idx]
        for v in self.view.window().views():
            # On-disk files are matched by their path suffix.
            if v.file_name() and v.file_name().endswith(sought_fname[1]):
                self.view.window().focus_view(v)
            # XXX Base all checks on buffer id?
            # Unsaved buffers were listed by their numeric buffer id.
            elif sought_fname[1].isdigit() and \
                v.buffer_id() == int(sought_fname[1]):
                    self.view.window().focus_view(v)
class ExMap(sublime_plugin.TextCommand):
    # do at least something moderately useful: open the user's .sublime-keymap
    # file
    def run(self, edit):
        # Map Sublime's platform identifier onto the keymap file suffix;
        # anything that isn't Windows or Linux is OSX.
        platform_names = {'windows': 'Windows', 'linux': 'Linux'}
        platf = platform_names.get(sublime.platform(), 'OSX')
        self.view.window().run_command('open_file', {'file':
            '${packages}/User/Default (%s).sublime-keymap' % platf})
class ExAbbreviate(sublime_plugin.TextCommand):
    # For the moment, just open a completions file.
    def run(self, edit):
        # Open the user's VintageEx abbreviations file (implemented as a
        # .sublime-completions file), creating a skeleton first if needed.
        abbs_file_name = 'VintageEx Abbreviations.sublime-completions'
        abbreviations = os.path.join(sublime.packages_path(),
                        'User/' + abbs_file_name)
        if not os.path.exists(abbreviations):
            # Seed a minimal, empty completions skeleton.
            with open(abbreviations, 'w') as f:
                f.write('{\n\t"scope": "",\n\t"completions": [\n\t\n\t]\n}\n')
        self.view.window().run_command('open_file',
                        {'file': "${packages}/User/%s" % abbs_file_name})
class ExPrintWorkingDir(sublime_plugin.TextCommand):
    """Ex command(s): :pwd
    Shows the process's current working directory in the status bar.
    """
    def run(self, edit):
        sublime.status_message(os.getcwd())
class ExWriteFile(sublime_plugin.TextCommand):
    """Ex command(s): :w[rite] [file], :w >> (append), :w !cmd variants
    Writes the buffer (or the addressed lines) to disk, to a new buffer
    named `file_name`/`target_redirect`, or appends to the current one.
    """
    def run(self, edit,
            line_range=None,
            forced=False,
            file_name='',
            plusplus_args='',
            operator='',
            target_redirect='',
            subcmd=''):
        if file_name and target_redirect:
            sublime.status_message('VintageEx: Too many arguments.')
            return
        # '>>' means append instead of overwrite.
        appending = operator == '>>'
        # FIXME: reversed? -- what's going on here!!
        a_range = line_range['text_range']
        # With a range, operate on those regions; otherwise on the whole buffer.
        content = get_region_by_range(self.view, line_range=line_range) if a_range else \
                        [sublime.Region(0, self.view.size())]
        if target_redirect or file_name:
            # Write into a brand-new buffer with the requested name.
            target = self.view.window().new_file()
            target.set_name(target_redirect or file_name)
        else:
            target = self.view
        # Appends go at the end of the target; everything else at the top.
        start = 0 if not appending else target.size()
        prefix = '\n' if appending and target.size() > 0 else ''
        if appending or target_redirect or file_name:
            # Insert fragments in reverse so they end up in original order
            # at a fixed insertion point.
            for frag in reversed(content):
                target.insert(edit, start, prefix + self.view.substr(frag) + '\n')
        elif a_range:
            # ':<range>w' replaces the buffer with just the addressed lines:
            # copy them to the top, then delete everything after them.
            start_deleting = 0
            for frag in content:
                text = self.view.substr(frag) + '\n'
                self.view.insert(edit, 0, text)
                start_deleting += len(text)
            self.view.replace(edit, sublime.Region(start_deleting,
                                        self.view.size()), '')
        else:
            # Plain ':w': save only if there is something to save.
            if self.view.is_dirty():
                self.view.run_command('save')
class ExWriteAll(sublime_plugin.TextCommand):
    """Ex command(s): :wa[ll] -- save every modified buffer in the window."""
    def run(self, edit, forced=False):
        dirty_views = (v for v in self.view.window().views() if v.is_dirty())
        for dirty_view in dirty_views:
            dirty_view.run_command('save')
class ExNewFile(sublime_plugin.TextCommand):
    """Opens a new, empty buffer (the `forced` flag is ignored)."""
    def run(self, edit, forced=False):
        self.view.window().run_command('new_file')
class ExFile(sublime_plugin.TextCommand):
    """Ex command(s): :f[ile]
    Shows the file name, its attributes, the line count and the caret's
    approximate position in the status bar.
    """
    def run(self, edit, forced=False):
        # XXX figure out what the right params are. vim's help seems to be
        # wrong
        if self.view.file_name():
            fname = self.view.file_name()
        else:
            fname = 'untitled'
        attrs = ''
        if self.view.is_read_only():
            attrs = 'readonly'
        if self.view.is_scratch():
            attrs = 'modified'
        # `lines` is either a message string or the actual line count.
        lines = 'no lines in the buffer'
        if self.view.rowcol(self.view.size())[0]:
            lines = self.view.rowcol(self.view.size())[0] + 1
        # fixme: doesn't calculate the buffer's % correctly
        if not isinstance(lines, basestring):
            vr = self.view.visible_region()
            start_row, end_row = self.view.rowcol(vr.begin())[0], \
                                 self.view.rowcol(vr.end())[0]
            # Midpoint of the visible region (Python 2 integer division).
            mid = (start_row + end_row + 2) / 2
            percent = float(mid) / lines * 100.0
        msg = fname
        if attrs:
            msg += " [%s]" % attrs
        if isinstance(lines, basestring):
            msg += " -- %s --" % lines
        else:
            msg += " %d line(s) --%d%%--" % (lines, int(percent))
        sublime.status_message('VintageEx: %s' % msg)
class ExMove(sublime_plugin.TextCommand):
    """Ex command(s): :m[ove]
    Moves the addressed lines below the line given by `address`.
    """
    def run(self, edit, line_range=None, forced=False, address=''):
        # make sure we have a default range
        if not line_range['text_range']:
            line_range['text_range'] = '.'
        address_parser = parsers.cmd_line.AddressParser(address)
        parsed_address = address_parser.parse()
        address = ex_range.calculate_address(self.view, parsed_address)
        if address is None:
            ex_error.display_error(ex_error.ERR_INVALID_ADDRESS)
            return
        line_block = get_region_by_range(self.view, line_range=line_range)
        line_block = [self.view.substr(r) for r in line_block]
        text = '\n'.join(line_block) + '\n'
        # Destination: just after the addressed line, or the buffer start
        # for address 0.
        if address != 0:
            dest = self.view.line(self.view.text_point(address, 0)).end() + 1
        else:
            dest = 0
        # Don't move lines onto themselves.
        for sel in self.view.sel():
            if sel.contains(dest):
                ex_error.display_error(ex_error.ERR_CANT_MOVE_LINES_ONTO_THEMSELVES)
                return
        # Moving past EOF: insert a leading newline instead of a trailing one.
        if dest > self.view.size():
            dest = self.view.size()
            text = '\n' + text[:-1]
        self.view.insert(edit, dest, text)
        # Erase the original lines bottom-up so positions stay valid.
        for r in reversed(get_region_by_range(self.view, line_range)):
            self.view.erase(edit, self.view.full_line(r))
class ExCopy(sublime_plugin.TextCommand):
    # todo: do null ranges always default to '.'?
    def run(self, edit, line_range=CURRENT_LINE_RANGE, forced=False, address=''):
        """Copy the addressed lines below the line given by `address`
        (Ex command :co[py]/:t) and leave the caret on the last copied line.
        """
        address_parser = parsers.cmd_line.AddressParser(address)
        parsed_address = address_parser.parse()
        address = ex_range.calculate_address(self.view, parsed_address)
        if address is None:
            ex_error.display_error(ex_error.ERR_INVALID_ADDRESS)
            return
        line_block = get_region_by_range(self.view, line_range=line_range)
        line_block = [self.view.substr(r) for r in line_block]
        text = '\n'.join(line_block) + '\n'
        # Destination: just after the addressed line, or the buffer start
        # for address 0.
        if address != 0:
            dest = self.view.line(self.view.text_point(address, 0)).end() + 1
        else:
            dest = address
        # Copying past EOF: insert a leading newline instead of a trailing one.
        if dest > self.view.size():
            dest = self.view.size()
            text = '\n' + text[:-1]
        self.view.insert(edit, dest, text)
        self.view.sel().clear()
        cursor_dest = self.view.line(dest + len(text) - 1).begin()
        self.view.sel().add(sublime.Region(cursor_dest, cursor_dest))
class ExOnly(sublime_plugin.TextCommand):
    """ Command: :only
    Closes every view except the current one. Refuses if other buffers are
    dirty, unless forced (:only!), in which case their changes are dropped.
    """
    def run(self, edit, forced=False):
        if not forced:
            if is_any_buffer_dirty(self.view.window()):
                ex_error.display_error(ex_error.ERR_OTHER_BUFFER_HAS_CHANGES)
                return
        w = self.view.window()
        current_id = self.view.id()
        for v in w.views():
            if v.id() != current_id:
                if forced and v.is_dirty():
                    # Scratch buffers close without a save prompt.
                    v.set_scratch(True)
                w.focus_view(v)
                w.run_command('close')
class ExDoubleAmpersand(sublime_plugin.TextCommand):
    """ Command :&&
    Repeats the last substitution, delegating to ex_substitute with only
    flags/count so it reuses the remembered pattern and replacement.
    """
    def run(self, edit, line_range=None, flags='', count=''):
        self.view.run_command('ex_substitute', {'line_range': line_range,
                                                'pattern': flags + count})
class ExSubstitute(sublime_plugin.TextCommand):
    """Ex command(s): :s[ubstitute]
    Replaces text matching a pattern within the addressed lines. The last
    pattern, replacement and flags are remembered so a bare ':s' repeats
    the most recent substitution.
    """
    # Most recent pattern/replacement/flags, shared across invocations.
    most_recent_pat = None
    most_recent_flags = ''
    most_recent_replacement = ''
    def run(self, edit, line_range=None, pattern=''):
        # :s
        if not pattern:
            pattern = ExSubstitute.most_recent_pat
            replacement = ExSubstitute.most_recent_replacement
            flags = ''
            count = 0
        # :s g 100 | :s/ | :s// | s:/foo/bar/g 100 | etc.
        else:
            try:
                parts = parsers.s_cmd.split(pattern)
            except SyntaxError, e:
                sublime.status_message("VintageEx: (substitute) %s" % e)
                print "VintageEx: (substitute) %s" % e
                return
            else:
                if len(parts) == 4:
                    # This is a full command in the form :s/foo/bar/g 100 or a
                    # partial version of it.
                    (pattern, replacement, flags, count) = parts
                else:
                    # This is a short command in the form :s g 100 or a partial
                    # version of it.
                    (flags, count) = parts
                    pattern = ExSubstitute.most_recent_pat
                    replacement = ExSubstitute.most_recent_replacement
        if not pattern:
            pattern = ExSubstitute.most_recent_pat
        else:
            ExSubstitute.most_recent_pat = pattern
            ExSubstitute.most_recent_replacement = replacement
            ExSubstitute.most_recent_flags = flags
        # Only the 'i' (ignore case) flag maps onto a Python re flag.
        computed_flags = 0
        computed_flags |= re.IGNORECASE if (flags and 'i' in flags) else 0
        try:
            pattern = re.compile(pattern, flags=computed_flags)
        except Exception, e:
            sublime.status_message("VintageEx [regex error]: %s ... in pattern '%s'" % (e.message, pattern))
            print "VintageEx [regex error]: %s ... in pattern '%s'" % (e.message, pattern)
            return
        # 'g' replaces every occurrence per line; otherwise only the first.
        replace_count = 0 if (flags and 'g' in flags) else 1
        target_region = get_region_by_range(self.view, line_range=line_range, as_lines=True)
        # Work bottom-up so earlier replacements don't shift pending lines.
        for r in reversed(target_region):
            line_text = self.view.substr(self.view.line(r))
            rv = re.sub(pattern, replacement, line_text, count=replace_count)
            self.view.replace(edit, self.view.line(r), rv)
class ExDelete(sublime_plugin.TextCommand):
    """Ex command(s): :d[elete]
    Deletes the addressed lines, optionally copying them into `register`.
    """
    def run(self, edit, line_range=None, register='', count=''):
        # XXX somewhat different to vim's behavior
        rs = get_region_by_range(self.view, line_range=line_range)
        self.view.sel().clear()
        to_store = []
        for r in rs:
            self.view.sel().add(r)
            if register:
                to_store.append(self.view.substr(self.view.full_line(r)))
        if register:
            text = ''.join(to_store)
            # needed for lines without a newline character
            if not text.endswith('\n'):
                text = text + '\n'
            set_register(text, register)
        # Select each target line, then delete all of them with the stock
        # Delete Line macro.
        self.view.run_command('split_selection_into_lines')
        self.view.run_command('run_macro_file',
                        {'file': 'Packages/Default/Delete Line.sublime-macro'})
class ExGlobal(sublime_plugin.TextCommand):
    """Ex command(s): :global
    :global filters lines where a pattern matches and then applies the supplied
    action to all those lines.
    Examples:
    :10,20g/FOO/delete
    This command deletes all lines between line 10 and line 20 where 'FOO'
    matches.
    :g:XXX:s!old!NEW!g
    This command replaces all instances of 'old' with 'NEW' in every line
    where 'XXX' matches.
    By default, :global searches all lines in the buffer.
    If you want to filter lines where a pattern does NOT match, add an
    exclamation point:
    :g!/DON'T TOUCH THIS/delete
    """
    # Last :global pattern, reused when the user omits it.
    most_recent_pat = None
    def run(self, edit, line_range=None, forced=False, pattern=''):
        # Default to the whole buffer, like Vim.
        if not line_range['text_range']:
            line_range['text_range'] = '%'
            line_range['left_ref'] = '%'
        try:
            global_pattern, subcmd = parsers.g_cmd.split(pattern)
        except ValueError:
            msg = "VintageEx: Bad :global pattern. (%s)" % pattern
            sublime.status_message(msg)
            print msg
            return
        if global_pattern:
            ExGlobal.most_recent_pat = global_pattern
        else:
            global_pattern = ExGlobal.most_recent_pat
        # Make sure we always have a subcommand to execute. This is what
        # Vim does too.
        subcmd = subcmd or 'print'
        rs = get_region_by_range(self.view, line_range=line_range, as_lines=True)
        for r in rs:
            try:
                match = re.search(global_pattern, self.view.substr(r))
            except Exception, e:
                msg = "VintageEx (global): %s ... in pattern '%s'" % (str(e), global_pattern)
                sublime.status_message(msg)
                print msg
                return
            # ':g' collects matching lines; ':g!' collects non-matching ones.
            if (match and not forced) or (not match and forced):
                GLOBAL_RANGES.append(r)
        # don't do anything if we didn't find any target ranges
        if not GLOBAL_RANGES:
            return
        # Run the subcommand once; it consumes GLOBAL_RANGES through
        # get_region_by_range().
        self.view.window().run_command('vi_colon_input',
                                {'cmd_line': ':' +
                                        str(self.view.rowcol(r.a)[0] + 1) +
                                        subcmd})
class ExPrint(sublime_plugin.TextCommand):
    """Ex command(s): :p[rint]
    Shows the addressed lines in a new scratch buffer. Flag '#' prefixes
    line numbers; flag 'l' makes whitespace visible.
    """
    def run(self, edit, line_range=None, count='1', flags=''):
        # Allow invocations where the flags were parsed into `count`.
        if not count.isdigit():
            flags, count = count, ''
        rs = get_region_by_range(self.view, line_range=line_range)
        to_display = []
        for r in rs:
            for line in self.view.lines(r):
                text = self.view.substr(line)
                if '#' in flags:
                    row = self.view.rowcol(line.begin())[0] + 1
                else:
                    row = ''
                to_display.append((text, row))
        v = self.view.window().new_file()
        v.set_scratch(True)
        if 'l' in flags:
            v.settings().set('draw_white_space', 'all')
        for t, r in to_display:
            v.insert(edit, v.size(), (str(r) + ' ' + t + '\n').lstrip())
# TODO: General note for all :q variants:
# ST has a notion of hot_exit, whereby it preserves all buffers so that they
# can be restored the next time you open ST. With this option on, all :q
# commands should probably execute silently even if there are unsaved buffers.
# Closely mimicking Vim's behavior here makes for a worse experience, because
# you typically don't restart ST as often as you would Vim.
class ExQuitCommand(sublime_plugin.WindowCommand):
    """Ex command(s): :quit
    Closes the window.
    * Don't close the window if there are dirty buffers
    TODO:
    (Doesn't make too much sense if hot_exit is on, though.)
    Although ST's window command 'exit' would take care of this, it
    displays a modal dialog, so spare ourselves that.
    """
    def run(self, forced=False, count=1, flags=''):
        v = self.window.active_view()
        if forced:
            # ':q!' discards changes: scratch buffers close without a prompt.
            v.set_scratch(True)
        if v.is_dirty():
            sublime.status_message("There are unsaved changes!")
            return
        self.window.run_command('close')
        # Closing the last view leaves an empty window; close that too.
        if len(self.window.views()) == 0:
            self.window.run_command('close')
class ExQuitAllCommand(sublime_plugin.WindowCommand):
    """Ex command(s): :qall
    Close all windows and then exit Sublime Text.
    If there are dirty buffers, exit only if :qall!.
    """
    def run(self, forced=False):
        if forced:
            # Mark dirty buffers as scratch so closing won't prompt to save.
            for v in self.window.views():
                if v.is_dirty():
                    v.set_scratch(True)
        elif is_any_buffer_dirty(self.window):
            sublime.status_message("There are unsaved changes!")
            return
        self.window.run_command('close_all')
        self.window.run_command('exit')
class ExWriteAndQuitCommand(sublime_plugin.TextCommand):
    """Ex command(s): :wq
    Write and then close the active buffer.
    """
    def run(self, edit, line_range=None, forced=False):
        # TODO: implement this
        if forced:
            ex_error.handle_not_implemented()
            return
        if self.view.is_read_only():
            sublime.status_message("Can't write a read-only buffer.")
            return
        if not self.view.file_name():
            sublime.status_message("Can't save a file without name.")
            return
        self.view.run_command('save')
        # Delegate the closing half to :quit.
        self.view.window().run_command('ex_quit')
class ExBrowse(sublime_plugin.TextCommand):
    """Ex command(s): :browse -- show the standard open-file prompt."""
    def run(self, edit):
        self.view.window().run_command('prompt_open_file')
class ExEdit(sublime_plugin.TextCommand):
    """Ex command(s): :e[dit]
    Reverts the buffer to its saved state; refuses when there are unsaved
    changes unless forced (:e!).
    """
    def run_(self, args):
        # NOTE(review): bypasses the default argument unpacking; `args` is
        # passed positionally and lands in `forced` below -- confirm intended.
        self.run(args)
    def run(self, forced=False):
        # todo: restore active line_nr too
        if forced or not self.view.is_dirty():
            self.view.run_command('revert')
            return
        elif self.view.is_dirty():
            ex_error.display_error(ex_error.ERR_UNSAVED_CHANGES)
            return
        # NOTE(review): unreachable -- the branches above cover all cases.
        ex_error.handle_not_implemented()
class ExCquit(sublime_plugin.TextCommand):
    """Ex command(s): :cq[uit] -- exit Sublime Text immediately."""
    def run(self, edit):
        self.view.window().run_command('exit')
class ExExit(sublime_plugin.TextCommand):
    """Ex command(s): :x[it], :exi[t]
    Like :wq, but write only when changes have been made.
    TODO: Support ranges, like :w.
    """
    def run(self, edit, line_range=None):
        w = self.view.window()
        if w.active_view().is_dirty():
            w.run_command('save')
        w.run_command('close')
        # Closing the last view leaves an empty window; close it as well.
        if len(w.views()) == 0:
            w.run_command('close')
class ExListRegisters(sublime_plugin.TextCommand):
    """Lists registers in quick panel and saves selected to `"` register."""
    def run(self, edit):
        if not g_registers:
            sublime.status_message('VintageEx: no registers.')
            # Bug fix: without this return an empty quick panel was shown
            # on top of the status message.
            return
        self.view.window().show_quick_panel(
            ['"{0} {1}'.format(k, v) for k, v in g_registers.items()],
            self.on_done)
    def on_done(self, idx):
        """Save selected value to `"` register."""
        if idx == -1:
            return
        # Python 2: dict.values() returns a list, so indexing works.
        g_registers['"'] = g_registers.values()[idx]
class ExNew(sublime_plugin.TextCommand):
    """Ex command(s): :new
    Create a new buffer.
    TODO: Create new buffer by splitting the screen.
    """
    def run(self, edit, line_range=None):
        # `line_range` is accepted for parser uniformity but ignored.
        self.view.window().run_command('new_file')
class ExYank(sublime_plugin.TextCommand):
    """Ex command(s): :y[ank]
    Copies the addressed lines into `register` (default: the unnamed
    register '"'). Yanks into '"' also populate register '0'.
    """
    def run(self, edit, line_range, register=None, count=None):
        if not register:
            register = '"'
        regs = get_region_by_range(self.view, line_range)
        text = '\n'.join([self.view.substr(line) for line in regs])
        g_registers[register] = text
        if register == '"':
            g_registers['0'] = text
class TabControlCommand(sublime_plugin.WindowCommand):
    """Dispatcher for the :tab* family of commands.
    `command` is one of: open, next, prev, last, first, only.
    """
    def run(self, command, file_name=None, forced=False):
        window = self.window
        selfview = window.active_view()
        max_index = len(window.views())
        (group, index) = window.get_view_index(selfview)
        if (command == "open"):
            if file_name is None: # TODO: file completion
                window.run_command("show_overlay", {"overlay": "goto", "show_files": True, })
            else:
                # Resolve relative names against the current file's directory.
                cur_dir = os.path.dirname(selfview.file_name())
                window.open_file(os.path.join(cur_dir, file_name))
        elif command == "next":
            window.run_command("select_by_index", {"index": (index + 1) % max_index}, )
        elif command == "prev":
            window.run_command("select_by_index", {"index": (index + max_index - 1) % max_index, })
        elif command == "last":
            window.run_command("select_by_index", {"index": max_index - 1, })
        elif command == "first":
            window.run_command("select_by_index", {"index": 0, })
        elif command == "only":
            # Close every other view in this group via :quit semantics.
            for view in window.views_in_group(group):
                if view.id() != selfview.id():
                    window.focus_view(view)
                    window.run_command("ex_quit", {"forced": forced})
            window.focus_view(selfview)
        else:
            sublime.status_message("Unknown TabControl Command")
class ExTabOpenCommand(sublime_plugin.WindowCommand):
    """Open `file_name` (or the goto overlay) via TabControlCommand."""
    def run(self, file_name=None):
        self.window.run_command("tab_control", {"command": "open", "file_name": file_name}, )
class ExTabNextCommand(sublime_plugin.WindowCommand):
    """Switch to the next tab via TabControlCommand."""
    def run(self):
        self.window.run_command("tab_control", {"command": "next"}, )
class ExTabPrevCommand(sublime_plugin.WindowCommand):
    """Switch to the previous tab via TabControlCommand."""
    def run(self):
        self.window.run_command("tab_control", {"command": "prev"}, )
class ExTabLastCommand(sublime_plugin.WindowCommand):
    """Switch to the last tab via TabControlCommand."""
    def run(self):
        self.window.run_command("tab_control", {"command": "last"}, )
class ExTabFirstCommand(sublime_plugin.WindowCommand):
    """Switch to the first tab via TabControlCommand."""
    def run(self):
        self.window.run_command("tab_control", {"command": "first"}, )
class ExTabOnlyCommand(sublime_plugin.WindowCommand):
    """Close all tabs except the current one via TabControlCommand."""
    def run(self, forced=False):
        self.window.run_command("tab_control", {"command": "only", "forced": forced, }, )
| |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os
from os import path as op
import sys
from struct import pack
from glob import glob
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix, eye as speye
from .bem import read_bem_surfaces
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tree import dir_tree_find
from .io.tag import find_tag
from .io.write import (write_int, start_file, end_block,
start_block, end_file, write_string,
write_float_sparse_rcs)
from .channels.channels import _get_meg_system
from .transforms import transform_surface_to
from .utils import logger, verbose, get_subjects_dir, warn
from .externals.six import string_types
###############################################################################
# AUTOMATED SURFACE FINDING
@verbose
def get_head_surf(subject, source=('bem', 'head'), subjects_dir=None,
                  verbose=None):
    """Load the subject head surface
    Parameters
    ----------
    subject : str
        Subject name.
    source : str | list of str
        Type to load. Common choices would be `'bem'` or `'head'`. We first
        try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
        then look for `'$SUBJECT*$SOURCE.fif'` in the same directory by going
        through all files matching the pattern. The head surface will be read
        from the first file containing a head surface. Can also be a list
        to try multiple strings.
    subjects_dir : str, or None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    surf : dict
        The head surface.
    """
    # Load the head surface from the BEM
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    if not isinstance(subject, string_types):
        raise TypeError('subject must be a string, not %s' % (type(subject,)))
    # use realpath to allow for linked surfaces (c.f. MNE manual 196-197)
    if isinstance(source, string_types):
        source = [source]
    surf = None
    for this_source in source:
        # Preferred location: $SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif
        this_head = op.realpath(op.join(subjects_dir, subject, 'bem',
                                        '%s-%s.fif' % (subject, this_source)))
        if op.exists(this_head):
            surf = read_bem_surfaces(this_head, True,
                                     FIFF.FIFFV_BEM_SURF_ID_HEAD,
                                     verbose=False)
        else:
            # let's do a more sophisticated search
            path = op.join(subjects_dir, subject, 'bem')
            if not op.isdir(path):
                raise IOError('Subject bem directory "%s" does not exist'
                              % path)
            files = sorted(glob(op.join(path, '%s*%s.fif'
                                        % (subject, this_source))))
            # Take the first matching file that actually holds a head surface.
            for this_head in files:
                try:
                    surf = read_bem_surfaces(this_head, True,
                                             FIFF.FIFFV_BEM_SURF_ID_HEAD,
                                             verbose=False)
                except ValueError:
                    pass
                else:
                    break
        if surf is not None:
            break
    if surf is None:
        raise IOError('No file matching "%s*%s" and containing a head '
                      'surface found' % (subject, this_source))
    logger.info('Using surface from %s' % this_head)
    return surf
@verbose
def get_meg_helmet_surf(info, trans=None, verbose=None):
    """Load the MEG helmet associated with the MEG sensors
    Parameters
    ----------
    info : instance of Info
        Measurement info.
    trans : dict
        The head<->MRI transformation, usually obtained using
        read_trans(). Can be None, in which case the surface will
        be in head coordinates instead of MRI coordinates.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    surf : dict
        The MEG helmet as a surface.
    """
    system = _get_meg_system(info)
    logger.info('Getting helmet for system %s' % system)
    # Helmet meshes ship with the package, one .fif.gz per MEG system.
    fname = op.join(op.split(__file__)[0], 'data', 'helmets',
                    system + '.fif.gz')
    surf = read_bem_surfaces(fname, False, FIFF.FIFFV_MNE_SURF_MEG_HELMET,
                             verbose=False)
    # Ignore what the file says, it's in device coords and we want MRI coords
    surf['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
    # Device -> head using the measurement's device-head transform, then
    # optionally head -> MRI if a trans was supplied.
    transform_surface_to(surf, 'head', info['dev_head_t'])
    if trans is not None:
        transform_surface_to(surf, 'mri', trans)
    return surf
###############################################################################
# EFFICIENCY UTILITIES
def fast_cross_3d(x, y):
    """Compute row-wise cross products of two (n, 3) arrays.

    Much faster than np.cross() when the number of cross products becomes
    large (>500), because np.cross() is less memory efficient there.

    Parameters
    ----------
    x : array, shape (n, 3) or (1, 3)
        Input array 1.
    y : array, shape (n, 3) or (1, 3)
        Input array 2.

    Returns
    -------
    z : array
        Cross product of x and y.

    Notes
    -----
    x and y must both be 2D row vectors. One must have length 1, or both
    lengths must match.
    """
    assert x.ndim == 2 and y.ndim == 2
    assert x.shape[1] == 3 and y.shape[1] == 3
    assert 1 in (x.shape[0], y.shape[0]) or x.shape[0] == y.shape[0]
    if max(x.shape[0], y.shape[0]) < 500:
        # Small inputs: np.cross is fine.
        return np.cross(x, y)
    # Large inputs: expand the cross product manually, column by column.
    x0, x1, x2 = x[:, 0], x[:, 1], x[:, 2]
    y0, y1, y2 = y[:, 0], y[:, 1], y[:, 2]
    return np.c_[x1 * y2 - x2 * y1,
                 x2 * y0 - x0 * y2,
                 x0 * y1 - x1 * y0]
def _fast_cross_nd_sum(a, b, c):
"""Fast cross and sum"""
return ((a[..., 1] * b[..., 2] - a[..., 2] * b[..., 1]) * c[..., 0] +
(a[..., 2] * b[..., 0] - a[..., 0] * b[..., 2]) * c[..., 1] +
(a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]) * c[..., 2])
def _accumulate_normals(tris, tri_nn, npts):
"""Efficiently accumulate triangle normals"""
# this code replaces the following, but is faster (vectorized):
#
# this['nn'] = np.zeros((this['np'], 3))
# for p in xrange(this['ntri']):
# verts = this['tris'][p]
# this['nn'][verts, :] += this['tri_nn'][p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts, weights=tri_nn[:, idx],
minlength=npts)
return nn
def _triangle_neighbors(tris, npts):
"""Efficiently compute vertex neighboring triangles"""
# this code replaces the following, but is faster (vectorized):
#
# this['neighbor_tri'] = [list() for _ in xrange(this['np'])]
# for p in xrange(this['ntri']):
# verts = this['tris'][p]
# this['neighbor_tri'][verts[0]].append(p)
# this['neighbor_tri'][verts[1]].append(p)
# this['neighbor_tri'][verts[2]].append(p)
# this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
#
verts = tris.ravel()
counts = np.bincount(verts, minlength=npts)
reord = np.argsort(verts)
tri_idx = np.unravel_index(reord, (len(tris), 3))[0]
idx = np.cumsum(np.r_[0, counts])
# the sort below slows it down a bit, but is needed for equivalence
neighbor_tri = [np.sort(tri_idx[v1:v2])
for v1, v2 in zip(idx[:-1], idx[1:])]
return neighbor_tri
def _triangle_coords(r, geom, best):
"""Get coordinates of a vertex projected to a triangle"""
r1 = geom['r1'][best]
tri_nn = geom['nn'][best]
r12 = geom['r12'][best]
r13 = geom['r13'][best]
a = geom['a'][best]
b = geom['b'][best]
c = geom['c'][best]
rr = r - r1
z = np.sum(rr * tri_nn)
v1 = np.sum(rr * r12)
v2 = np.sum(rr * r13)
det = a * b - c * c
x = (b * v1 - c * v2) / det
y = (a * v2 - c * v1) / det
return x, y, z
@verbose
def _complete_surface_info(this, do_neighbor_vert=False, verbose=None):
    """Complete surface information in place.

    Adds triangle centers, normals and areas, per-vertex normals and
    neighboring-triangle lists to the surface dict, and optionally
    per-vertex neighbor vertices.

    Parameters
    ----------
    this : dict
        Surface with at least 'rr', 'tris', 'np' and 'ntri'.
    do_neighbor_vert : bool
        If True, also compute `neighbor_vert` for every vertex.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    this : dict
        The same (mutated) surface dict.
    """
    # based on mne_source_space_add_geometry_info() in mne_add_geometry_info.c
    # Main triangulation [mne_add_triangle_data()]
    this['tri_area'] = np.zeros(this['ntri'])
    r1 = this['rr'][this['tris'][:, 0], :]
    r2 = this['rr'][this['tris'][:, 1], :]
    r3 = this['rr'][this['tris'][:, 2], :]
    this['tri_cent'] = (r1 + r2 + r3) / 3.0
    this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
    # Triangle normals and areas
    size = np.sqrt(np.sum(this['tri_nn'] ** 2, axis=1))
    this['tri_area'] = size / 2.0
    zidx = np.where(size == 0)[0]
    for idx in zidx:
        logger.info(' Warning: zero size triangle # %s' % idx)
    size[zidx] = 1.0 # prevent ugly divide-by-zero
    this['tri_nn'] /= size[:, None]
    # Find neighboring triangles, accumulate vertex normals, normalize
    logger.info(' Triangle neighbors and vertex normals...')
    this['neighbor_tri'] = _triangle_neighbors(this['tris'], this['np'])
    this['nn'] = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])
    _normalize_vectors(this['nn'])
    # Check for topological defects
    idx = np.where([len(n) == 0 for n in this['neighbor_tri']])[0]
    if len(idx) > 0:
        logger.info(' Vertices [%s] do not have any neighboring'
                    'triangles!' % ','.join([str(ii) for ii in idx]))
    idx = np.where([len(n) < 3 for n in this['neighbor_tri']])[0]
    if len(idx) > 0:
        logger.info(' Vertices [%s] have fewer than three neighboring '
                    'tris, omitted' % ','.join([str(ii) for ii in idx]))
    for k in idx:
        # BUG FIX: the original assigned to this['neighbor_tri'] itself,
        # replacing the entire list with an empty array on the first
        # defective vertex; only entry k should be emptied.
        this['neighbor_tri'][k] = np.array([], int)
    # Determine the neighboring vertices and fix errors
    if do_neighbor_vert is True:
        logger.info(' Vertex neighbors...')
        this['neighbor_vert'] = [_get_surf_neighbors(this, k)
                                 for k in range(this['np'])]
    return this
def _get_surf_neighbors(surf, k):
    """Calculate the surface neighbors of vertex k based on triangulation."""
    # All vertices of the triangles touching k, minus k itself.
    verts = surf['tris'][surf['neighbor_tri'][k]]
    verts = np.setdiff1d(verts, [k], assume_unique=False)
    assert np.all(verts < surf['np'])
    nneighbors = len(verts)
    # On a clean manifold, distinct neighbors == neighboring triangles.
    nneigh_max = len(surf['neighbor_tri'][k])
    if nneighbors > nneigh_max:
        raise RuntimeError('Too many neighbors for vertex %d' % k)
    elif nneighbors != nneigh_max:
        logger.info(' Incorrect number of distinct neighbors for vertex'
                    ' %d (%d instead of %d) [fixed].' % (k, nneighbors,
                                                         nneigh_max))
    return verts
def _normalize_vectors(rr):
"""Normalize surface vertices"""
size = np.sqrt(np.sum(rr * rr, axis=1))
size[size == 0] = 1.0 # avoid divide-by-zero
rr /= size[:, np.newaxis] # operate in-place
def _compute_nearest(xhs, rr, use_balltree=True, return_dists=False):
"""Find nearest neighbors
Note: The rows in xhs and rr must all be unit-length vectors, otherwise
the result will be incorrect.
Parameters
----------
xhs : array, shape=(n_samples, n_dim)
Points of data set.
rr : array, shape=(n_query, n_dim)
Points to find nearest neighbors for.
use_balltree : bool
Use fast BallTree based search from scikit-learn. If scikit-learn
is not installed it will fall back to the slow brute force search.
return_dists : bool
If True, return associated distances.
Returns
-------
nearest : array, shape=(n_query,)
Index of nearest neighbor in xhs for every point in rr.
distances : array, shape=(n_query,)
The distances. Only returned if return_dists is True.
"""
if use_balltree:
try:
from sklearn.neighbors import BallTree
except ImportError:
logger.info('Nearest-neighbor searches will be significantly '
'faster if scikit-learn is installed.')
use_balltree = False
if xhs.size == 0 or rr.size == 0:
if return_dists:
return np.array([], int), np.array([])
return np.array([], int)
if use_balltree is True:
ball_tree = BallTree(xhs)
if return_dists:
out = ball_tree.query(rr, k=1, return_distance=True)
return out[1][:, 0], out[0][:, 0]
else:
nearest = ball_tree.query(rr, k=1, return_distance=False)[:, 0]
return nearest
else:
from scipy.spatial.distance import cdist
if return_dists:
nearest = list()
dists = list()
for r in rr:
d = cdist(r[np.newaxis, :], xhs)
idx = np.argmin(d)
nearest.append(idx)
dists.append(d[0, idx])
return (np.array(nearest), np.array(dists))
else:
nearest = np.array([np.argmin(cdist(r[np.newaxis, :], xhs))
for r in rr])
return nearest
###############################################################################
# Handle freesurfer
def _fread3(fobj):
    """Read a single 3-byte big-endian unsigned int from an open file."""
    b1, b2, b3 = np.fromfile(fobj, ">u1", 3)
    return (b1 << 16) + (b2 << 8) + b3
def _fread3_many(fobj, n):
"""Read 3-byte ints from an open binary file object."""
b1, b2, b3 = np.fromfile(fobj, ">u1",
3 * n).reshape(-1, 3).astype(np.int).T
return (b1 << 16) + (b2 << 8) + b3
def read_curvature(filepath):
    """Load curvature values from a FreeSurfer ?h.curv file.

    Returns a binarized array: 1 where the curvature value is zero,
    0 where it is non-zero.
    """
    with open(filepath, "rb") as fobj:
        magic = _fread3(fobj)
        if magic == 16777215:
            # New-format file: int32 counts followed by float32 values.
            vnum = np.fromfile(fobj, ">i4", 3)[0]
            curv = np.fromfile(fobj, ">f4", vnum)
        else:
            # Old format: the magic number *is* the vertex count; values
            # are int16 scaled by 100.
            vnum = magic
            _fread3(fobj)
            curv = np.fromfile(fobj, ">i2", vnum) / 100
        # NOTE(review): np.int is deprecated/removed in modern NumPy;
        # builtin int is the drop-in replacement.
        bin_curv = 1 - np.array(curv != 0, np.int)
        return bin_curv
@verbose
def read_surface(fname, verbose=None):
    """Load a Freesurfer surface mesh in triangular format

    Handles the triangle format as well as the old and new quad formats
    (quads are split into two triangles each).

    Parameters
    ----------
    fname : str
        The name of the file containing the surface.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    rr : array, shape=(n_vertices, 3)
        Coordinate points.
    tris : int array, shape=(n_faces, 3)
        Triangulation (each line contains indexes for three points which
        together form a face).

    See Also
    --------
    write_surface
    """
    TRIANGLE_MAGIC = 16777214
    QUAD_MAGIC = 16777215
    NEW_QUAD_MAGIC = 16777213
    with open(fname, "rb", buffering=0) as fobj:  # buffering=0 for np bug
        magic = _fread3(fobj)
        # Quad file or new quad
        if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC):
            create_stamp = ''
            nvert = _fread3(fobj)
            nquad = _fread3(fobj)
            # Old quads store int16 scaled by 100; new quads store float32
            (fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
            # np.float/np.int were removed from NumPy (deprecated aliases
            # of the builtins); the builtins are behavior-identical.
            coords = np.fromfile(fobj, fmt, nvert * 3).astype(float) / div
            coords = coords.reshape(-1, 3)
            quads = _fread3_many(fobj, nquad * 4)
            quads = quads.reshape(nquad, 4)
            # Face splitting follows: each quad becomes two triangles,
            # with the split diagonal chosen by the parity of the first
            # vertex index (matches FreeSurfer's convention).
            faces = np.zeros((2 * nquad, 3), dtype=int)
            nface = 0
            for quad in quads:
                if (quad[0] % 2) == 0:
                    faces[nface:nface + 2] = [[quad[0], quad[1], quad[3]],
                                              [quad[2], quad[3], quad[1]]]
                else:
                    faces[nface:nface + 2] = [[quad[0], quad[1], quad[2]],
                                              [quad[0], quad[2], quad[3]]]
                nface += 2
        elif magic == TRIANGLE_MAGIC:  # Triangle file
            # NOTE(review): the file is opened in binary mode, so
            # create_stamp is bytes here and '' in the quad branch —
            # confirm downstream consumers accept both.
            create_stamp = fobj.readline()
            fobj.readline()
            vnum = np.fromfile(fobj, ">i4", 1)[0]
            fnum = np.fromfile(fobj, ">i4", 1)[0]
            coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
            faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
        else:
            raise ValueError("%s does not appear to be a Freesurfer surface"
                             % fname)
        logger.info('Triangle file: %s nvert = %s ntri = %s'
                    % (create_stamp.strip(), len(coords), len(faces)))
    coords = coords.astype(float)  # XXX: due to mayavi bug on mac 32bits
    return coords, faces
@verbose
def _read_surface_geom(fname, patch_stats=True, norm_rr=False, verbose=None):
    """Return the surface as a dict, optionally completing geometry info.

    ``fname`` may be a path to a FreeSurfer surface file, or an
    already-loaded surface dict (used as-is).
    """
    # based on mne_load_surface_geom() in mne_surface_io.c
    if isinstance(fname, dict):
        surf = fname
    elif isinstance(fname, string_types):
        points, triangles = read_surface(fname)  # mne_read_triangle_file()
        surf = dict(rr=points, tris=triangles, use_tris=triangles,
                    ntri=len(triangles), np=len(points))
    else:
        raise RuntimeError('fname cannot be understood as str or dict')
    if patch_stats is True:
        surf = _complete_surface_info(surf)
    if norm_rr is True:
        _normalize_vectors(surf['rr'])
    return surf
##############################################################################
# SURFACE CREATION
def _get_ico_surface(grade, patch_stats=False):
    """Return an icosahedral surface of the desired grade."""
    # always use verbose=False since users don't need to know we're pulling
    # these from a file
    fname = op.join(op.dirname(__file__), 'data', 'icos.fif.gz')
    return read_bem_surfaces(fname, patch_stats, s_id=9000 + grade,
                             verbose=False)
def _tessellate_sphere_surf(level, rad=1.0):
    """Return a sphere surface structure at the given tessellation level.

    Parameters
    ----------
    level : int
        Subdivision level passed to _tessellate_sphere (must be >= 1).
    rad : float
        Radius to scale the unit-sphere points by.

    Returns
    -------
    s : dict
        Surface structure with points, normals and triangulation.
    """
    rr, tris = _tessellate_sphere(level)
    npt = len(rr)  # called "npt" instead of "np" because of numpy...
    ntri = len(tris)
    # On a unit sphere the point coordinates are also the outward normals.
    nn = rr.copy()
    rr *= rad
    # Bug fix: 'nuse' was previously assigned 'np' — the numpy *module* —
    # instead of the point count 'npt'.
    s = dict(rr=rr, np=npt, tris=tris, use_tris=tris, ntri=ntri, nuse=npt,
             nn=nn, inuse=np.ones(npt, int))
    return s
def _norm_midpt(ai, bi, rr):
a = np.array([rr[aii] for aii in ai])
b = np.array([rr[bii] for bii in bi])
c = (a + b) / 2.
return c / np.sqrt(np.sum(c ** 2, 1))[:, np.newaxis]
def _tessellate_sphere(mylevel):
    """Create a tessellation of a unit sphere.

    Starts from a unit octahedron and subdivides every triangular face
    (mylevel - 1) times, projecting each new point back onto the unit
    sphere, then de-duplicates near-identical vertices.

    Parameters
    ----------
    mylevel : int
        Subdivision level; must be >= 1 (1 yields the bare octahedron).

    Returns
    -------
    rr : ndarray, shape (n_points, 3)
        Unit-sphere point coordinates.
    tris : ndarray, shape (n_tris, 3)
        Triangulation with counter-clockwise vertex ordering.
    """
    # Vertices of a unit octahedron
    rr = np.array([[1, 0, 0], [-1, 0, 0],  # xplus, xminus
                   [0, 1, 0], [0, -1, 0],  # yplus, yminus
                   [0, 0, 1], [0, 0, -1]], float)  # zplus, zminus
    tris = np.array([[0, 4, 2], [2, 4, 1], [1, 4, 3], [3, 4, 0],
                     [0, 2, 5], [2, 1, 5], [1, 3, 5], [3, 0, 5]], int)

    # A unit octahedron
    if mylevel < 1:
        raise ValueError('# of levels must be >= 1')

    # Reverse order of points in each triangle
    # for counter-clockwise ordering
    tris = tris[:, [2, 1, 0]]

    # Subdivide each starting triangle (mylevel - 1) times
    for _ in range(1, mylevel):
        """
        Subdivide each triangle in the old approximation and normalize
        the new points thus generated to lie on the surface of the unit
        sphere.

        Each input triangle with vertices labelled [0,1,2] as shown
        below will be turned into four new triangles:

                             Make new points
                             a = (0+2)/2
                             b = (0+1)/2
                             c = (1+2)/2
                 1
                /\           Normalize a, b, c
               /  \
             b/____\c        Construct new triangles
             /\    /\        [0,b,a]
            /  \  /  \       [b,1,c]
           /____\/____\      [a,b,c]
          0      a     2     [a,c,2]
        """
        # use new method: first make new points (rr)
        a = _norm_midpt(tris[:, 0], tris[:, 2], rr)
        b = _norm_midpt(tris[:, 0], tris[:, 1], rr)
        c = _norm_midpt(tris[:, 1], tris[:, 2], rr)
        # Index ranges of a/b/c within the concatenated point array below;
        # at this stage duplicates (shared edges) are still present.
        lims = np.cumsum([len(rr), len(a), len(b), len(c)])
        aidx = np.arange(lims[0], lims[1])
        bidx = np.arange(lims[1], lims[2])
        cidx = np.arange(lims[2], lims[3])
        rr = np.concatenate((rr, a, b, c))

        # now that we have our points, make new triangle definitions
        tris = np.array((np.c_[tris[:, 0], bidx, aidx],
                         np.c_[bidx, tris[:, 1], cidx],
                         np.c_[aidx, bidx, cidx],
                         np.c_[aidx, cidx, tris[:, 2]]), int).swapaxes(0, 1)
        tris = np.reshape(tris, (np.prod(tris.shape[:2]), 3))

    # Copy the resulting approximation into standard table
    # De-duplicate: points on the unit sphere are considered identical when
    # their dot product exceeds 0.99999. NOTE: tris is remapped in-place
    # *while* being iterated, so the final vertex order depends on the
    # traversal order of this loop.
    rr_orig = rr
    rr = np.empty_like(rr)
    nnode = 0
    for k, tri in enumerate(tris):
        for j in range(3):
            coord = rr_orig[tri[j]]
            # this is faster than cdist (no need for sqrt)
            similarity = np.dot(rr[:nnode], coord)
            idx = np.where(similarity > 0.99999)[0]
            if len(idx) > 0:
                tris[k, j] = idx[0]
            else:
                rr[nnode] = coord
                tris[k, j] = nnode
                nnode += 1
    rr = rr[:nnode].copy()
    return rr, tris
def _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
                         subjects_dir):
    """Load a surf and use the subdivided icosahedron to get points.

    For 'ico'/'oct' spacings, each vertex of the subdivided icosahedron
    (``ico_surf``) is mapped to its nearest vertex on the subject's
    registration sphere; doubly-occupied vertices are pushed to an unused
    neighbor. Otherwise all vertices are marked in use.

    Returns the surface dict with 'inuse', 'use_tris', 'nuse', 'vertno'
    and related fields filled in.
    """
    # Based on load_source_space_surf_spacing() in load_source_space.c
    surf = _read_surface_geom(surf)

    if stype in ['ico', 'oct']:
        # ## from mne_ico_downsample.c ## #
        surf_name = op.join(subjects_dir, subject, 'surf', hemi + '.sphere')
        logger.info('Loading geometry from %s...' % surf_name)
        from_surf = _read_surface_geom(surf_name, norm_rr=True,
                                       patch_stats=False)
        if not len(from_surf['rr']) == surf['np']:
            raise RuntimeError('Mismatch between number of surface vertices, '
                               'possible parcellation error?')
        _normalize_vectors(ico_surf['rr'])

        # Make the maps: nearest sphere vertex for every icosahedron vertex
        logger.info('Mapping %s %s -> %s (%d) ...'
                    % (hemi, subject, stype, sval))
        mmap = _compute_nearest(from_surf['rr'], ico_surf['rr'])
        nmap = len(mmap)
        surf['inuse'] = np.zeros(surf['np'], int)
        for k in range(nmap):
            if surf['inuse'][mmap[k]]:
                # Try the nearest neighbors: resolve double occupation by
                # moving to the last unused neighbor of the vertex.
                neigh = _get_surf_neighbors(surf, mmap[k])
                was = mmap[k]
                inds = np.where(np.logical_not(surf['inuse'][neigh]))[0]
                if len(inds) == 0:
                    raise RuntimeError('Could not find neighbor for vertex '
                                       '%d / %d' % (k, nmap))
                else:
                    mmap[k] = neigh[inds[-1]]
                    logger.info(' Source space vertex moved from %d to %d '
                                'because of double occupation', was, mmap[k])
            elif mmap[k] < 0 or mmap[k] > surf['np']:
                raise RuntimeError('Map number out of range (%d), this is '
                                   'probably due to inconsistent surfaces. '
                                   'Parts of the FreeSurfer reconstruction '
                                   'need to be redone.' % mmap[k])
            surf['inuse'][mmap[k]] = True

        logger.info('Setting up the triangulation for the decimated '
                    'surface...')
        # Remap the icosahedral triangles through mmap onto subject vertices
        surf['use_tris'] = np.array([mmap[ist] for ist in ico_surf['tris']],
                                    np.int32)
    else:  # use_all is True
        surf['inuse'] = np.ones(surf['np'], int)
        surf['use_tris'] = None
    if surf['use_tris'] is not None:
        surf['nuse_tri'] = len(surf['use_tris'])
    else:
        surf['nuse_tri'] = 0
    surf['nuse'] = np.sum(surf['inuse'])
    surf['vertno'] = np.where(surf['inuse'])[0]

    # set some final params: unit-normalize normals and drop degenerate ones
    inds = np.arange(surf['np'])
    sizes = np.sqrt(np.sum(surf['nn'] ** 2, axis=1))
    surf['nn'][inds] = surf['nn'][inds] / sizes[:, np.newaxis]
    surf['inuse'][sizes <= 0] = False
    surf['nuse'] = np.sum(surf['inuse'])
    surf['subject_his_id'] = subject
    return surf
def write_surface(fname, coords, faces, create_stamp=''):
    """Write a triangular Freesurfer surface mesh

    Accepts the same data format as is returned by read_surface().

    Parameters
    ----------
    fname : str
        File to write.
    coords : array, shape=(n_vertices, 3)
        Coordinate points.
    faces : int array, shape=(n_faces, 3)
        Triangulation (each line contains indexes for three points which
        together form a face).
    create_stamp : str
        Comment that is written to the beginning of the file. Can not contain
        line breaks.

    See Also
    --------
    read_surface
    """
    if len(create_stamp.splitlines()) > 1:
        raise ValueError("create_stamp can only contain one line")

    with open(fname, 'wb') as fid:
        # TRIANGLE_MAGIC (16777214) written as three raw bytes
        fid.write(pack('>3B', 255, 255, 254))
        strs = ['%s\n' % create_stamp, '\n']
        strs = [s.encode('utf-8') for s in strs]
        fid.writelines(strs)
        vnum = len(coords)
        fnum = len(faces)
        fid.write(pack('>2i', vnum, fnum))
        # ndarray.tostring() was deprecated and removed from NumPy;
        # tobytes() is the byte-identical replacement.
        fid.write(np.array(coords, dtype='>f4').tobytes())
        fid.write(np.array(faces, dtype='>i4').tobytes())
###############################################################################
# Decimation
def _decimate_surface(points, triangles, reduction):
    """Aux function: decimate a mesh via TVTK's quadric decimation.

    Parameters
    ----------
    points : ndarray
        Vertex coordinates.
    triangles : ndarray
        Triangle index array.
    reduction : float
        Target fraction of triangles to remove (0..1).

    Returns
    -------
    points, triangles : ndarray
        The decimated mesh.

    Raises
    ------
    ValueError
        If TVTK is not installed or the triangles reference undefined
        points.
    """
    if 'DISPLAY' not in os.environ and sys.platform != 'win32':
        os.environ['ETS_TOOLKIT'] = 'null'
    try:
        from tvtk.api import tvtk
        from tvtk.common import configure_input
    except ImportError:
        raise ValueError('This function requires the TVTK package to be '
                         'installed')
    if triangles.max() > len(points) - 1:
        raise ValueError('The triangles refer to undefined points. '
                         'Please check your mesh.')
    src = tvtk.PolyData(points=points, polys=triangles)
    decimate = tvtk.QuadricDecimation(target_reduction=reduction)
    configure_input(decimate, src)
    decimate.update()
    out = decimate.output
    tris = out.polys.to_array()
    # n-tuples + interleaved n-next -- reshape trick.
    # Bug fix: use floor division — 'tris.size / 4' is a float on
    # Python 3 and makes reshape raise a TypeError.
    return out.points.to_array(), tris.reshape(tris.size // 4, 4)[:, 1:]
def decimate_surface(points, triangles, n_triangles):
    """ Decimate surface data

    Note. Requires TVTK to be installed for this to function.

    Note. If an if an odd target number was requested,
    the ``quadric decimation`` algorithm used results in the
    next even number of triangles. For example a reduction request to 30001
    triangles will result in 30000 triangles.

    Parameters
    ----------
    points : ndarray
        The surface to be decimated, a 3 x number of points array.
    triangles : ndarray
        The surface to be decimated, a 3 x number of triangles array.
    n_triangles : int
        The desired number of triangles.

    Returns
    -------
    points : ndarray
        The decimated points.
    triangles : ndarray
        The decimated triangles.
    """
    # Convert the absolute triangle target into the fractional reduction
    # expected by the TVTK helper.
    keep_fraction = float(n_triangles) / len(triangles)
    return _decimate_surface(points, triangles, 1 - keep_fraction)
###############################################################################
# Morph maps
@verbose
def read_morph_map(subject_from, subject_to, subjects_dir=None,
                   verbose=None):
    """Read morph map

    Morph maps can be generated with mne_make_morph_maps. If one isn't
    available, it will be generated automatically and saved to the
    ``subjects_dir/morph_maps`` directory.

    Parameters
    ----------
    subject_from : string
        Name of the original subject as named in the SUBJECTS_DIR.
    subject_to : string
        Name of the subject on which to morph as named in the SUBJECTS_DIR.
    subjects_dir : string
        Path to SUBJECTS_DIR is not set in the environment.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    left_map, right_map : sparse matrix
        The morph maps for the 2 hemispheres.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)

    # First check for morph-map dir existence
    mmap_dir = op.join(subjects_dir, 'morph-maps')
    if not op.isdir(mmap_dir):
        try:
            os.mkdir(mmap_dir)
        except Exception:
            # best-effort: a failure here only prevents caching to disk
            warn('Could not find or make morph map directory "%s"' % mmap_dir)

    # Does the file exist (try both subject orderings before regenerating)
    fname = op.join(mmap_dir, '%s-%s-morph.fif' % (subject_from, subject_to))
    if not op.exists(fname):
        fname = op.join(mmap_dir, '%s-%s-morph.fif'
                        % (subject_to, subject_from))
        if not op.exists(fname):
            warn('Morph map "%s" does not exist, creating it and saving it to '
                 'disk (this may take a few minutes)' % fname)
            logger.info('Creating morph map %s -> %s'
                        % (subject_from, subject_to))
            mmap_1 = _make_morph_map(subject_from, subject_to, subjects_dir)
            logger.info('Creating morph map %s -> %s'
                        % (subject_to, subject_from))
            mmap_2 = _make_morph_map(subject_to, subject_from, subjects_dir)
            try:
                _write_morph_map(fname, subject_from, subject_to,
                                 mmap_1, mmap_2)
            except Exception as exp:
                # again best-effort: return the in-memory maps regardless
                warn('Could not write morph-map file "%s" (error: %s)'
                     % (fname, exp))
            # NOTE(review): on this freshly-created path the [lh, rh] pair
            # from _make_morph_map is returned directly instead of
            # re-reading the file — presumably equivalent to the
            # (left_map, right_map) tuple below; confirm callers unpack
            # both shapes identically.
            return mmap_1

    f, tree, _ = fiff_open(fname)
    with f as fid:
        # Locate all maps
        maps = dir_tree_find(tree, FIFF.FIFFB_MNE_MORPH_MAP)
        if len(maps) == 0:
            raise ValueError('Morphing map data not found')

        # Find the correct ones
        left_map = None
        right_map = None
        for m in maps:
            tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP_FROM)
            if tag.data == subject_from:
                tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP_TO)
                if tag.data == subject_to:
                    # Names match: which hemisphere is this?
                    tag = find_tag(fid, m, FIFF.FIFF_MNE_HEMI)
                    if tag.data == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
                        tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP)
                        left_map = tag.data
                        logger.info(' Left-hemisphere map read.')
                    elif tag.data == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
                        tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP)
                        right_map = tag.data
                        logger.info(' Right-hemisphere map read.')

    if left_map is None or right_map is None:
        raise ValueError('Could not find both hemispheres in %s' % fname)

    return left_map, right_map
def _write_morph_map(fname, subject_from, subject_to, mmap_1, mmap_2):
    """Write a morph map to disk.

    Writes both directions (mmap_1: from->to, mmap_2: to->from), one
    FIFF block per hemisphere, in the same order as before: mmap_1
    lh/rh, then mmap_2 lh/rh.
    """
    fid = start_file(fname)
    assert len(mmap_1) == 2
    assert len(mmap_2) == 2
    hemis = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
    # Single loop over both directions replaces the two duplicated loops.
    for maps, name_from, name_to in ((mmap_1, subject_from, subject_to),
                                     (mmap_2, subject_to, subject_from)):
        for m, hemi in zip(maps, hemis):
            start_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
            write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_FROM, name_from)
            write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_TO, name_to)
            write_int(fid, FIFF.FIFF_MNE_HEMI, hemi)
            write_float_sparse_rcs(fid, FIFF.FIFF_MNE_MORPH_MAP, m)
            end_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
    end_file(fid)
def _get_tri_dist(p, q, p0, q0, a, b, c, dist):
"""Auxiliary function for getting the distance to a triangle edge"""
return np.sqrt((p - p0) * (p - p0) * a +
(q - q0) * (q - q0) * b +
(p - p0) * (q - q0) * c +
dist * dist)
def _get_tri_supp_geom(tris, rr):
    """Create supplementary geometry information using tris and rrs.

    For every triangle, precompute the first vertex (r1), the two edge
    vectors (r12, r13) and their stacked form (r1213), the edge dot
    products (a, b, c), the inverse 2x2 Gram matrix used to solve for
    barycentric-style coordinates (mat), and the unit normal (nn).
    """
    first = rr[tris[:, 0], :]
    edge12 = rr[tris[:, 1], :] - first
    edge13 = rr[tris[:, 2], :] - first
    stacked = np.array([edge12, edge13]).swapaxes(0, 1)
    a = np.sum(edge12 * edge12, axis=1)
    b = np.sum(edge13 * edge13, axis=1)
    c = np.sum(edge12 * edge13, axis=1)
    # Inverse of the per-triangle Gram matrix [[a, c], [c, b]]
    mat = np.rollaxis(np.array([[b, -c], [-c, a]]), 2)
    mat /= (a * b - c * c)[:, np.newaxis, np.newaxis]
    nn = fast_cross_3d(edge12, edge13)
    _normalize_vectors(nn)
    return dict(r1=first, r12=edge12, r13=edge13, r1213=stacked,
                a=a, b=b, c=c, mat=mat, nn=nn)
@verbose
def _make_morph_map(subject_from, subject_to, subjects_dir=None):
    """Construct morph map from one subject to another.

    Returns a list of two CSR sparse matrices [lh, rh]; each row of a map
    holds the barycentric interpolation weights of one ``subject_to``
    vertex over the three vertices of its enclosing ``subject_from``
    triangle on the registration sphere.

    Note that this is close, but not exactly like the C version.
    For example, parts are more accurate due to double precision,
    so expect some small morph-map differences!

    Note: This seems easily parallelizable, but the overhead
    of pickling all the data structures makes it less efficient
    than just running on a single core :(
    """
    # NOTE(review): decorated with @verbose but takes no 'verbose' kwarg —
    # confirm the decorator tolerates that.
    subjects_dir = get_subjects_dir(subjects_dir)
    morph_maps = list()

    # add speedy short-circuit for self-maps: identity matrices
    if subject_from == subject_to:
        for hemi in ['lh', 'rh']:
            fname = op.join(subjects_dir, subject_from, 'surf',
                            '%s.sphere.reg' % hemi)
            from_pts = read_surface(fname, verbose=False)[0]
            n_pts = len(from_pts)
            morph_maps.append(speye(n_pts, n_pts, format='csr'))
        return morph_maps

    for hemi in ['lh', 'rh']:
        # load surfaces and normalize points to be on unit sphere
        fname = op.join(subjects_dir, subject_from, 'surf',
                        '%s.sphere.reg' % hemi)
        from_pts, from_tris = read_surface(fname, verbose=False)
        n_from_pts = len(from_pts)
        _normalize_vectors(from_pts)
        tri_geom = _get_tri_supp_geom(from_tris, from_pts)

        fname = op.join(subjects_dir, subject_to, 'surf',
                        '%s.sphere.reg' % hemi)
        to_pts = read_surface(fname, verbose=False)[0]
        n_to_pts = len(to_pts)
        _normalize_vectors(to_pts)

        # from surface: get nearest neighbors, find triangles for each vertex
        nn_pts_idx = _compute_nearest(from_pts, to_pts)
        from_pt_tris = _triangle_neighbors(from_tris, len(from_pts))
        # candidate triangles for each to-vertex: those touching its
        # nearest from-vertex
        from_pt_tris = [from_pt_tris[pt_idx] for pt_idx in nn_pts_idx]

        # find triangle in which point lies and assoc. weights
        nn_tri_inds = []
        nn_tris_weights = []
        for pt_tris, to_pt in zip(from_pt_tris, to_pts):
            p, q, idx, dist = _find_nearest_tri_pt(pt_tris, to_pt, tri_geom)
            nn_tri_inds.append(idx)
            # barycentric weights for the triangle's three vertices
            nn_tris_weights.extend([1. - (p + q), p, q])

        nn_tris = from_tris[nn_tri_inds]
        # three weights per to-vertex -> one sparse row each
        row_ind = np.repeat(np.arange(n_to_pts), 3)
        this_map = csr_matrix((nn_tris_weights, (row_ind, nn_tris.ravel())),
                              shape=(n_to_pts, n_from_pts))
        morph_maps.append(this_map)

    return morph_maps
def _find_nearest_tri_pt(pt_tris, to_pt, tri_geom, run_all=False):
    """Find nearest point mapping to a set of triangles.

    If run_all is False, if the point lies within a triangle, it stops.
    If run_all is True, edges of other triangles are checked in case
    those (somehow) are closer.

    Parameters
    ----------
    pt_tris : array of int
        Candidate triangle indices into the arrays stored in tri_geom.
    to_pt : array, shape (3,)
        The query point.
    tri_geom : dict
        Precomputed triangle geometry from _get_tri_supp_geom.
    run_all : bool
        Check triangle edges even when an enclosing triangle was found.

    Returns
    -------
    p, q : float
        Coordinates of the nearest point in the triangle's edge basis.
    pt : int
        Index of the chosen triangle (in the original numbering).
    dist : float
        Signed distance along the triangle normal.
    """
    # The following dense code is equivalent to the following:
    # rr = r1[pt_tris] - to_pts[ii]
    # v1s = np.sum(rr * r12[pt_tris], axis=1)
    # v2s = np.sum(rr * r13[pt_tris], axis=1)
    # aas = a[pt_tris]
    # bbs = b[pt_tris]
    # ccs = c[pt_tris]
    # dets = aas * bbs - ccs * ccs
    # pp = (bbs * v1s - ccs * v2s) / dets
    # qq = (aas * v2s - ccs * v1s) / dets
    # pqs = np.array(pp, qq)

    # This einsum is equivalent to doing:
    # pqs = np.array([np.dot(x, y) for x, y in zip(r1213, r1-to_pt)])
    r1 = tri_geom['r1'][pt_tris]
    rrs = to_pt - r1
    tri_nn = tri_geom['nn'][pt_tris]
    vect = np.einsum('ijk,ik->ij', tri_geom['r1213'][pt_tris], rrs)
    mats = tri_geom['mat'][pt_tris]
    # This einsum is equivalent to doing:
    # pqs = np.array([np.dot(m, v) for m, v in zip(mats, vect)]).T
    pqs = np.einsum('ijk,ik->ji', mats, vect)
    found = False
    # signed distance of the query point along each triangle's normal
    dists = np.sum(rrs * tri_nn, axis=1)

    # There can be multiple (sadness), find closest:
    # keep triangles whose (p, q) solution lies inside the triangle
    # (p >= 0, q >= 0, p + q < 1, both <= 1)
    idx = np.where(np.all(pqs >= 0., axis=0))[0]
    idx = idx[np.where(np.all(pqs[:, idx] <= 1., axis=0))[0]]
    idx = idx[np.where(np.sum(pqs[:, idx], axis=0) < 1.)[0]]
    dist = np.inf
    if len(idx) > 0:
        found = True
        pt = idx[np.argmin(np.abs(dists[idx]))]
        p, q = pqs[:, pt]
        dist = dists[pt]
        # re-reference back to original numbers
        pt = pt_tris[pt]

    if found is False or run_all is True:
        # don't include ones that we might have found before
        s = np.setdiff1d(np.arange(len(pt_tris)), idx)  # ones to check sides
        # Tough: must investigate the sides
        pp, qq, ptt, distt = _nearest_tri_edge(pt_tris[s], to_pt, pqs[:, s],
                                               dists[s], tri_geom)
        if np.abs(distt) < np.abs(dist):
            p, q, pt, dist = pp, qq, ptt, distt
    return p, q, pt, dist
def _nearest_tri_edge(pt_tris, to_pt, pqs, dist, tri_geom):
    """Get nearest location from a point to the edge of a set of triangles.

    For each candidate triangle, projects the (p, q) solution onto each of
    the three sides (clipped to the valid parameter range) and returns the
    candidate with the smallest absolute distance.

    Note: ``to_pt`` is unused here; it is kept for signature parity with
    the caller.
    """
    # We might do something intelligent here. However, for now
    # it is ok to do it in the hard way
    aa = tri_geom['a'][pt_tris]
    bb = tri_geom['b'][pt_tris]
    cc = tri_geom['c'][pt_tris]
    pp, qq = pqs[0], pqs[1]
    # Find the nearest point from a triangle:
    # Side 1 -> 2 (q = 0); np.clip == min(max(..., 0), 1)
    p0 = np.clip(pp + 0.5 * (qq * cc) / aa, 0.0, 1.0)
    q0 = np.zeros_like(p0)
    # Side 2 -> 3 (p + q = 1)
    t1 = (0.5 * ((2.0 * aa - cc) * (1.0 - pp) +
                 (2.0 * bb - cc) * qq) / (aa + bb - cc))
    t1 = np.clip(t1, 0.0, 1.0)
    p1 = 1.0 - t1
    q1 = t1
    # Side 1 -> 3 (p = 0)
    q2 = np.clip(qq + 0.5 * (pp * cc) / bb, 0.0, 1.0)
    p2 = np.zeros_like(q2)
    # figure out which one had the lowest distance
    candidates_p = np.r_[p0, p1, p2]
    candidates_q = np.r_[q0, q1, q2]
    candidates_d = np.r_[_get_tri_dist(pp, qq, p0, q0, aa, bb, cc, dist),
                         _get_tri_dist(pp, qq, p1, q1, aa, bb, cc, dist),
                         _get_tri_dist(pp, qq, p2, q2, aa, bb, cc, dist)]
    best = np.argmin(np.abs(candidates_d))
    return (candidates_p[best], candidates_q[best],
            pt_tris[best % len(pt_tris)], candidates_d[best])
def mesh_edges(tris):
    """Returns sparse matrix with edges as an adjacency matrix

    Parameters
    ----------
    tris : array of shape [n_triangles x 3]
        The triangles.

    Returns
    -------
    edges : sparse matrix
        The adjacency matrix.
    """
    # Reject triangulations that index only a subset of vertices.
    if np.max(tris) > len(np.unique(tris)):
        raise ValueError('Cannot compute connectivity on a selection of '
                         'triangles.')

    n_vertices = np.max(tris) + 1
    # Directed edges a->b, b->c, c->a for every triangle
    row = np.concatenate((tris[:, 0], tris[:, 1], tris[:, 2]))
    col = np.concatenate((tris[:, 1], tris[:, 2], tris[:, 0]))
    weights = np.ones(len(row))
    adjacency = coo_matrix((weights, (row, col)),
                           shape=(n_vertices, n_vertices)).tocsr()
    # Symmetrize so that every edge appears in both directions
    return adjacency + adjacency.T
def mesh_dist(tris, vert):
    """Compute adjacency matrix weighted by distances

    It generates an adjacency matrix where the entries are the distances
    between neighboring vertices.

    Parameters
    ----------
    tris : array (n_tris x 3)
        Mesh triangulation
    vert : array (n_vert x 3)
        Vertex locations

    Returns
    -------
    dist_matrix : scipy.sparse.csr_matrix
        Sparse matrix with distances between adjacent vertices
    """
    adjacency = mesh_edges(tris).tocoo()
    # Euclidean distance between every pair of connected vertices
    deltas = vert[adjacency.row, :] - vert[adjacency.col, :]
    dist = np.sqrt(np.sum(deltas ** 2, axis=1))
    return csr_matrix((dist, (adjacency.row, adjacency.col)),
                      shape=adjacency.shape)
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
from keystone import config
from keystone import exception
from keystone.tests import test_v3
CONF = config.CONF
class CredentialBaseTestCase(test_v3.RestfulTestCase):
    """Shared helpers for v3 credential API tests."""

    def _create_dict_blob_credential(self):
        """Store an ec2 credential whose blob is a dict rather than JSON.

        Returns the JSON-serialized blob and the credential id (the
        sha256 hex digest of the access key).
        """
        access_secret = {"access": uuid.uuid4().hex,
                         "secret": uuid.uuid4().hex}
        cred_id = hashlib.sha256(access_secret['access']).hexdigest()
        ref = self.new_credential_ref(user_id=self.user['id'],
                                      project_id=self.project_id)
        ref['id'] = cred_id
        # Store the blob as a dict *not* JSON ref bug #1259584
        # This means we can test the dict->json workaround, added
        # as part of the bugfix for backwards compatibility works.
        ref['blob'] = access_secret
        ref['type'] = 'ec2'
        # Create direct via the DB API to avoid validation failure
        self.credential_api.create_credential(cred_id, ref)
        return json.dumps(access_secret), cred_id
class CredentialTestCase(CredentialBaseTestCase):
    """Test credential CRUD.

    setUp creates one credential directly through the credential driver,
    so every test starts with exactly one credential owned by self.user
    in self.project_id.
    """
    def setUp(self):
        super(CredentialTestCase, self).setUp()

        self.credential_id = uuid.uuid4().hex
        self.credential = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        self.credential['id'] = self.credential_id
        # Created via the driver (not HTTP) so fixture setup skips
        # API-level validation.
        self.credential_api.create_credential(
            self.credential_id,
            self.credential)

    def test_credential_api_delete_credentials_for_project(self):
        self.credential_api.delete_credentials_for_project(self.project_id)
        # Test that the credential that we created in .setUp no longer exists
        # once we delete all credentials for self.project_id
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          credential_id=self.credential_id)

    def test_credential_api_delete_credentials_for_user(self):
        self.credential_api.delete_credentials_for_user(self.user_id)
        # Test that the credential that we created in .setUp no longer exists
        # once we delete all credentials for self.user_id
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          credential_id=self.credential_id)

    def test_list_credentials(self):
        """Call ``GET /credentials``."""
        r = self.get('/credentials')
        self.assertValidCredentialListResponse(r, ref=self.credential)

    def test_list_credentials_xml(self):
        """Call ``GET /credentials`` (xml data)."""
        r = self.get('/credentials', content_type='xml')
        self.assertValidCredentialListResponse(r, ref=self.credential)

    def test_create_credential(self):
        """Call ``POST /credentials``."""
        ref = self.new_credential_ref(user_id=self.user['id'])
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)

    def test_get_credential(self):
        """Call ``GET /credentials/{credential_id}``."""
        r = self.get(
            '/credentials/%(credential_id)s' % {
                'credential_id': self.credential_id})
        self.assertValidCredentialResponse(r, self.credential)

    def test_update_credential(self):
        """Call ``PATCH /credentials/{credential_id}``."""
        ref = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        # PATCH bodies must not carry the resource id
        del ref['id']
        r = self.patch(
            '/credentials/%(credential_id)s' % {
                'credential_id': self.credential_id},
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)

    def test_delete_credential(self):
        """Call ``DELETE /credentials/{credential_id}``."""
        self.delete(
            '/credentials/%(credential_id)s' % {
                'credential_id': self.credential_id})

    def test_create_ec2_credential(self):
        """Call ``POST /credentials`` for creating ec2 credential."""
        ref = self.new_credential_ref(user_id=self.user['id'])
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
        # Assert credential id is same as hash of access key id for
        # ec2 credentials
        self.assertEqual(r.result['credential']['id'],
                         hashlib.sha256(blob['access']).hexdigest())
        # Create second ec2 credential with the same access key id and check
        # for conflict.
        self.post(
            '/credentials',
            body={'credential': ref}, expected_status=409)

    def test_get_ec2_dict_blob(self):
        """Ensure non-JSON blob data is correctly converted."""
        expected_blob, credential_id = self._create_dict_blob_credential()
        r = self.get(
            '/credentials/%(credential_id)s' % {
                'credential_id': credential_id})
        self.assertEqual(expected_blob, r.result['credential']['blob'])

    def test_list_ec2_dict_blob(self):
        """Ensure non-JSON blob data is correctly converted."""
        expected_blob, credential_id = self._create_dict_blob_credential()
        list_r = self.get('/credentials')
        list_creds = list_r.result['credentials']
        list_ids = [r['id'] for r in list_creds]
        self.assertIn(credential_id, list_ids)
        # The dict-stored blob must come back as its JSON string form
        for r in list_creds:
            if r['id'] == credential_id:
                self.assertEqual(expected_blob, r['blob'])

    def test_create_non_ec2_credential(self):
        """Call ``POST /credentials`` for creating non-ec2 credential."""
        ref = self.new_credential_ref(user_id=self.user['id'])
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
        # Assert credential id is not same as hash of access key id for
        # non-ec2 credentials
        self.assertNotEqual(r.result['credential']['id'],
                            hashlib.sha256(blob['access']).hexdigest())

    def test_create_ec2_credential_with_invalid_blob(self):
        """Call ``POST /credentials`` for creating ec2
           credential with invalid blob.
        """
        ref = self.new_credential_ref(user_id=self.user['id'])
        # deliberately malformed JSON
        ref['blob'] = '{"abc":"def"d}'
        ref['type'] = 'ec2'
        # Assert 400 status for bad request containing invalid
        # blob
        response = self.post(
            '/credentials',
            body={'credential': ref}, expected_status=400)
        self.assertValidErrorResponse(response)

    def test_create_credential_with_admin_token(self):
        # Make sure we can create credential with the static admin token
        ref = self.new_credential_ref(user_id=self.user['id'])
        r = self.post(
            '/credentials',
            body={'credential': ref},
            token=CONF.admin_token)
        self.assertValidCredentialResponse(r, ref)
class TestCredentialTrustScoped(test_v3.RestfulTestCase):
    """Test credential with trust scoped token."""
    def setUp(self):
        super(TestCredentialTrustScoped, self).setUp()

        self.trustee_user = self.new_user_ref(domain_id=self.domain_id)
        password = self.trustee_user['password']
        self.trustee_user = self.identity_api.create_user(self.trustee_user)
        # create_user strips the password from the returned ref; restore it
        # so the trustee can authenticate below.
        self.trustee_user['password'] = password
        self.trustee_user_id = self.trustee_user['id']

    def config_overrides(self):
        # Trusts are disabled by default; enable them for this test class.
        super(TestCredentialTrustScoped, self).config_overrides()
        self.config_fixture.config(group='trust', enabled=True)

    def test_trust_scoped_ec2_credential(self):
        """Call ``POST /credentials`` for creating ec2 credential."""
        # Create the trust
        ref = self.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            impersonation=True,
            expires=dict(minutes=1),
            role_ids=[self.role_id])
        del ref['id']
        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
        trust = self.assertValidTrustResponse(r)

        # Get a trust scoped token
        auth_data = self.build_authentication_request(
            user_id=self.trustee_user['id'],
            password=self.trustee_user['password'],
            trust_id=trust['id'])
        r = self.post('/auth/tokens', body=auth_data)
        self.assertValidProjectTrustScopedTokenResponse(r, self.user)
        trust_id = r.result['token']['OS-TRUST:trust']['id']
        token_id = r.headers.get('X-Subject-Token')

        # Create the credential with the trust scoped token
        ref = self.new_credential_ref(user_id=self.user['id'])
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        r = self.post(
            '/credentials',
            body={'credential': ref},
            token=token_id)

        # We expect the response blob to contain the trust_id
        ret_ref = ref.copy()
        ret_blob = blob.copy()
        ret_blob['trust_id'] = trust_id
        ret_ref['blob'] = json.dumps(ret_blob)
        self.assertValidCredentialResponse(r, ref=ret_ref)

        # Assert credential id is same as hash of access key id for
        # ec2 credentials
        self.assertEqual(r.result['credential']['id'],
                         hashlib.sha256(blob['access']).hexdigest())

        # Create second ec2 credential with the same access key id and check
        # for conflict.
        self.post(
            '/credentials',
            body={'credential': ref},
            token=token_id,
            expected_status=409)
class TestCredentialEc2(CredentialBaseTestCase):
    """Test v3 credential compatibility with ec2tokens."""
    # The original no-op setUp (which only called super) has been removed;
    # the inherited setUp runs regardless.

    def _validate_signature(self, access, secret):
        """Test signature validation with the access/secret provided."""
        signer = ec2_utils.Ec2Signer(secret)
        params = {'SignatureMethod': 'HmacSHA256',
                  'SignatureVersion': '2',
                  'AWSAccessKeyId': access}
        request = {'host': 'foo',
                   'verb': 'GET',
                   'path': '/bar',
                   'params': params}
        signature = signer.generate(request)

        # Now make a request to validate the signed dummy request via the
        # ec2tokens API. This proves the v3 ec2 credentials actually work.
        sig_ref = {'access': access,
                   'signature': signature,
                   'host': 'foo',
                   'verb': 'GET',
                   'path': '/bar',
                   'params': params}
        r = self.post(
            '/ec2tokens',
            body={'ec2Credentials': sig_ref},
            expected_status=200)
        self.assertValidTokenResponse(r)

    def test_ec2_credential_signature_validate(self):
        """Test signature validation with a v3 ec2 credential."""
        ref = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
        # Assert credential id is same as hash of access key id
        self.assertEqual(r.result['credential']['id'],
                         hashlib.sha256(blob['access']).hexdigest())

        cred_blob = json.loads(r.result['credential']['blob'])
        self.assertEqual(blob, cred_blob)
        self._validate_signature(access=cred_blob['access'],
                                 secret=cred_blob['secret'])

    def test_ec2_credential_signature_validate_legacy(self):
        """Test signature validation with a legacy v3 ec2 credential."""
        cred_json, credential_id = self._create_dict_blob_credential()
        cred_blob = json.loads(cred_json)
        self._validate_signature(access=cred_blob['access'],
                                 secret=cred_blob['secret'])

    def _get_ec2_cred_uri(self):
        """Return the OS-EC2 credentials URI for the test user."""
        return '/users/%s/credentials/OS-EC2' % self.user_id

    def _get_ec2_cred(self):
        """Create (via POST) and return an ec2 credential for the user."""
        uri = self._get_ec2_cred_uri()
        r = self.post(uri, body={'tenant_id': self.project_id})
        return r.result['credential']

    def test_ec2_create_credential(self):
        """Test ec2 credential creation."""
        ec2_cred = self._get_ec2_cred()
        self.assertEqual(self.user_id, ec2_cred['user_id'])
        self.assertEqual(self.project_id, ec2_cred['tenant_id'])
        self.assertIsNone(ec2_cred['trust_id'])
        self._validate_signature(access=ec2_cred['access'],
                                 secret=ec2_cred['secret'])
        return ec2_cred

    def test_ec2_get_credential(self):
        ec2_cred = self._get_ec2_cred()
        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
        r = self.get(uri)
        self.assertDictEqual(ec2_cred, r.result['credential'])

    def test_ec2_list_credentials(self):
        """Test ec2 credential listing."""
        # Bug fix: the original called self._get_ec2_cred_uri() here and
        # discarded the result — a dead call. The credential-creating
        # helper is clearly intended, since the assertion below expects
        # one credential to be listed.
        self._get_ec2_cred()
        uri = self._get_ec2_cred_uri()
        r = self.get(uri)
        cred_list = r.result
        self.assertEqual(1, len(cred_list))

    def test_ec2_delete_credential(self):
        """Test ec2 credential deletion."""
        ec2_cred = self._get_ec2_cred()
        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
        cred_from_credential_api = self.credential_api.list_credentials(
            user_id=self.user_id)
        self.assertEqual(1, len(cred_from_credential_api))
        self.delete(uri)
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          cred_from_credential_api[0]['id'])
| |
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import HT16K33
# Digit value to bitmask mapping:
# Segment bitmasks for each displayable character in the normal orientation.
# Bits 0-6 select segments A-G of a seven segment digit; bit 7 (not used
# here) is the decimal point.
DIGIT_VALUES = {
    ' ': 0x00,
    '-': 0x40,
    '0': 0x3F,
    '1': 0x06,
    '2': 0x5B,
    '3': 0x4F,
    '4': 0x66,
    '5': 0x6D,
    '6': 0x7D,
    '7': 0x07,
    '8': 0x7F,
    '9': 0x6F,
    'A': 0x77,
    'B': 0x7C,
    'C': 0x39,
    'D': 0x5E,
    'E': 0x79,
    'F': 0x71
}
# Segment bitmasks used when the display is mounted upside-down: each value
# appears to be the 180-degree rotation of the DIGIT_VALUES glyph (symmetric
# characters like '0', '2', '5' keep the same mask).
IDIGIT_VALUES = {
    ' ': 0x00,
    '-': 0x40,
    '0': 0x3F,
    '1': 0x30,
    '2': 0x5B,
    '3': 0x79,
    '4': 0x74,
    '5': 0x6D,
    '6': 0x6F,
    '7': 0x38,
    '8': 0x7F,
    '9': 0x7D,
    'A': 0x7E,
    'B': 0x67,
    'C': 0x0F,
    'D': 0x73,
    'E': 0x4F,
    'F': 0x4E
}
class SevenSegment(HT16K33.HT16K33):
    """Seven segment LED backpack display."""

    def __init__(self, invert=False, **kwargs):
        """Initialize display. All arguments will be passed to the HT16K33 class
        initializer, including optional I2C address and bus number parameters.
        Set invert=True if the display is mounted upside-down.
        """
        super(SevenSegment, self).__init__(**kwargs)
        self.invert = invert

    def set_invert(self, _invert):
        """Set whether the display is upside-down (True) or not (False)."""
        self.invert = _invert

    def _buffer_position(self, pos):
        """Return the buffer digit position for logical digit pos (0-3),
        adjusted for the colon (which occupies buffer position 2) and for
        the display orientation.
        """
        # Jump past the colon at position 2 by adding a conditional offset.
        offset = 0 if pos < 2 else 1
        # Reverse the digit order when the display is upside-down.
        if self.invert:
            return 4 - (pos + offset)
        return pos + offset

    def set_digit_raw(self, pos, bitmask):
        """Set digit at position to raw bitmask value. Position should be a value
        of 0 to 3 with 0 being the left most digit on the display."""
        if pos < 0 or pos > 3:
            # Ignore out of bounds digits.
            return
        # Set the digit bitmask value at the appropriate position.
        self.buffer[self._buffer_position(pos) * 2] = bitmask & 0xFF

    def set_decimal(self, pos, decimal):
        """Turn decimal point on or off at provided position. Position should be
        a value 0 to 3 with 0 being the left most digit on the display. Decimal
        should be True to turn on the decimal point and False to turn it off.
        """
        if pos < 0 or pos > 3:
            # Ignore out of bounds digits.
            return
        pos = self._buffer_position(pos)
        # Set or clear bit 7 (decimal point) based on provided value.
        if decimal:
            self.buffer[pos * 2] |= (1 << 7)
        else:
            self.buffer[pos * 2] &= ~(1 << 7)

    def set_digit(self, pos, digit, decimal=False):
        """Set digit at position to provided value. Position should be a value
        of 0 to 3 with 0 being the left most digit on the display. Digit should
        be a number 0-9, character A-F, space (all LEDs off), or dash (-).
        Unknown characters render as a blank digit.
        """
        if self.invert:
            self.set_digit_raw(pos, IDIGIT_VALUES.get(str(digit).upper(), 0x00))
        else:
            self.set_digit_raw(pos, DIGIT_VALUES.get(str(digit).upper(), 0x00))
        if decimal:
            self.set_decimal(pos, True)

    def set_colon(self, show_colon):
        """Turn the colon on with show colon True, or off with show colon False."""
        if show_colon:
            self.buffer[4] |= 0x02
        else:
            self.buffer[4] &= (~0x02) & 0xFF

    def set_left_colon(self, show_colon):
        """Turn the left colon on with show colon True, or off with show colon
        False. Only the large 1.2" 7-segment display has a left colon.
        """
        if show_colon:
            self.buffer[4] |= 0x04
            self.buffer[4] |= 0x08
        else:
            self.buffer[4] &= (~0x04) & 0xFF
            self.buffer[4] &= (~0x08) & 0xFF

    def set_fixed_decimal(self, show_decimal):
        """Turn on/off the single fixed decimal point on the large 1.2" 7-segment
        display. Set show_decimal to True to turn on and False to turn off.
        Only the large 1.2" 7-segment display has this decimal point (in the
        upper right in the normal orientation of the display).
        """
        if show_decimal:
            self.buffer[4] |= 0x10
        else:
            self.buffer[4] &= (~0x10) & 0xFF

    def print_number_str(self, value, justify_right=True):
        """Print a 4 character long string of numeric values to the display.
        Characters in the string should be any supported character by set_digit,
        or a decimal point. Decimal point characters will be associated with
        the previous character.
        """
        # Calculate length of value without decimals. Use str.replace rather
        # than the old value.translate(None, '.') form: the two-argument
        # translate is Python 2 only and raises TypeError on Python 3.
        length = len(value.replace('.', ''))
        # Error if value without decimals is longer than 4 characters.
        if length > 4:
            self.print_number_str('----')
            return
        # Calculate starting position of digits based on justification.
        pos = (4 - length) if justify_right else 0
        # Go through each character and print it on the display.
        for i, ch in enumerate(value):
            if ch == '.':
                # Print decimal points on the previous digit.
                self.set_decimal(pos - 1, True)
            else:
                self.set_digit(pos, ch)
                pos += 1

    def print_float(self, value, decimal_digits=2, justify_right=True):
        """Print a numeric value to the display. If value is negative
        it will be printed with a leading minus sign. Decimal digits is the
        desired number of digits after the decimal point.
        """
        format_string = '{{0:0.{0}F}}'.format(decimal_digits)
        self.print_number_str(format_string.format(value), justify_right)

    def print_hex(self, value, justify_right=True):
        """Print a numeric value in hexadecimal. Value should be from 0 to FFFF.
        """
        if value < 0 or value > 0xFFFF:
            # Ignore out of range values.
            return
        self.print_number_str('{0:X}'.format(value), justify_right)
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions for task creation for test cases."""
from clusterfuzz._internal.base import bisection
from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.build_management import build_manager
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.system import environment
def mark_unreproducible_if_flaky(testcase, potentially_flaky):
  """Check to see if a test case appears to be flaky."""
  current_task = environment.get_value('TASK_NAME')

  # A clean reproduction clears any earlier flakiness suspicion.
  if not potentially_flaky:
    testcase.set_metadata('potentially_flaky', False)
    return

  # First strike: remember the suspicion and retry the current task, since
  # the task that observed flakiness usually cannot complete in this state.
  if not testcase.get_metadata('potentially_flaky'):
    testcase.set_metadata('potentially_flaky', True)
    tasks.add_task(current_task, testcase.key.id(), testcase.job_type)
    return

  # Second strike: the testcase has now been flagged as potentially flaky
  # twice, so declare it unreproducible and fill in the fields that can no
  # longer be computed for it.
  if current_task == 'minimize' and not testcase.minimized_keys:
    testcase.minimized_keys = 'NA'
  if current_task in ('minimize', 'impact'):
    testcase.set_impacts_as_na()
  if current_task in ('minimize', 'regression'):
    testcase.regression = 'NA'
  if current_task in ('minimize', 'progression'):
    testcase.fixed = 'NA'

  testcase.one_time_crasher_flag = True
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       'Testcase appears to be flaky')

  # The reproducibility label flip happens in the App Engine cleanup cron;
  # this avoids calling the issue tracker APIs from GCE. Blame output
  # (component information) is still worth computing even though the
  # testcase is unreproducible.
  create_blame_task_if_needed(testcase)

  # Let bisection service know about flakiness.
  bisection.request_bisection(testcase)
def create_blame_task_if_needed(testcase):
  """Creates a blame task if needed."""
  # Blame is only applicable to the chromium project.
  if not utils.is_chromium():
    return
  if testcase.project_name != 'chromium':
    return

  # Custom binaries carry no context on crash revision or regression range.
  if build_manager.is_custom_binary():
    return

  # Duplicates sent to Predator skew metrics tracking and waste cycles.
  if testcase.status == 'Duplicate':
    return

  # Unreproducible testcases always get a blame task (component information
  # is still useful). Reproducible ones must first have a finished
  # regression task and, where applicable, a finished symbolize task.
  if not testcase.one_time_crasher_flag:
    if not testcase.regression:
      return
    if build_manager.has_symbolized_builds() and not testcase.symbolized:
      return

  tasks.add_task('blame', testcase.key.id(), testcase.job_type)
def create_impact_task_if_needed(testcase):
  """Creates an impact task if needed."""
  # Impact analysis only applies to the chromium project.
  if not utils.is_chromium() or testcase.project_name != 'chromium':
    return

  # There are no archived production builds for custom binaries, so impact
  # cannot be computed for them.
  if build_manager.is_custom_binary():
    return

  tasks.add_task('impact', testcase.key.id(), testcase.job_type)
def create_minimize_task_if_needed(testcase):
  """Creates a minimize task if needed."""
  # Minimization is scheduled unconditionally; per the note in create_tasks,
  # its completion triggers creation of the remaining follow-up tasks.
  tasks.add_task('minimize', testcase.key.id(), testcase.job_type)
def create_regression_task_if_needed(testcase):
  """Creates a regression task if needed."""
  # Custom builds keep only the last uploaded binary, so there are no
  # archived builds of previous revisions to bisect against.
  if build_manager.is_custom_binary():
    return

  testcase_id = testcase.key.id()
  tasks.add_task('regression', testcase_id, testcase.job_type)
def create_variant_tasks_if_needed(testcase):
  """Creates a variant task if needed."""
  if testcase.duplicate_of:
    # Variants were already computed for the testcase this one duplicates;
    # no need to spend cycles recalculating them.
    return

  testcase_id = testcase.key.id()
  project = data_handler.get_project_name(testcase.job_type)
  for job in data_types.Job.query(data_types.Job.project == project):
    candidate_job_type = job.name

    # A variant only makes sense in a job type other than the original.
    if candidate_job_type == testcase.job_type:
      continue

    # Engine fuzzer and blackbox fuzzer testcases are not interchangeable.
    if (environment.is_engine_fuzzer_job(testcase.job_type) !=
        environment.is_engine_fuzzer_job(candidate_job_type)):
      continue

    # Skip experimental jobs and jobs that opted out of variant tasks.
    job_environment = job.get_environment()
    if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
      continue
    if utils.string_is_true(job_environment.get('DISABLE_VARIANT')):
      continue

    queue = tasks.queue_for_platform(job.platform)
    tasks.add_task('variant', testcase_id, candidate_job_type, queue)

    variant = data_handler.get_testcase_variant(testcase_id,
                                                candidate_job_type)
    variant.status = data_types.TestcaseVariantStatus.PENDING
    variant.put()
def create_symbolize_task_if_needed(testcase):
  """Creates a symbolize task if needed."""
  # Symbolize requires archived symbolized builds, which custom binaries
  # lack, and at least one symbolized build url pattern in the job type.
  if build_manager.is_custom_binary() or (
      not build_manager.has_symbolized_builds()):
    return

  tasks.add_task('symbolize', testcase.key.id(), testcase.job_type)
def create_tasks(testcase):
  """Create the follow-up tasks (minimization, regression, impact,
  progression, etc.) for a newly generated testcase."""
  # No progression task here: the cron handler creates it automatically for
  # reproducible testcases.

  if testcase.one_time_crasher_flag:
    # Non-reproducible crash: only blame output (component information) is
    # still worth computing.
    create_blame_task_if_needed(testcase)
    return

  # Fully reproducible crash. A job definition with MIN == 'No' opts out of
  # the heavy-weight tasks (minimization, regression, impact, ...); this is
  # usually done for jobs with large timeouts where more than a couple of
  # hours per testcase cannot be afforded.
  testcase_id = testcase.key.id()
  if environment.get_value('MIN') == 'No':
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.minimized_keys = 'NA'
    testcase.regression = 'NA'
    testcase.set_impacts_as_na()
    testcase.put()
    return

  # Kick off minimization only; once it completes, it automatically creates
  # the rest of the needed tasks.
  create_minimize_task_if_needed(testcase)
| |
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
import neutron.services.firewall.drivers.linux.iptables_fwaas as fwaas
from neutron.tests import base
from neutron.tests.unit import test_api_v2
# Shorthand for the uuid generator used by the API v2 test helpers.
_uuid = test_api_v2._uuid
# Static fixture values shared across the firewall driver tests below.
FAKE_SRC_PREFIX = '10.0.0.0/24'
FAKE_DST_PREFIX = '20.0.0.0/24'
FAKE_PROTOCOL = 'tcp'
FAKE_SRC_PORT = 5000
FAKE_DST_PORT = 22
FAKE_FW_ID = 'fake-fw-uuid'
class IptablesFwaasTestCase(base.BaseTestCase):
    """Tests for the iptables FWaaS driver against mocked iptables managers."""
    def setUp(self):
        super(IptablesFwaasTestCase, self).setUp()
        cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
        # Prevent any real command execution during the tests.
        self.utils_exec_p = mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        # Replace IptablesManager so no iptables state is ever touched.
        self.iptables_cls_p = mock.patch(
            'neutron.agent.linux.iptables_manager.IptablesManager')
        self.iptables_cls_p.start()
        self.firewall = fwaas.IptablesFwaasDriver()
    def _fake_rules_v4(self, fwid, apply_list):
        """Return two sample IPv4 rules (allow tcp/80, deny tcp/22) and
        pre-register the firewall chain names on each router's v4 filter."""
        rule_list = []
        rule1 = {'enabled': True,
                 'action': 'allow',
                 'ip_version': 4,
                 'protocol': 'tcp',
                 'destination_port': '80',
                 'source_ip_address': '10.24.4.2'}
        rule2 = {'enabled': True,
                 'action': 'deny',
                 'ip_version': 4,
                 'protocol': 'tcp',
                 'destination_port': '22'}
        # Chain names are truncated to 11 characters, mirroring the driver.
        ingress_chain = ('iv4%s' % fwid)[:11]
        egress_chain = ('ov4%s' % fwid)[:11]
        for router_info_inst in apply_list:
            v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
            v4filter_inst.chains.append(ingress_chain)
            v4filter_inst.chains.append(egress_chain)
        rule_list.append(rule1)
        rule_list.append(rule2)
        return rule_list
    def _fake_firewall_no_rule(self):
        """Return a firewall dict that is admin-up and has no rules."""
        rule_list = []
        fw_inst = {'id': FAKE_FW_ID,
                   'admin_state_up': True,
                   'tenant_id': 'tenant-uuid',
                   'firewall_rule_list': rule_list}
        return fw_inst
    def _fake_firewall(self, rule_list):
        """Return an admin-up firewall dict wrapping the given rule list."""
        fw_inst = {'id': FAKE_FW_ID,
                   'admin_state_up': True,
                   'tenant_id': 'tenant-uuid',
                   'firewall_rule_list': rule_list}
        return fw_inst
    def _fake_firewall_with_admin_down(self, rule_list):
        """Return a firewall dict with admin_state_up set to False."""
        fw_inst = {'id': FAKE_FW_ID,
                   'admin_state_up': False,
                   'tenant_id': 'tenant-uuid',
                   'firewall_rule_list': rule_list}
        return fw_inst
    def _fake_apply_list(self, router_count=1, distributed=False,
                         distributed_mode=None):
        """Return router_count mocked router_info objects, each carrying a
        fake iptables manager with empty v4/v6 filter chain lists."""
        apply_list = []
        while router_count > 0:
            iptables_inst = mock.Mock()
            router_inst = {'distributed': distributed}
            v4filter_inst = mock.Mock()
            v6filter_inst = mock.Mock()
            v4filter_inst.chains = []
            v6filter_inst.chains = []
            iptables_inst.ipv4 = {'filter': v4filter_inst}
            iptables_inst.ipv6 = {'filter': v6filter_inst}
            router_info_inst = mock.Mock()
            router_info_inst.iptables_manager = iptables_inst
            router_info_inst.snat_iptables_manager = iptables_inst
            if distributed_mode == 'dvr':
                router_info_inst.dist_fip_count = 1
            router_info_inst.router = router_inst
            apply_list.append(router_info_inst)
            router_count -= 1
        return apply_list
    def _setup_firewall_with_rules(self, func, router_count=1,
                                   distributed=False, distributed_mode=None):
        """Invoke func (create/update firewall) with a rule-bearing firewall
        and assert the expected sequence of iptables manager calls."""
        apply_list = self._fake_apply_list(router_count=router_count,
            distributed=distributed, distributed_mode=distributed_mode)
        rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
        firewall = self._fake_firewall(rule_list)
        if distributed:
            if distributed_mode == 'dvr_snat':
                if_prefix = 'sg-+'
            # NOTE(review): if_prefix is only assigned for the 'dvr_snat'
            # and 'dvr' modes; any other mode with distributed=True would
            # leave it unbound below -- confirm callers only use these two.
            if distributed_mode == 'dvr':
                if_prefix = 'rfp-+'
        else:
            if_prefix = 'qr-+'
            distributed_mode = 'legacy'
        func(distributed_mode, apply_list, firewall)
        invalid_rule = '-m state --state INVALID -j DROP'
        est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
        rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT'
        rule2 = '-p tcp --dport 22 -j DROP'
        ingress_chain = 'iv4%s' % firewall['id']
        egress_chain = 'ov4%s' % firewall['id']
        bname = fwaas.iptables_manager.binary_name
        # Chains are namespaced with the binary name by IptablesManager.
        ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
        ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
        for router_info_inst in apply_list:
            v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
            calls = [mock.call.ensure_remove_chain('iv4fake-fw-uuid'),
                     mock.call.ensure_remove_chain('ov4fake-fw-uuid'),
                     mock.call.ensure_remove_chain('fwaas-default-policy'),
                     mock.call.add_chain('fwaas-default-policy'),
                     mock.call.add_rule('fwaas-default-policy', '-j DROP'),
                     mock.call.add_chain(ingress_chain),
                     mock.call.add_rule(ingress_chain, invalid_rule),
                     mock.call.add_rule(ingress_chain, est_rule),
                     mock.call.add_chain(egress_chain),
                     mock.call.add_rule(egress_chain, invalid_rule),
                     mock.call.add_rule(egress_chain, est_rule),
                     mock.call.add_rule(ingress_chain, rule1),
                     mock.call.add_rule(egress_chain, rule1),
                     mock.call.add_rule(ingress_chain, rule2),
                     mock.call.add_rule(egress_chain, rule2),
                     mock.call.add_rule('FORWARD',
                                        '-o %s -j %s' % (if_prefix,
                                                         ipt_mgr_ichain)),
                     mock.call.add_rule('FORWARD',
                                        '-i %s -j %s' % (if_prefix,
                                                         ipt_mgr_echain)),
                     mock.call.add_rule('FORWARD',
                                        '-o %s -j %s-fwaas-defau' % (if_prefix,
                                                                     bname)),
                     mock.call.add_rule('FORWARD',
                                        '-i %s -j %s-fwaas-defau' % (if_prefix,
                                                                     bname))]
            v4filter_inst.assert_has_calls(calls)
    def test_create_firewall_no_rules(self):
        """A rule-less firewall installs only the default DROP policy plus
        the INVALID/ESTABLISHED bookkeeping rules, for both IPv4 and IPv6."""
        apply_list = self._fake_apply_list()
        firewall = self._fake_firewall_no_rule()
        self.firewall.create_firewall('legacy', apply_list, firewall)
        invalid_rule = '-m state --state INVALID -j DROP'
        est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
        bname = fwaas.iptables_manager.binary_name
        for ip_version in (4, 6):
            ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
            egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
            calls = [mock.call.ensure_remove_chain(
                     'iv%sfake-fw-uuid' % ip_version),
                     mock.call.ensure_remove_chain(
                     'ov%sfake-fw-uuid' % ip_version),
                     mock.call.ensure_remove_chain('fwaas-default-policy'),
                     mock.call.add_chain('fwaas-default-policy'),
                     mock.call.add_rule('fwaas-default-policy', '-j DROP'),
                     mock.call.add_chain(ingress_chain),
                     mock.call.add_rule(ingress_chain, invalid_rule),
                     mock.call.add_rule(ingress_chain, est_rule),
                     mock.call.add_chain(egress_chain),
                     mock.call.add_rule(egress_chain, invalid_rule),
                     mock.call.add_rule(egress_chain, est_rule),
                     mock.call.add_rule('FORWARD',
                                        '-o qr-+ -j %s-fwaas-defau' % bname),
                     mock.call.add_rule('FORWARD',
                                        '-i qr-+ -j %s-fwaas-defau' % bname)]
            if ip_version == 4:
                v4filter_inst = apply_list[0].iptables_manager.ipv4['filter']
                v4filter_inst.assert_has_calls(calls)
            else:
                v6filter_inst = apply_list[0].iptables_manager.ipv6['filter']
                v6filter_inst.assert_has_calls(calls)
    def test_create_firewall_with_rules(self):
        self._setup_firewall_with_rules(self.firewall.create_firewall)
    def test_create_firewall_with_rules_two_routers(self):
        self._setup_firewall_with_rules(self.firewall.create_firewall,
                                        router_count=2)
    def test_update_firewall_with_rules(self):
        self._setup_firewall_with_rules(self.firewall.update_firewall)
    def test_delete_firewall(self):
        """Deleting a firewall removes its chains and the default policy."""
        apply_list = self._fake_apply_list()
        firewall = self._fake_firewall_no_rule()
        self.firewall.delete_firewall('legacy', apply_list, firewall)
        ingress_chain = 'iv4%s' % firewall['id']
        egress_chain = 'ov4%s' % firewall['id']
        calls = [mock.call.ensure_remove_chain(ingress_chain),
                 mock.call.ensure_remove_chain(egress_chain),
                 mock.call.ensure_remove_chain('fwaas-default-policy')]
        apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
    def test_create_firewall_with_admin_down(self):
        """An admin-down firewall installs only the default DROP policy,
        ignoring its rule list."""
        apply_list = self._fake_apply_list()
        rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
        firewall = self._fake_firewall_with_admin_down(rule_list)
        self.firewall.create_firewall('legacy', apply_list, firewall)
        calls = [mock.call.ensure_remove_chain('iv4fake-fw-uuid'),
                 mock.call.ensure_remove_chain('ov4fake-fw-uuid'),
                 mock.call.ensure_remove_chain('fwaas-default-policy'),
                 mock.call.add_chain('fwaas-default-policy'),
                 mock.call.add_rule('fwaas-default-policy', '-j DROP')]
        apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
    def test_create_firewall_with_rules_dvr_snat(self):
        self._setup_firewall_with_rules(self.firewall.create_firewall,
            distributed=True, distributed_mode='dvr_snat')
    def test_update_firewall_with_rules_dvr_snat(self):
        self._setup_firewall_with_rules(self.firewall.update_firewall,
            distributed=True, distributed_mode='dvr_snat')
    def test_create_firewall_with_rules_dvr(self):
        self._setup_firewall_with_rules(self.firewall.create_firewall,
            distributed=True, distributed_mode='dvr')
    def test_update_firewall_with_rules_dvr(self):
        self._setup_firewall_with_rules(self.firewall.update_firewall,
            distributed=True, distributed_mode='dvr')
| |
"""Denon HEOS Media Player."""
from __future__ import annotations
from functools import reduce, wraps
import logging
from operator import ior
from pyheos import HeosError, const as heos_const
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE,
DOMAIN,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_URL,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_GROUPING,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.dt import utcnow
from .const import (
DATA_ENTITY_ID_MAP,
DATA_GROUP_MANAGER,
DATA_SOURCE_MANAGER,
DOMAIN as HEOS_DOMAIN,
SIGNAL_HEOS_PLAYER_ADDED,
SIGNAL_HEOS_UPDATED,
)
# Features every HEOS player exposes regardless of what is currently playing;
# per-media controls from CONTROL_TO_SUPPORT are OR'ed in during updates.
BASE_SUPPORTED_FEATURES = (
    SUPPORT_VOLUME_MUTE
    | SUPPORT_VOLUME_SET
    | SUPPORT_VOLUME_STEP
    | SUPPORT_CLEAR_PLAYLIST
    | SUPPORT_SHUFFLE_SET
    | SUPPORT_SELECT_SOURCE
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_GROUPING
)
# Map pyheos play states to Home Assistant media player states.
PLAY_STATE_TO_STATE = {
    heos_const.PLAY_STATE_PLAY: STATE_PLAYING,
    heos_const.PLAY_STATE_STOP: STATE_IDLE,
    heos_const.PLAY_STATE_PAUSE: STATE_PAUSED,
}
# Map pyheos media controls to the corresponding optional support flags.
CONTROL_TO_SUPPORT = {
    heos_const.CONTROL_PLAY: SUPPORT_PLAY,
    heos_const.CONTROL_PAUSE: SUPPORT_PAUSE,
    heos_const.CONTROL_STOP: SUPPORT_STOP,
    heos_const.CONTROL_PLAY_PREVIOUS: SUPPORT_PREVIOUS_TRACK,
    heos_const.CONTROL_PLAY_NEXT: SUPPORT_NEXT_TRACK,
}
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Add media players for a config entry."""
    # One media player entity per HEOS player known to the controller.
    player_map = hass.data[HEOS_DOMAIN][DOMAIN]
    async_add_entities(
        [HeosMediaPlayer(player) for player in player_map.values()], True
    )
def log_command_error(command: str):
    """Return decorator that logs command failure."""

    def decorator(func):
        @wraps(func)
        async def guarded(*args, **kwargs):
            # Log expected failures instead of raising, so a failed remote
            # command never breaks the calling service.
            try:
                await func(*args, **kwargs)
            except (HeosError, ValueError) as err:
                _LOGGER.error("Unable to %s: %s", command, err)

        return guarded

    return decorator
class HeosMediaPlayer(MediaPlayerEntity):
"""The HEOS player."""
def __init__(self, player):
"""Initialize."""
self._media_position_updated_at = None
self._player = player
self._signals = []
self._supported_features = BASE_SUPPORTED_FEATURES
self._source_manager = None
self._group_manager = None
async def _player_update(self, player_id, event):
"""Handle player attribute updated."""
if self._player.player_id != player_id:
return
if event == heos_const.EVENT_PLAYER_NOW_PLAYING_PROGRESS:
self._media_position_updated_at = utcnow()
await self.async_update_ha_state(True)
async def _heos_updated(self):
"""Handle sources changed."""
await self.async_update_ha_state(True)
async def async_added_to_hass(self):
"""Device added to hass."""
# Update state when attributes of the player change
self._signals.append(
self._player.heos.dispatcher.connect(
heos_const.SIGNAL_PLAYER_EVENT, self._player_update
)
)
# Update state when heos changes
self._signals.append(
async_dispatcher_connect(self.hass, SIGNAL_HEOS_UPDATED, self._heos_updated)
)
# Register this player's entity_id so it can be resolved by the group manager
self.hass.data[HEOS_DOMAIN][DATA_ENTITY_ID_MAP][
self._player.player_id
] = self.entity_id
async_dispatcher_send(self.hass, SIGNAL_HEOS_PLAYER_ADDED)
@log_command_error("clear playlist")
async def async_clear_playlist(self):
"""Clear players playlist."""
await self._player.clear_queue()
@log_command_error("join_players")
async def async_join_players(self, group_members: list[str]) -> None:
"""Join `group_members` as a player group with the current player."""
await self._group_manager.async_join_players(self.entity_id, group_members)
@log_command_error("pause")
async def async_media_pause(self):
"""Send pause command."""
await self._player.pause()
@log_command_error("play")
async def async_media_play(self):
"""Send play command."""
await self._player.play()
@log_command_error("move to previous track")
async def async_media_previous_track(self):
"""Send previous track command."""
await self._player.play_previous()
@log_command_error("move to next track")
async def async_media_next_track(self):
"""Send next track command."""
await self._player.play_next()
@log_command_error("stop")
async def async_media_stop(self):
"""Send stop command."""
await self._player.stop()
@log_command_error("set mute")
async def async_mute_volume(self, mute):
"""Mute the volume."""
await self._player.set_mute(mute)
@log_command_error("play media")
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if media_type in (MEDIA_TYPE_URL, MEDIA_TYPE_MUSIC):
await self._player.play_url(media_id)
return
if media_type == "quick_select":
# media_id may be an int or a str
selects = await self._player.get_quick_selects()
try:
index = int(media_id)
except ValueError:
# Try finding index by name
index = next(
(index for index, select in selects.items() if select == media_id),
None,
)
if index is None:
raise ValueError(f"Invalid quick select '{media_id}'")
await self._player.play_quick_select(index)
return
if media_type == MEDIA_TYPE_PLAYLIST:
playlists = await self._player.heos.get_playlists()
playlist = next((p for p in playlists if p.name == media_id), None)
if not playlist:
raise ValueError(f"Invalid playlist '{media_id}'")
add_queue_option = (
heos_const.ADD_QUEUE_ADD_TO_END
if kwargs.get(ATTR_MEDIA_ENQUEUE)
else heos_const.ADD_QUEUE_REPLACE_AND_PLAY
)
await self._player.add_to_queue(playlist, add_queue_option)
return
if media_type == "favorite":
# media_id may be an int or str
try:
index = int(media_id)
except ValueError:
# Try finding index by name
index = next(
(
index
for index, favorite in self._source_manager.favorites.items()
if favorite.name == media_id
),
None,
)
if index is None:
raise ValueError(f"Invalid favorite '{media_id}'")
await self._player.play_favorite(index)
return
raise ValueError(f"Unsupported media type '{media_type}'")
@log_command_error("select source")
async def async_select_source(self, source):
"""Select input source."""
await self._source_manager.play_source(source, self._player)
@log_command_error("set shuffle")
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self._player.set_play_mode(self._player.repeat, shuffle)
@log_command_error("set volume level")
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._player.set_volume(int(volume * 100))
async def async_update(self):
"""Update supported features of the player."""
controls = self._player.now_playing_media.supported_controls
current_support = [CONTROL_TO_SUPPORT[control] for control in controls]
self._supported_features = reduce(ior, current_support, BASE_SUPPORTED_FEATURES)
if self._group_manager is None:
self._group_manager = self.hass.data[HEOS_DOMAIN][DATA_GROUP_MANAGER]
if self._source_manager is None:
self._source_manager = self.hass.data[HEOS_DOMAIN][DATA_SOURCE_MANAGER]
@log_command_error("unjoin_player")
async def async_unjoin_player(self):
"""Remove this player from any group."""
await self._group_manager.async_unjoin_player(self.entity_id)
async def async_will_remove_from_hass(self):
"""Disconnect the device when removed."""
for signal_remove in self._signals:
signal_remove()
self._signals.clear()
@property
def available(self) -> bool:
"""Return True if the device is available."""
return self._player.available
@property
def device_info(self) -> DeviceInfo:
"""Get attributes about the device."""
return DeviceInfo(
identifiers={(HEOS_DOMAIN, self._player.player_id)},
manufacturer="HEOS",
model=self._player.model,
name=self._player.name,
sw_version=self._player.version,
)
@property
def extra_state_attributes(self) -> dict:
"""Get additional attribute about the state."""
return {
"media_album_id": self._player.now_playing_media.album_id,
"media_queue_id": self._player.now_playing_media.queue_id,
"media_source_id": self._player.now_playing_media.source_id,
"media_station": self._player.now_playing_media.station,
"media_type": self._player.now_playing_media.type,
}
@property
def group_members(self) -> list[str]:
"""List of players which are grouped together."""
return self._group_manager.group_membership.get(self.entity_id, [])
@property
def is_volume_muted(self) -> bool:
"""Boolean if volume is currently muted."""
return self._player.is_muted
@property
def media_album_name(self) -> str:
"""Album name of current playing media, music track only."""
return self._player.now_playing_media.album
@property
def media_artist(self) -> str:
"""Artist of current playing media, music track only."""
return self._player.now_playing_media.artist
@property
def media_content_id(self) -> str:
"""Content ID of current playing media."""
return self._player.now_playing_media.media_id
@property
def media_content_type(self) -> str:
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
duration = self._player.now_playing_media.duration
if isinstance(duration, int):
return duration / 1000
return None
@property
def media_position(self):
"""Position of current playing media in seconds."""
# Some media doesn't have duration but reports position, return None
if not self._player.now_playing_media.duration:
return None
return self._player.now_playing_media.current_position / 1000
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
# Some media doesn't have duration but reports position, return None
if not self._player.now_playing_media.duration:
return None
return self._media_position_updated_at
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return True
@property
def media_image_url(self) -> str:
"""Image url of current playing media."""
# May be an empty string, if so, return None
image_url = self._player.now_playing_media.image_url
return image_url if image_url else None
@property
def media_title(self) -> str:
"""Title of current playing media."""
return self._player.now_playing_media.song
@property
def name(self) -> str:
"""Return the name of the device."""
return self._player.name
@property
def should_poll(self) -> bool:
"""No polling needed for this device."""
return False
@property
def shuffle(self) -> bool:
"""Boolean if shuffle is enabled."""
return self._player.shuffle
@property
def source(self) -> str:
"""Name of the current input source."""
return self._source_manager.get_current_source(self._player.now_playing_media)
@property
def source_list(self) -> list[str]:
"""List of available input sources."""
return self._source_manager.source_list
@property
def state(self) -> str:
"""State of the player."""
return PLAY_STATE_TO_STATE[self._player.state]
@property
def supported_features(self) -> int:
"""Flag media player features that are supported."""
return self._supported_features
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return str(self._player.player_id)
@property
def volume_level(self) -> float:
"""Volume level of the media player (0..1)."""
return self._player.volume / 100
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Flavor action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import parseractions
from openstackclient.common import utils
class CreateFlavor(show.ShowOne):
    """Create new flavor"""

    log = logging.getLogger(__name__ + ".CreateFlavor")

    def get_parser(self, prog_name):
        # Build the CLI parser; defaults mirror nova's flavor-create.
        parser = super(CreateFlavor, self).get_parser(prog_name)
        parser.add_argument(
            "name",
            metavar="<flavor-name>",
            help="New flavor name",
        )
        parser.add_argument(
            "--id",
            metavar="<id>",
            default='auto',
            help="Unique flavor ID; 'auto' creates a UUID "
                 "(default: auto)",
        )
        parser.add_argument(
            "--ram",
            type=int,
            metavar="<size-mb>",
            default=256,
            help="Memory size in MB (default 256M)",
        )
        parser.add_argument(
            "--disk",
            type=int,
            metavar="<size-gb>",
            default=0,
            help="Disk size in GB (default 0G)",
        )
        parser.add_argument(
            "--ephemeral",
            type=int,
            metavar="<size-gb>",
            default=0,
            help="Ephemeral disk size in GB (default 0G)",
        )
        parser.add_argument(
            "--swap",
            type=int,
            metavar="<size-gb>",
            default=0,
            help="Swap space size in GB (default 0G)",
        )
        parser.add_argument(
            "--vcpus",
            type=int,
            metavar="<vcpus>",
            default=1,
            help="Number of vcpus (default 1)",
        )
        parser.add_argument(
            "--rxtx-factor",
            type=int,
            metavar="<factor>",
            default=1,
            help="RX/TX factor (default 1)",
        )
        # --public/--private are mutually exclusive toggles on one dest.
        public_group = parser.add_mutually_exclusive_group()
        public_group.add_argument(
            "--public",
            dest="public",
            action="store_true",
            default=True,
            help="Flavor is available to other projects (default)",
        )
        public_group.add_argument(
            "--private",
            dest="public",
            action="store_false",
            help="Flavor is not available to other projects",
        )
        return parser

    def take_action(self, parsed_args):
        """Create the flavor and return its fields as (names, values)."""
        self.log.debug("take_action(%s)", parsed_args)
        compute_client = self.app.client_manager.compute
        # Positional argument order must match novaclient flavors.create().
        args = (
            parsed_args.name,
            parsed_args.ram,
            parsed_args.vcpus,
            parsed_args.disk,
            parsed_args.id,
            parsed_args.ephemeral,
            parsed_args.swap,
            parsed_args.rxtx_factor,
            parsed_args.public
        )
        flavor = compute_client.flavors.create(*args)._info.copy()
        # A 'links' entry is not guaranteed to be present; pop with a
        # default so a missing key cannot raise KeyError (matches ShowFlavor).
        flavor.pop("links", None)
        return zip(*sorted(six.iteritems(flavor)))
class DeleteFlavor(command.Command):
    """Delete flavor"""

    log = logging.getLogger(__name__ + ".DeleteFlavor")

    def get_parser(self, prog_name):
        # One positional argument: the flavor to remove.
        parser = super(DeleteFlavor, self).get_parser(prog_name)
        parser.add_argument(
            "flavor",
            metavar="<flavor>",
            help="Flavor to delete (name or ID)",
        )
        return parser

    def take_action(self, parsed_args):
        """Resolve the flavor by name or ID, then delete it."""
        self.log.debug("take_action(%s)", parsed_args)
        compute_client = self.app.client_manager.compute
        flavor = utils.find_resource(
            compute_client.flavors, parsed_args.flavor)
        compute_client.flavors.delete(flavor.id)
class ListFlavor(lister.Lister):
    """List flavors"""
    log = logging.getLogger(__name__ + ".ListFlavor")
    def get_parser(self, prog_name):
        # Build the CLI parser: visibility toggles, output verbosity and
        # pagination controls.
        parser = super(ListFlavor, self).get_parser(prog_name)
        # --public/--private/--all share one exclusive group; --public and
        # --private both write the boolean 'public' dest, --all its own flag.
        public_group = parser.add_mutually_exclusive_group()
        public_group.add_argument(
            "--public",
            dest="public",
            action="store_true",
            default=True,
            help="List only public flavors (default)",
        )
        public_group.add_argument(
            "--private",
            dest="public",
            action="store_false",
            help="List only private flavors",
        )
        public_group.add_argument(
            "--all",
            dest="all",
            action="store_true",
            default=False,
            help="List all flavors, whether public or private",
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help='List additional fields in output')
        parser.add_argument(
            '--marker',
            metavar="<marker>",
            help='The last flavor ID of the previous page')
        parser.add_argument(
            '--limit',
            type=int,
            metavar="<limit>",
            help='Maximum number of flavors to display')
        return parser
    def take_action(self, parsed_args):
        # Returns (column_headers, row_iterable) as expected by
        # cliff's Lister.
        self.log.debug("take_action(%s)", parsed_args)
        compute_client = self.app.client_manager.compute
        columns = (
            "ID",
            "Name",
            "RAM",
            "Disk",
            "Ephemeral",
            "VCPUs",
            "Is Public",
        )
        # is_public is ternary - None means give all flavors,
        # True is public only and False is private only
        # By default Nova assumes True and gives admins public flavors
        # and flavors from their own projects only.
        is_public = None if parsed_args.all else parsed_args.public
        data = compute_client.flavors.list(is_public=is_public,
                                           marker=parsed_args.marker,
                                           limit=parsed_args.limit)
        if parsed_args.long:
            columns = columns + (
                "Swap",
                "RXTX Factor",
                "Properties",
            )
            # Extra-spec properties require one more API call per flavor;
            # attach them to each resource so get_item_properties can read
            # the 'Properties' column.
            for f in data:
                f.properties = f.get_keys()
        column_headers = columns
        # The generator below is evaluated lazily by cliff when rendering.
        return (column_headers,
                (utils.get_item_properties(
                    s, columns, formatters={'Properties': utils.format_dict},
                ) for s in data))
class ShowFlavor(show.ShowOne):
    """Display flavor details"""

    log = logging.getLogger(__name__ + ".ShowFlavor")

    def get_parser(self, prog_name):
        parser = super(ShowFlavor, self).get_parser(prog_name)
        parser.add_argument(
            "flavor",
            metavar="<flavor>",
            help="Flavor to display (name or ID)",
        )
        return parser

    def take_action(self, parsed_args):
        """Look up the flavor by name or ID and show its fields."""
        self.log.debug("take_action(%s)", parsed_args)
        compute_client = self.app.client_manager.compute
        flavor_obj = utils.find_resource(
            compute_client.flavors, parsed_args.flavor)
        info = flavor_obj._info.copy()
        info.pop("links", None)
        info['properties'] = utils.format_dict(flavor_obj.get_keys())
        return zip(*sorted(six.iteritems(info)))
class SetFlavor(show.ShowOne):
    """Set flavor properties"""

    log = logging.getLogger(__name__ + ".SetFlavor")

    def get_parser(self, prog_name):
        parser = super(SetFlavor, self).get_parser(prog_name)
        parser.add_argument(
            "--property",
            metavar="<key=value>",
            action=parseractions.KeyValueAction,
            help='Property to add or modify for this flavor '
                 '(repeat option to set multiple properties)',
        )
        parser.add_argument(
            "flavor",
            metavar="<flavor>",
            help="Flavor to modify (name or ID)",
        )
        return parser

    def take_action(self, parsed_args):
        """Set extra-spec properties on a flavor and show the result."""
        self.log.debug("take_action(%s)", parsed_args)
        compute_client = self.app.client_manager.compute
        # Resolve by name *or* ID, as the help text promises and as the
        # other flavor commands do; the previous find(name=...) lookup
        # only matched on name.
        resource_flavor = utils.find_resource(compute_client.flavors,
                                              parsed_args.flavor)
        # Skip the API call when no --property was supplied;
        # set_keys(None) would fail inside novaclient.
        if parsed_args.property:
            resource_flavor.set_keys(parsed_args.property)
        flavor = resource_flavor._info.copy()
        flavor['properties'] = utils.format_dict(resource_flavor.get_keys())
        flavor.pop("links", None)
        return zip(*sorted(six.iteritems(flavor)))
class UnsetFlavor(show.ShowOne):
    """Unset flavor properties"""

    log = logging.getLogger(__name__ + ".UnsetFlavor")

    def get_parser(self, prog_name):
        parser = super(UnsetFlavor, self).get_parser(prog_name)
        parser.add_argument(
            "--property",
            metavar="<key>",
            action='append',
            help='Property to remove from flavor '
                 '(repeat option to unset multiple properties)',
            required=True,
        )
        parser.add_argument(
            "flavor",
            metavar="<flavor>",
            help="Flavor to modify (name or ID)",
        )
        return parser

    def take_action(self, parsed_args):
        """Remove extra-spec properties from a flavor and show the result."""
        self.log.debug("take_action(%s)", parsed_args)
        compute_client = self.app.client_manager.compute
        # Resolve by name *or* ID, as the help text promises and as the
        # other flavor commands do; the previous find(name=...) lookup
        # only matched on name.
        resource_flavor = utils.find_resource(compute_client.flavors,
                                              parsed_args.flavor)
        resource_flavor.unset_keys(parsed_args.property)
        flavor = resource_flavor._info.copy()
        flavor['properties'] = utils.format_dict(resource_flavor.get_keys())
        flavor.pop("links", None)
        return zip(*sorted(six.iteritems(flavor)))
| |
from __future__ import print_function
import sys
import json
# Identify this library to the remote service on every request.
HEADER = {'User-Agent': 'RealTimeWeb BusinessSearch library for educational purposes'}
# True when running under Python 3; used throughout for 2/3 compatibility.
PYTHON_3 = sys.version_info >= (3, 0)
if PYTHON_3:
    import urllib.error
    import urllib.request as request
    from urllib.parse import quote_plus
else:
    import urllib2
    from urllib import quote_plus
# Embed your own keys for simplicity
CONSUMER_KEY = "your key goes here"
CONSUMER_SECRET = "your key goes here"
ACCESS_TOKEN = "your key goes here"
ACCESS_TOKEN_SECRET = "your key goes here"
# Remove these lines; we just do this for our own simplicity
# with open('../src/secrets.txt', 'r') as secrets:
#     CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET = \
#         [l.strip() for l in secrets.readlines()]
# Auxiliary helpers
def _parse_float(value, default=0.0):
"""
Attempt to cast *value* into a float, returning *default* if it fails.
"""
if value is None:
return default
try:
return float(value)
except ValueError:
return default
def _iteritems(_dict):
    """
    Internal method to factor-out Py2-to-3 differences in dictionary item
    iterator methods.
    :param dict _dict: the dictionary to parse
    :returns: the iterable dictionary
    """
    return _dict.items() if PYTHON_3 else _dict.iteritems()
def _urlencode(query, params):
    """
    Internal method to combine the url and params into a single url string.
    :param str query: the base url to query
    :param dict params: the parameters to send to the url
    :returns: a *str* of the full url
    """
    pairs = [key + '=' + quote_plus(str(value))
             for key, value in _iteritems(params)]
    return query + '?' + '&'.join(pairs)
def _get(url):
    """
    Internal method to convert a URL into its response (a *str*).
    :param str url: the url to request a response from
    :returns: the *str* response
    """
    if PYTHON_3:
        req = request.Request(url, headers=HEADER)
        return request.urlopen(req).read().decode('utf-8')
    req = urllib2.Request(url, headers=HEADER)
    return urllib2.urlopen(req).read()
def _recursively_convert_unicode_to_str(input):
    """
    Force the given input to only use `str` instead of `bytes` or `unicode`.
    Recurses into dicts and lists; other types pass through unchanged.
    :params input: The bytes/unicode input
    :returns str: The input converted to a `str`
    """
    convert = _recursively_convert_unicode_to_str
    if isinstance(input, dict):
        return {convert(key): convert(value)
                for key, value in input.items()}
    if isinstance(input, list):
        return [convert(element) for element in input]
    if not PYTHON_3:
        return input.encode('utf-8')
    if isinstance(input, str):
        # Replace non-ascii characters so cached data stays plain str.
        return str(input.encode('ascii', 'replace').decode('ascii'))
    return input
# Cache
# In-memory store of canned HTTP responses, keyed by request URL.
_CACHE = {}
# How many times each cached key has been served (drives replay patterns).
_CACHE_COUNTER = {}
# When True, live responses are recorded into _CACHE (see _start_editing).
_EDITABLE = False
# When False, requests are served from the local cache instead of the network.
_CONNECTED = True
# Replay pattern stored with newly cached entries ("repeat" replays the last).
_PATTERN = "repeat"
def _start_editing(pattern="repeat"):
"""
Start adding seen entries to the cache. So, every time that you make a request,
it will be saved to the cache. You must :ref:`_save_cache` to save the
newly edited cache to disk, though!
"""
global _EDITABLE, _PATTERN
_EDITABLE = True
_PATTERN = pattern
def _stop_editing():
"""
Stop adding seen entries to the cache.
"""
global _EDITABLE
_EDITABLE = False
def _add_to_cache(key, value):
    """
    Internal method to add a new key-value to the local cache.
    :param str key: The new url to add to the cache
    :param str value: The HTTP response for this key.
    :returns: void
    """
    existing = _CACHE.get(key)
    if existing is not None:
        existing.append(value)
    else:
        # First entry for this key: slot 0 holds the replay pattern.
        _CACHE[key] = [_PATTERN, value]
        _CACHE_COUNTER[key] = 0
def _clear_key(key):
    """
    Internal method to remove a key from the local cache.
    :param str key: The url to remove from the cache
    """
    _CACHE.pop(key, None)
def _save_cache(filename="cache.json"):
"""
Internal method to save the cache in memory to a file, so that it can be used later.
:param str filename: the location to store this at.
"""
with open(filename, 'w') as f:
json.dump({"data": _CACHE, "metadata": ""}, f)
# TODO Need to uncomment
# def _lookup(key):
# """
# Internal method that looks up a key in the local cache.
#
# :param key: Get the value based on the key from the cache.
# :type key: string
# :returns: void
# """
# if key not in _CACHE:
# return ""
# if _CACHE_COUNTER[key] >= len(_CACHE[key][1:]):
# if _CACHE[key][0] == "empty":
# return ""
# elif _CACHE[key][0] == "repeat" and _CACHE[key][1:]:
# return _CACHE[key][-1]
# elif _CACHE[key][0] == "repeat":
# return ""
# else:
# _CACHE_COUNTER[key] = 1
# else:
# _CACHE_COUNTER[key] += 1
# if _CACHE[key]:
# return _CACHE[key][_CACHE_COUNTER[key]]
# else:
# return ""
def _lookup(key):
    """
    Internal method that looks up a key in the local cache and returns the
    next canned response for it.

    The previous implementation ignored its argument and read a hard-coded
    'response.json' debug file; this restores the cache-based lookup that
    disconnect()/_add_to_cache prepare (and that the commented-out block
    above documents).

    :param str key: the request URL used as the cache key
    :returns: the cached *str* response, or "" when nothing is cached
    """
    if key not in _CACHE:
        return ""
    # Slot 0 of each entry is the replay pattern; slots 1.. are responses.
    if _CACHE_COUNTER[key] >= len(_CACHE[key][1:]):
        if _CACHE[key][0] == "empty":
            return ""
        elif _CACHE[key][0] == "repeat" and _CACHE[key][1:]:
            # "repeat": keep returning the last recorded response.
            return _CACHE[key][-1]
        elif _CACHE[key][0] == "repeat":
            return ""
        else:
            # Any other pattern cycles back to the first response.
            _CACHE_COUNTER[key] = 1
    else:
        _CACHE_COUNTER[key] += 1
    if _CACHE[key]:
        return _CACHE[key][_CACHE_COUNTER[key]]
    else:
        return ""
def connect():
    """
    Connect to the online data source in order to get up-to-date information.
    :returns: void
    """
    global _CONNECTED
    _CONNECTED = True
def disconnect(filename="../src/cache.json"):
"""
Connect to the local cache, so no internet connection is required.
:returns: void
"""
global _CONNECTED, _CACHE
try:
with open(filename, 'r') as f:
_CACHE = _recursively_convert_unicode_to_str(json.load(f))['data']
except (OSError, IOError) as e:
raise BusinessSearchException(
"The cache file '{}' was not found.".format(filename))
for key in _CACHE.keys():
_CACHE_COUNTER[key] = 0
_CONNECTED = False
# Exceptions
class BusinessSearchException(Exception):
    """Raised when a Business Search query fails or returns bad data."""
# Domain Objects
class Business(object):
    """
    Information about a specific business.
    """

    def __init__(self, rating=None, description=None, phone=None, location=None, business_id=None, name=None):
        """
        Creates a new Business.

        :param self: This object
        :type self: Business
        :param rating: Rating for this business (value ranges from 1, 1.5, ... 4.5, 5)
        :type rating: float
        :param description: Snippet text associated with this business
        :type description: str
        :param phone: Phone number for this business formatted for display
        :type phone: str
        :param location: Address for this business formatted for display. Includes all address fields, cross streets and city, state_code, etc.
        :type location: str
        :param business_id: A uniquely identifying id for this business.
        :type business_id: str
        :param name: Name of this business.
        :type name: str
        :returns: Business
        """
        self.name = name
        self.business_id = business_id
        self.location = location
        self.phone = phone
        self.description = description
        self.rating = rating

    def __unicode__(self):
        return """ <Business Name: {}> """.format(self.name)

    def __repr__(self):
        text = self.__unicode__()
        return text if PYTHON_3 else text.encode('utf-8')

    def __str__(self):
        text = self.__unicode__()
        return text if PYTHON_3 else text.encode('utf-8')

    def _to_dict(self):
        # Plain-dict view of the business; rating is coerced to float.
        return {'name': self.name,
                'business_id': self.business_id,
                'location': self.location,
                'phone': self.phone,
                'description': self.description,
                'rating': _parse_float(self.rating)}

    @staticmethod
    def _from_json(json_data):
        """
        Creates a Business from json data.

        :param json_data: The raw json data to parse (a list whose first
            element is the business record)
        :type json_data: list
        :returns: Business
        """
        if json_data is None:
            return Business()
        try:
            record = json_data[0]
            return Business(name=record['name'],
                            business_id=record['business_id'],
                            location=record['location'],
                            phone=record['phone'],
                            description=record['description'],
                            rating=_parse_float(record['rating']))
        except KeyError:
            raise BusinessSearchException(
                "The given information was incomplete.")
# Service Methods
def _fetch_business_info(params):
    """
    Internal method to form and query the server
    :param dict params: the parameters to pass to the server
    :returns: the JSON response object
    :raises BusinessSearchException: on HTTP errors, empty results, or
        unparseable responses
    """
    baseurl = 'http://api.yelp.com/v2/business/'
    query = _urlencode(baseurl, params)
    # When disconnected, serve the canned response via _lookup instead of
    # hitting the network. The py2/py3 branches differ only in which
    # HTTPError class they catch.
    if PYTHON_3:
        try:
            result = _get(query) if _CONNECTED else _lookup(query)
        except urllib.error.HTTPError:
            raise BusinessSearchException("Make sure you entered a valid query")
    else:
        try:
            result = _get(query) if _CONNECTED else _lookup(query)
        except urllib2.HTTPError:
            raise BusinessSearchException("Make sure you entered a valid query")
    if not result:
        raise BusinessSearchException("There were no results")
    # Clean up quirks in the live payload before parsing it as JSON.
    if _CONNECTED:
        result = result.replace("// ", "")  # Remove strange double slashes
        result = result.replace("\n", "")  # Remove all new lines
        result = result.replace(" ", "")  # Remove all extra spaces
    try:
        # While editing, record the (cleaned) response before parsing so
        # the cache stores exactly what json.loads accepted.
        if _CONNECTED and _EDITABLE:
            _add_to_cache(query, result)
        json_res = json.loads(result)
    except ValueError:
        raise BusinessSearchException("Internal Error")
    return json_res
def get_business_information(term, location):
    """
    Forms and poses the query to get information from the database

    :param str term: The term to search for ex. 'food'
    :param str location: The zip code or state to use in the search
    :return: the business information as a plain dict
    """
    if not (isinstance(term, str) and isinstance(location, str)):
        raise BusinessSearchException("Please enter a valid query")
    response = _fetch_business_info({'term': term, 'location': location})
    return Business._from_json(response)._to_dict()
| |
from typing import Dict, List, Optional
import urllib3
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # ISO8601 format with UTC, default in XSOAR
AF_TAGS_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'  # timestamp format used by the AutoFocus tags API
# Maps an AutoFocus tag class to the XSOAR threat-intel object type it becomes.
TAG_CLASS_TO_DEMISTO_TYPE = {
    'malware_family': ThreatIntel.ObjectsNames.MALWARE,
    'actor': ThreatIntel.ObjectsNames.THREAT_ACTOR,
    'campaign': ThreatIntel.ObjectsNames.CAMPAIGN,
    'malicious_behavior': ThreatIntel.ObjectsNames.ATTACK_PATTERN,
}
# Relationship name to use for each (entity_a type -> entity_b type) pair.
MAP_RELATIONSHIPS = {
    ThreatIntel.ObjectsNames.MALWARE:
        {
            ThreatIntel.ObjectsNames.MALWARE: 'related-to',
            ThreatIntel.ObjectsNames.THREAT_ACTOR: 'used-by',
            ThreatIntel.ObjectsNames.CAMPAIGN: 'used-by',
            ThreatIntel.ObjectsNames.ATTACK_PATTERN: 'used-by'
        },
    ThreatIntel.ObjectsNames.THREAT_ACTOR:
        {
            ThreatIntel.ObjectsNames.MALWARE: 'uses',
            ThreatIntel.ObjectsNames.THREAT_ACTOR: 'related-to',
            ThreatIntel.ObjectsNames.CAMPAIGN: 'attributed-by',
            ThreatIntel.ObjectsNames.ATTACK_PATTERN: 'uses'
        },
    ThreatIntel.ObjectsNames.CAMPAIGN:
        {
            ThreatIntel.ObjectsNames.MALWARE: 'uses',
            ThreatIntel.ObjectsNames.THREAT_ACTOR: 'attributed-to',
            ThreatIntel.ObjectsNames.CAMPAIGN: 'related-to',
            ThreatIntel.ObjectsNames.ATTACK_PATTERN: 'used-by'
        },
    ThreatIntel.ObjectsNames.ATTACK_PATTERN:
        {
            ThreatIntel.ObjectsNames.MALWARE: 'uses',
            ThreatIntel.ObjectsNames.THREAT_ACTOR: 'used-by',
            ThreatIntel.ObjectsNames.CAMPAIGN: 'uses',
            ThreatIntel.ObjectsNames.ATTACK_PATTERN: 'related-to'
        },
}
# Default reputation score per threat-intel object type.
SCORES_MAP = {
    ThreatIntel.ObjectsNames.MALWARE: ThreatIntel.ObjectsScore.MALWARE,
    ThreatIntel.ObjectsNames.THREAT_ACTOR: ThreatIntel.ObjectsScore.THREAT_ACTOR,
    ThreatIntel.ObjectsNames.CAMPAIGN: ThreatIntel.ObjectsScore.CAMPAIGN,
    ThreatIntel.ObjectsNames.ATTACK_PATTERN: ThreatIntel.ObjectsScore.ATTACK_PATTERN,
}
# The page size in the AutoFocus response (can be 1-200)
PAGE_SIZE = 50
''' CLIENT CLASS '''
class Client(BaseClient):
    """
    Client class to interact with AutoFocus API
    Args:
        api_key: AutoFocus API Key.
        base_url: AutoFocus API base URL.
        verify: whether to verify SSL certificates.
        proxy: whether to use system proxy settings.
    """
    def __init__(self, api_key, base_url, verify, proxy):
        super().__init__(base_url=base_url, verify=verify, proxy=proxy)
        # AutoFocus authenticates with an 'apiKey' header on every request.
        self.headers = {
            'apiKey': api_key,
            'Content-Type': 'application/json'
        }
    def get_tags(self, data: Dict[str, Any]):  # pragma: no cover
        # POST /tags — page through tag summaries; *data* carries the
        # paging/sorting parameters (pageNum, pageSize, sortBy, order).
        res = self._http_request('POST',
                                 url_suffix='tags',
                                 headers=self.headers,
                                 json_data=data,
                                 timeout=90,
                                 )
        return res
    def get_tag_details(self, public_tag_name: str):  # pragma: no cover
        # POST /tag/<public_tag_name> — full details for a single tag.
        res = self._http_request('POST',
                                 url_suffix=f'tag/{public_tag_name}',
                                 headers=self.headers,
                                 timeout=90,
                                 )
        return res
    def build_iterator(self, is_get_command: bool, limit: int = -1) -> list:
        """
        Retrieves all entries from the feed.
        This method implements the logic to get tags from the feed.
        Args:
            limit: max amount of results to return
            is_get_command: whether this method is called from the get-indicators-command
        Returns:
            A list of objects, containing the indicators.
        """
        results: list = []
        if is_get_command:
            # since get-indicators command is used mostly for debug,
            # getting the tags from the first page is sufficient
            page_num = 0
        else:
            integration_context = get_integration_context()
            # an empty integration context means this is the first fetch
            if not integration_context:
                page_num = 0
                time_of_first_fetch = date_to_timestamp(datetime.now(), DATE_FORMAT)
                set_integration_context({'time_of_first_fetch': time_of_first_fetch})
            else:
                # resume the "first level fetch" from the saved page
                page_num = integration_context.get('page_num', 0)
        get_tags_response = self.get_tags({
            'pageNum': page_num,
            'pageSize': PAGE_SIZE,
            'sortBy': 'created_at'
        })
        tags = get_tags_response.get('tags', [])
        # when finishing the "first level fetch" (getting all the tags from the feed), the next call to the api
        # will be with a page num greater than the total pages, and the api should return an empty tags list.
        if not tags:
            # now the fetch will retrieve only tags that have been updated after the last fetch time
            return incremental_level_fetch(self)
        # this is the "first level fetch" logic. Every fetch returns at most PAGE_SIZE indicators from the feed.
        for tag in tags:
            if is_get_command and limit > 0:
                if len(results) >= limit:
                    return results
            public_tag_name = tag.get('public_tag_name', '')
            tag_details_response = self.get_tag_details(public_tag_name)
            results.append(tag_details_response)
        if not is_get_command:
            # persist progress so the next fetch continues from the next page
            page_num += 1
            context = get_integration_context()
            context['page_num'] = page_num
            set_integration_context(context)
        return results
''' HELPER FUNCTIONS '''
def incremental_level_fetch(client: Client) -> list:
    """
    This method implements the incremental level of the feed. It checks if any updates
    have been made in the tags from the last fetch time, and returns the updated tags.
    Args:
        client: Client object
    Returns:
        A list of tag details represents the tags that have been updated.
    """
    results: list = []
    integration_context = get_integration_context()
    # This field saves tags that have been updated since the last fetch time and need to be updated in demisto
    list_of_all_updated_tags = argToList(integration_context.get('tags_need_to_be_fetched', ''))
    time_from_last_update = integration_context.get('time_of_first_fetch')
    index_to_delete = 0
    # First drain any backlog left over from a previous run, so no tag is missed.
    for tag in list_of_all_updated_tags:  # pragma: no cover
        if len(results) < PAGE_SIZE:
            results.append(client.get_tag_details(tag.get('public_tag_name', '')))
            index_to_delete += 1
        else:
            # Backlog alone fills a page: save the remainder and stop here.
            context = get_integration_context()
            context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT)
            context['tags_need_to_be_fetched'] = list_of_all_updated_tags[index_to_delete:]
            set_integration_context(context)
            return results
    list_of_all_updated_tags = get_all_updated_tags_since_last_fetch(client,
                                                                     list_of_all_updated_tags,
                                                                     time_from_last_update)
    # add only PAGE_SIZE tag_details to results, so we won't make too many calls to the API
    index_to_delete = 0
    for tag in list_of_all_updated_tags:
        if len(results) < PAGE_SIZE:
            public_tag_name = tag.get('public_tag_name')
            response = client.get_tag_details(public_tag_name)
            results.append(response)
            index_to_delete += 1
        else:
            break
    # delete from the list all tags that will be returned this fetch
    list_of_all_updated_tags = list_of_all_updated_tags[index_to_delete:]
    # update integration context
    context = get_integration_context()
    context['tags_need_to_be_fetched'] = list_of_all_updated_tags
    context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT)
    set_integration_context(context)
    return results
def get_all_updated_tags_since_last_fetch(client: Client,
                                          list_of_all_updated_tags: list,
                                          time_from_last_update: int) -> list:  # pragma: no cover
    """
    This method makes API calls to get all the tags that have been updated since the last fetch time.
    It filters the tags according to the update time (and gets other pages if needed),
    adds them to list_of_all_updated_tags and returns it.
    Args:
        client: Client object
        list_of_all_updated_tags: accumulator of tags already known to need fetching
        time_from_last_update: timestamp (ms) of the previous fetch
    Returns:
        List of all tags that have been updated and need to be fetched.
    """
    page_num = 0
    has_updates = True
    while has_updates:
        # Pages are sorted by 'updated_at' descending, so once we see one
        # tag older than the cutoff we can stop paging entirely.
        response = client.get_tags({
            'pageNum': page_num,
            'pageSize': 200,
            'sortBy': 'updated_at',
            'order': 'desc'
        })
        tags = response.get('tags', [])
        for tag in tags:
            update_time = tag.get('updated_at')
            # NOTE(review): if a tag lacks 'updated_at', update_time stays
            # None and date_to_timestamp(None, ...) below would fail —
            # confirm the API always returns this field.
            update_time = datetime.strptime(update_time, AF_TAGS_DATE_FORMAT).strftime(
                DATE_FORMAT) if update_time else None
            update_time = date_to_timestamp(update_time, DATE_FORMAT)
            if update_time >= time_from_last_update:
                # this tag has been updated, so it needs to be added to the list of updated tags
                list_of_all_updated_tags.append(
                    {'public_tag_name': tag.get('public_tag_name')})
            else:
                has_updates = False
                break
        page_num += 1
    # the list contains all tags that have been updated since the last fetch time
    return list_of_all_updated_tags
def get_tag_class(tag_class: Optional[str], source: Optional[str]) -> Optional[str]:
    """
    Returns the tag class as demisto indicator type.
    Args:
        tag_class: tag class name
        source: tag source
    Returns:
        The tag class as demisto indicator type, None if class is not specified.
    """
    if not tag_class:
        return None
    # Attack Pattern indicators are only created when sourced from Unit 42.
    if tag_class == 'malicious_behavior' and source != 'Unit 42':
        return None
    return TAG_CLASS_TO_DEMISTO_TYPE.get(tag_class)
def get_tag_groups_names(tag_groups: list) -> list:
    """
    Returns the tag groups as a list of the groups names.
    Args:
        tag_groups: list of all groups
    Returns:
        The tag groups as a list of the groups names
    """
    # Each entry is a dict with a group name and description; keep only
    # the non-empty names.
    return [group.get('tag_group_name', '')
            for group in tag_groups
            if group.get('tag_group_name', '')]
def create_publications(refs: list) -> list:
    """
    Creates the publications list of the indicator
    Args:
        refs: a list of all publications
    Returns:
        A list of publications of the indicator
    """
    # Map each AutoFocus reference onto the XSOAR publication shape.
    return [
        {'link': ref.get('url', ''),
         'title': ref.get('title', ''),
         'source': ref.get('source', ''),
         'timestamp': ref.get('created', '')}
        for ref in refs
    ]
def create_indicators_fields(tag_details: Dict[str, Any]) -> Dict[str, Any]:
    """
    Returns the indicator fields
    Args:
        tag_details: a dictionary containing the tag details.
    Returns:
        A dictionary represents the indicator fields.
    """
    tag = tag_details.get('tag', {})

    def _to_iso(timestamp):
        # Convert an AutoFocus timestamp to ISO8601; None when absent.
        if not timestamp:
            return None
        return datetime.strptime(timestamp, AF_TAGS_DATE_FORMAT).strftime(DATE_FORMAT)

    fields: Dict[str, Any] = {
        'publications': create_publications(json.loads(tag.get('refs', '[]'))),
        'aliases': tag_details.get('aliases', []),
        'description': tag.get('description', ''),
        'lastseenbysource': _to_iso(tag.get('lasthit', '')),
        'updateddate': _to_iso(tag.get('updated_at', '')),
        'reportedby': tag.get('source', ''),
    }
    remove_nulls_from_dictionary(fields)
    return fields
def update_integration_context_with_indicator_data(public_tag_name: str, tag_name: str, tag_type: str) -> None:
    """
    Records an already-resolved tag in the integration context so later
    runs can skip the extra API call for it.
    Args:
        tag_name: tag name
        public_tag_name: public tag name (used as the cache key)
        tag_type: tag type
    """
    context = get_integration_context()
    seen = context.get('seen_tags', {})
    seen[public_tag_name] = {'tag_name': tag_name, 'tag_type': tag_type}
    context['seen_tags'] = seen
    set_integration_context(context)
def create_relationships_for_tag(client: Client, name: str, tag_type: str, related_tags: List[str]):
    """
    Creates all the relationships of an indicator.
    Args:
        client: Client class
        name: The indicator's name
        tag_type: The indicator's type
        related_tags: A list of all indicators related to the specific indicator
    Returns:
        a list represents the relationships of an indicator.
    """
    relationships: list = []
    integration_context = get_integration_context()
    seen_tags = integration_context.get('seen_tags', {})
    for related_tag_public_name in related_tags:
        if related_tag_public_name in seen_tags.keys():
            # Cache hit: reuse data stored by a previous fetch, no API call.
            related_tag_name = seen_tags.get(related_tag_public_name, {}).get('tag_name', '')
            related_tag_type = seen_tags.get(related_tag_public_name, {}).get('tag_type', '')
        else:
            try:
                related_tag_details = client.get_tag_details(related_tag_public_name)
            except DemistoException:
                # Best effort: a failing related tag is logged and skipped,
                # the remaining relationships are still created.
                demisto.debug(
                    f'Unit 42 Intel Objects Feed: Could not create relationship for {name} with {related_tag_public_name}.')
                continue
            tag = related_tag_details.get('tag', {})
            related_tag_name = tag.get('tag_name', '')
            tag_class = tag.get('tag_class', '')
            source = tag.get('source', '')
            related_tag_type = get_tag_class(tag_class, source)
        # Tags whose class does not map to an indicator type are ignored.
        if related_tag_type:
            relationships.append(
                create_relationship(name, tag_type, related_tag_name, related_tag_type).to_indicator())
            # Remember this tag so later indicators skip the API call too.
            update_integration_context_with_indicator_data(related_tag_public_name,
                                                           related_tag_name,
                                                           related_tag_type)
    return relationships
def create_relationship(tag_name: str, tag_class: str, related_tag_name: str, related_tag_class: str):  # pragma: no cover
    """
    Returns an EntityRelationship object for the tag
    Args:
        tag_name: the tag name
        tag_class: the tag type
        related_tag_name: the related tag name
        related_tag_class: the related tag type
    Returns:
        EntityRelationship object
    """
    # The relationship name (and its reverse) is looked up from the
    # MAP_RELATIONSHIPS table by the ordered pair of indicator types.
    return EntityRelationship(
        name=MAP_RELATIONSHIPS.get(tag_class, {}).get(related_tag_class),
        entity_a=tag_name,
        entity_a_type=tag_class,
        entity_b=related_tag_name,
        entity_b_type=related_tag_class,
        reverse_name=MAP_RELATIONSHIPS.get(related_tag_class, {}).get(tag_class),
    )
''' COMMAND FUNCTIONS '''
def test_module(client: Client) -> str:  # pragma: no cover
    """
    Builds the iterator to check that the feed is accessible.
    Args:
        client: Client object.
    Returns:
        Returning 'ok' indicates that the integration works like it is supposed to.
        Connection to the service is successful.
        Raises exceptions if something goes wrong.
    """
    # Requesting a single-item page is enough to validate connectivity/auth.
    client.get_tags(data={'pageSize': 1})
    return 'ok'
def fetch_indicators(client: Client,
                     is_get_command: bool,
                     tlp_color: Optional[str] = None,
                     feed_tags: List = None,
                     limit: int = -1,
                     create_relationships: bool = True,
                     ) -> List[Dict]:
    """
    Retrieves indicators from the feed.

    Args:
        client (Client): Client object.
        is_get_command: whether this method is called from the get-indicators-command.
        tlp_color (str): Traffic Light Protocol color.
        feed_tags (list): tags to assign fetched indicators.
        limit (int): max amount of results to return.
        create_relationships: whether to create indicators relationships.

    Returns:
        Indicators list.
    """
    iterator = client.build_iterator(is_get_command, limit)
    indicators = []
    for tag_details in iterator:
        tag_dict = tag_details.get('tag', {})
        public_tag_name = tag_dict.get('public_tag_name', '')
        # Bug fix: this previously read `tag_dict.get('tag_name' '')` -- the
        # missing comma concatenated the two string literals, so no default
        # was supplied and the value could be None instead of ''.
        tag_name = tag_dict.get('tag_name', '')
        tag_class = tag_dict.get('tag_class', '')
        source = tag_dict.get('source', '')
        tag_type = get_tag_class(tag_class, source)
        if not tag_type:
            # Tags whose class does not map to an indicator type are skipped.
            continue
        raw_data = {
            'value': tag_name,
            'type': tag_type,
        }
        # Cache the tag so create_relationships_for_tag can skip API calls.
        update_integration_context_with_indicator_data(public_tag_name, tag_name, tag_type)
        raw_data.update(tag_details)
        indicator_obj = {
            'value': tag_name,
            'type': tag_type,
            'fields': create_indicators_fields(tag_details),
            'rawJSON': raw_data,
            'score': SCORES_MAP.get(tag_type)
        }
        related_tags = tag_details.get('related_tags', [])
        if related_tags and create_relationships:
            relationships = create_relationships_for_tag(client, tag_name, tag_type, related_tags)
            if relationships:
                indicator_obj['relationships'] = relationships
        # Tag-group names plus any user-configured feed tags become the
        # indicator's 'tags' field.
        tag_groups = get_tag_groups_names(tag_details.get('tag_groups', []))
        if feed_tags or tag_groups:
            if feed_tags:
                tag_groups.extend(feed_tags)
            indicator_obj['fields']['tags'] = tag_groups
        if tlp_color:
            indicator_obj['fields']['trafficlightprotocol'] = tlp_color
        indicators.append(indicator_obj)
    return indicators
def get_indicators_command(client: Client,
                           params: Dict[str, str],
                           args: Dict[str, str]
                           ) -> CommandResults:  # pragma: no cover
    """
    Wrapper for retrieving indicators from the feed to the war-room.
    Args:
        client: Client object
        params: A dictionary containing the integration parameters
        args: A dictionary containing the command arguments
    Returns:
        CommandResults object containing a human readable output for war-room representation.
    """
    limit = arg_to_number(args.get('limit', '10')) or 10
    # Cap the requested limit at a single API page.
    if limit > PAGE_SIZE:
        demisto.debug(f'Unit 42 Intel Objects Feed: limit must be under {PAGE_SIZE}. Setting limit to {PAGE_SIZE}.')
        limit = PAGE_SIZE
    tlp_color = params.get('tlp_color')
    feed_tags = argToList(params.get('feedTags', ''))
    # NOTE: relationships are intentionally not created for this preview
    # command (create_relationships=False).
    indicators = fetch_indicators(client=client,
                                  is_get_command=True,
                                  tlp_color=tlp_color,
                                  feed_tags=feed_tags,
                                  limit=limit,
                                  create_relationships=False)
    human_readable = tableToMarkdown('Indicators from Unit42 Intel Objects Feed:', indicators,
                                     headers=['value', 'type', 'fields'], headerTransform=string_to_table_header,
                                     removeNull=True)
    # Indicators are returned as raw_response only; no context outputs.
    return CommandResults(
        readable_output=human_readable,
        outputs_prefix='',
        outputs_key_field='',
        raw_response=indicators,
        outputs={},
    )
def fetch_indicators_command(client: Client, params: Dict[str, Any]) -> List[Dict]:
    """
    Wrapper for fetching indicators from the feed to the Indicators tab.
    Args:
        client: Client object with request
        params: demisto.params()
    Returns:
        List of indicators from the feed.
    """
    # Thin adapter: translate instance parameters into fetch_indicators args.
    return fetch_indicators(
        client=client,
        is_get_command=False,
        tlp_color=params.get('tlp_color'),
        feed_tags=argToList(params.get('feedTags', '')),
        create_relationships=params.get('create_relationships', True),
    )
''' MAIN FUNCTION '''
def main():  # pragma: no cover
    """
    main function, parses params and runs command functions
    """
    params = demisto.params()
    # NOTE(review): the checkbox is named 'insecure' but the negated value is
    # passed as `verify`, so checking it disables certificate verification.
    insecure = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    api_key = params.get('api_key', {}).get('password', '')
    base_url = params.get('url')
    if not api_key:
        # Fall back to the API key embedded in the license (6.5.0+ only).
        if is_demisto_version_ge('6.5.0'):
            api_key = demisto.getLicenseCustomField('AutoFocusTagsFeed.api_key')
            if not api_key:
                raise DemistoException('Could not resolve the API key from the license nor the instance configuration.')
        else:
            raise DemistoException('An API key must be specified in order to use this integration')
    command = demisto.command()
    args = demisto.args()
    demisto.debug(f'Command being called is {command}')
    try:
        client = Client(
            api_key=api_key,
            base_url=base_url,
            verify=insecure,
            proxy=proxy,
        )
        if command == 'test-module':
            return_results(test_module(client))
        elif command == 'unit42intel-objects-feed-get-indicators':
            return_results(get_indicators_command(client, params, args))
        elif command == 'fetch-indicators':
            # This is the command that initiates a request to the feed endpoint and create new indicators objects from
            # the data fetched. If the integration instance is configured to fetch indicators, then this is the command
            # that will be executed at the specified feed fetch interval.
            indicators = fetch_indicators_command(client, params)
            # Create indicators in batches to keep server calls bounded.
            for iter_ in batch(indicators, batch_size=2000):
                demisto.createIndicators(iter_)
        else:
            raise NotImplementedError(f'Command {command} is not implemented.')
    except Exception as e:
        demisto.error(traceback.format_exc())  # Print the traceback
        return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
# Entry point: also covers the 'builtin(s)' module names the platform uses
# when executing integration code.
if __name__ in ['__main__', 'builtin', 'builtins']:
    main()
| |
"""Unit test for newnet - configuring unshared network subsystem.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import multiprocessing
import os
import unittest
import mock
# Disable W0611: Unused import
import treadmill.tests.treadmill_test_skip_windows # pylint: disable=W0611
import treadmill
from treadmill import newnet
class NewnetTest(unittest.TestCase):
    """Mock test for treadmill.newnet.

    Each test patches os.fork to choose which side of the fork is
    exercised: a non-zero return value drives the parent path, zero
    drives the child path.
    """

    @mock.patch('multiprocessing.synchronize.Event', mock.Mock())
    @mock.patch('os.fork', mock.Mock(return_value=1234))
    @mock.patch('os.getpid', mock.Mock(return_value=7777))
    @mock.patch('os.waitpid', mock.Mock(return_value=(1234, 0)))
    @mock.patch('treadmill.syscall.unshare.unshare', mock.Mock(return_value=0))
    @mock.patch('treadmill.newnet._configure_veth', mock.Mock())
    def test_create_newnet_parent(self):
        """Tests configuring unshared network (parent)"""
        # Access protected _configure_veth
        # pylint: disable=W0212
        mock_event = multiprocessing.synchronize.Event.return_value
        newnet.create_newnet(
            'foo1234', '192.168.0.100', '192.168.254.254', '10.0.0.1',
        )
        # Parent must unshare the network namespace, signal the child,
        # reap it and configure the veth end.
        treadmill.syscall.unshare.unshare.assert_called_with(
            treadmill.syscall.unshare.CLONE_NEWNET
        )
        self.assertTrue(mock_event.set.called)
        os.waitpid.assert_called_with(1234, 0)
        treadmill.newnet._configure_veth.assert_called_with(
            'foo1234', '192.168.0.100', '192.168.254.254', '10.0.0.1',
        )

    @mock.patch('multiprocessing.synchronize.Event', mock.Mock())
    @mock.patch('os.fork', mock.Mock(return_value=0))
    @mock.patch('os.getpid', mock.Mock(return_value=7777))
    @mock.patch('os.getppid', mock.Mock(return_value=7777))
    @mock.patch('treadmill.netdev.link_set_netns', mock.Mock())
    @mock.patch('treadmill.utils.sys_exit', mock.Mock())
    def test_create_newnet_child(self):
        """Tests configuring veth pair (child)"""
        mock_event = multiprocessing.synchronize.Event.return_value
        newnet.create_newnet(
            'foo1234', '192.168.0.100', '192.168.254.254', '10.0.0.1',
        )
        # Child waits for the parent's signal, moves the device into the
        # parent's netns (pid 7777) and exits cleanly.
        self.assertTrue(mock_event.wait.called)
        treadmill.netdev.link_set_netns.assert_called_with(
            'foo1234', 7777,
        )
        treadmill.utils.sys_exit.assert_called_with(0)

    @mock.patch('multiprocessing.synchronize.Event', mock.Mock())
    @mock.patch('os.fork', mock.Mock(return_value=0))
    @mock.patch('os.getpid', mock.Mock(return_value=7777))
    @mock.patch('os.getppid', mock.Mock(return_value=1))
    @mock.patch('treadmill.netdev.link_set_netns', mock.Mock())
    @mock.patch('treadmill.utils.sys_exit', mock.Mock())
    def test_create_newnet_child_fail(self):
        """Tests configuring veth pair failure (child, parent dies)"""
        mock_event = multiprocessing.synchronize.Event.return_value
        # wait() returning False simulates the parent never signalling.
        mock_event.wait.return_value = False
        treadmill.utils.sys_exit.side_effect = SystemExit()
        self.assertRaises(
            SystemExit,
            newnet.create_newnet,
            'foo1234', '192.168.0.100', '192.168.254.254', '10.0.0.1',
        )
        self.assertTrue(mock_event.wait.called)
        # No netns move must happen when the parent went away.
        self.assertFalse(treadmill.netdev.link_set_netns.called)
        treadmill.utils.sys_exit.assert_called_with(255)

    @mock.patch('multiprocessing.synchronize.Event', mock.Mock())
    @mock.patch('os.fork', mock.Mock(return_value=1234))
    @mock.patch('os.getpid', mock.Mock(return_value=7777))
    @mock.patch('os.waitpid', mock.Mock())
    @mock.patch('treadmill.syscall.unshare.unshare', mock.Mock(return_value=0))
    @mock.patch('treadmill.newnet._configure_veth', mock.Mock())
    def test_create_newnet_parent_fail(self):
        """Tests configuring unshared network (parent, child fails)"""
        # Access protected _configure_veth
        # pylint: disable=W0212
        mock_event = multiprocessing.synchronize.Event.return_value
        # waitpid status 255 << 8 encodes a child exit code of 255.
        os.waitpid.return_value = (1234, 255 << 8)
        self.assertRaises(
            treadmill.exc.TreadmillError,
            newnet.create_newnet,
            'foo1234', '192.168.0.100', '192.168.254.254', '10.0.0.1',
        )
        treadmill.syscall.unshare.unshare.assert_called_with(
            treadmill.syscall.unshare.CLONE_NEWNET
        )
        self.assertTrue(mock_event.set.called)
        os.waitpid.assert_called_with(1234, 0)
        # Configuration is skipped when the child reported failure.
        self.assertFalse(treadmill.newnet._configure_veth.called)

    @mock.patch('treadmill.iptables.initialize_container', mock.Mock())
    @mock.patch('treadmill.netdev.addr_add', mock.Mock())
    @mock.patch('treadmill.netdev.dev_conf_arp_ignore_set', mock.Mock())
    @mock.patch('treadmill.netdev.link_set_name', mock.Mock())
    @mock.patch('treadmill.netdev.link_set_up', mock.Mock())
    @mock.patch('treadmill.netdev.route_add', mock.Mock())
    def test__configure_veth(self):
        """Tests configuring container networking.
        """
        # Access protected _configure_veth
        # pylint: disable=W0212
        newnet._configure_veth(
            'test1234', '192.168.0.100', '192.168.254.254'
        )
        treadmill.netdev.link_set_up.assert_has_calls(
            [
                mock.call('lo'),
                mock.call('eth0'),
            ]
        )
        treadmill.netdev.dev_conf_arp_ignore_set.assert_called_with('eth0', 3)
        treadmill.netdev.addr_add.assert_called_with(
            '192.168.0.100/32', 'eth0', addr_scope='link'
        )
        treadmill.netdev.route_add.assert_has_calls(
            [
                mock.call(
                    '192.168.254.254',
                    devname='eth0',
                    route_scope='link'
                ),
                mock.call(
                    'default',
                    via='192.168.254.254',
                    src='192.168.0.100',
                )
            ]
        )
        self.assertTrue(treadmill.iptables.initialize_container.called)

    @mock.patch('treadmill.iptables.initialize_container', mock.Mock())
    @mock.patch('treadmill.iptables.add_raw_rule', mock.Mock())
    @mock.patch('treadmill.netdev.addr_add', mock.Mock())
    @mock.patch('treadmill.netdev.dev_conf_arp_ignore_set', mock.Mock())
    @mock.patch('treadmill.netdev.link_set_name', mock.Mock())
    @mock.patch('treadmill.netdev.link_set_up', mock.Mock())
    @mock.patch('treadmill.netdev.route_add', mock.Mock())
    def test__configure_veth_service_ip(self):
        """Tests configuring container networking with service ip.
        """
        # Access protected _configure_veth
        # pylint: disable=W0212
        newnet._configure_veth(
            'test1234', '192.168.0.100', '192.168.254.254', '10.0.0.1',
        )
        treadmill.netdev.link_set_up.assert_has_calls(
            [
                mock.call('lo'),
                mock.call('eth0'),
            ]
        )
        treadmill.netdev.dev_conf_arp_ignore_set.assert_called_with('eth0', 3)
        # With a service IP, both addresses are added and the default route
        # sources from the service IP.
        treadmill.netdev.addr_add.assert_has_calls(
            [
                mock.call('10.0.0.1/32', 'eth0', addr_scope='host'),
                mock.call('192.168.0.100/32', 'eth0', addr_scope='link'),
            ]
        )
        treadmill.netdev.route_add.assert_has_calls(
            [
                mock.call(
                    '192.168.254.254',
                    devname='eth0',
                    route_scope='link'
                ),
                mock.call(
                    'default',
                    via='192.168.254.254',
                    src='10.0.0.1',
                )
            ]
        )
        self.assertTrue(treadmill.iptables.initialize_container.called)
        treadmill.iptables.add_raw_rule.assert_has_calls(
            [
                mock.call('nat', 'PREROUTING',
                          '-i eth0 -j DNAT --to-destination 10.0.0.1'),
                mock.call('nat', 'POSTROUTING',
                          '-o eth0 -j SNAT --to-source 192.168.0.100'),
            ]
        )
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| |
from __future__ import absolute_import, division, print_function
import locale
import re
import os
import sys
import stat
from glob import glob
from os.path import (basename, dirname, join, splitext, isdir, isfile, exists,
islink, realpath, relpath)
try:
from os import readlink
except ImportError:
readlink = False
import io
from subprocess import call, Popen, PIPE
from collections import defaultdict
from conda_build.config import config
from conda_build import external
from conda_build import environ
from conda_build import utils
from conda_build import source
from conda.compat import lchmod
from conda.misc import walk_prefix
from conda.utils import md5_file
if sys.platform.startswith('linux'):
from conda_build import elf
elif sys.platform == 'darwin':
from conda_build import macho
# Matches a shebang line ("#!...") at the start of any line (re.M).
SHEBANG_PAT = re.compile(r'^#!.+$', re.M)
def is_obj(path):
    """Return True when *path* is a native binary object for this platform
    (ELF on Linux, Mach-O on OS X); never called on Windows."""
    assert sys.platform != 'win32'
    if sys.platform.startswith('linux'):
        return bool(elf.is_elf(path))
    if sys.platform == 'darwin':
        return bool(macho.is_macho(path))
    return False
def fix_shebang(f, osx_is_app=False):
    """Rewrite the shebang of script *f* (relative to the build prefix)
    to point at the build prefix's python interpreter.

    Binary objects, symlinks, non-text files and scripts without a
    python shebang are left untouched.
    """
    path = join(config.build_prefix, f)
    if is_obj(path):
        return
    elif os.path.islink(path):
        return
    with io.open(path, encoding=locale.getpreferredencoding()) as fi:
        try:
            data = fi.read()
        except UnicodeDecodeError:  # file is binary
            return
    m = SHEBANG_PAT.match(data)
    if not (m and 'python' in m.group()):
        return
    # OS X app bundles are launched through /bin/bash + python.app.
    py_exec = ('/bin/bash ' + config.build_prefix + '/bin/python.app'
               if sys.platform == 'darwin' and osx_is_app else
               config.build_prefix + '/bin/' + basename(config.build_python))
    # Only the first shebang occurrence is replaced (count=1).
    new_data = SHEBANG_PAT.sub('#!' + py_exec, data, count=1)
    if new_data == data:
        return
    print("updating shebang:", f)
    with io.open(path, 'w', encoding=locale.getpreferredencoding()) as fo:
        fo.write(new_data)
    # Ensure the rewritten script stays executable (0o755).
    os.chmod(path, int('755', 8))
def write_pth(egg_path):
    """Write a .pth file into site-packages that points at *egg_path*."""
    fn = basename(egg_path)
    # The .pth file is named after the distribution part of the egg name.
    pth_name = '%s.pth' % (fn.split('-')[0])
    pth_path = join(environ.get_sp_dir(), pth_name)
    with open(pth_path, 'w') as fo:
        fo.write('./%s\n' % fn)
def remove_easy_install_pth(files, preserve_egg_dir=False):
    """
    remove the need for easy-install.pth and finally remove easy-install.pth
    itself
    """
    absfiles = [join(config.build_prefix, f) for f in files]
    sp_dir = environ.get_sp_dir()
    for egg_path in glob(join(sp_dir, '*-py*.egg')):
        if isdir(egg_path):
            # Keep the egg dir (and just add a .pth for it) when asked to,
            # or when none of its contents belong to this package's files.
            if preserve_egg_dir or not any(join(egg_path, i) in absfiles for i in walk_prefix(egg_path, False)):
                write_pth(egg_path)
                continue
            print('found egg dir:', egg_path)
            try:
                os.rename(join(egg_path, 'EGG-INFO/PKG-INFO'),
                          egg_path + '-info')
            except OSError:
                pass
            utils.rm_rf(join(egg_path, 'EGG-INFO'))
            # Flatten the egg: move its contents up into site-packages.
            for fn in os.listdir(egg_path):
                if fn == '__pycache__':
                    utils.rm_rf(join(egg_path, fn))
                else:
                    # this might be a name-space package
                    # so the package directory already exists
                    # from another installed dependency
                    if os.path.exists(join(sp_dir, fn)):
                        utils.copy_into(join(egg_path, fn), join(sp_dir, fn))
                        utils.rm_rf(join(egg_path, fn))
                    else:
                        os.rename(join(egg_path, fn), join(sp_dir, fn))
        elif isfile(egg_path):
            # Zipped egg: only handle eggs that belong to this package.
            if not egg_path in absfiles:
                continue
            print('found egg:', egg_path)
            write_pth(egg_path)
    utils.rm_rf(join(sp_dir, 'easy-install.pth'))
def rm_py_along_so():
    """Remove .py (.pyc) files that sit alongside .so or .pyd files."""
    for root, _dirs, files in os.walk(config.build_prefix):
        binaries = [fn for fn in files if fn.endswith(('.so', '.pyd'))]
        for fn in binaries:
            stem = splitext(fn)[0]
            # Delete the shadowed pure-python sources next to the binary.
            for sibling in (stem + '.py', stem + '.pyc'):
                if sibling in files:
                    os.unlink(join(root, sibling))
def compile_missing_pyc():
    """Byte-compile site-packages when any .py file lacks a matching .pyc.

    Scans site-packages for a ``.py`` without its ``.pyc`` sibling; if one
    is found, runs ``compileall`` over the whole site-packages directory
    using the build python.
    """
    sp_dir = environ.get_sp_dir()
    stdlib_dir = environ.get_stdlib_dir()
    # any() short-circuits, so the walk stops at the first missing .pyc
    # (the previous version kept walking the whole tree after setting a flag).
    need_compile = any(
        fn.endswith('.py') and fn + 'c' not in files
        for _root, _dirs, files in os.walk(sp_dir)
        for fn in files
    )
    if need_compile:
        print('compiling .pyc files...')
        # -Wi silences warnings; -x excludes paths matching 'port_v3'.
        utils._check_call([config.build_python, '-Wi',
                           join(stdlib_dir, 'compileall.py'),
                           '-q', '-x', 'port_v3', sp_dir])
def post_process(files, preserve_egg_dir=False):
    """Run python-level post-install fixups: egg flattening, stray .py
    removal and (on Python 2) byte-compilation of missing .pyc files."""
    remove_easy_install_pth(files, preserve_egg_dir=preserve_egg_dir)
    rm_py_along_so()
    # Python 2 only: on Python 3 the .pyc files live in __pycache__.
    if config.CONDA_PY < 30:
        compile_missing_pyc()
def find_lib(link, path=None):
    """Resolve an install-name *link* to a path relative to the build prefix.

    Returns None (implicitly or explicitly) when the link does not need
    rewriting or cannot be resolved; exits the process on hard errors.
    """
    from conda_build.build import prefix_files
    files = prefix_files()
    if link.startswith(config.build_prefix):
        # Absolute path into the prefix: strip the prefix and verify it.
        link = link[len(config.build_prefix) + 1:]
        if link not in files:
            sys.exit("Error: Could not find %s" % link)
        return link
    if link.startswith('/'):  # but doesn't start with the build prefix
        return
    if link.startswith('@rpath/'):
        # Assume the rpath already points to lib, so there is no need to
        # change it.
        return
    if '/' not in link or link.startswith('@executable_path/'):
        # Bare name: look it up by basename among all prefix files.
        link = basename(link)
        file_names = defaultdict(list)
        for f in files:
            file_names[basename(f)].append(f)
        if link not in file_names:
            sys.exit("Error: Could not find %s" % link)
        if len(file_names[link]) > 1:
            if path and basename(path) == link:
                # The link is for the file itself, just use it
                return path
            # Allow for the possibility of the same library appearing in
            # multiple places.
            md5s = set()
            for f in file_names[link]:
                md5s.add(md5_file(join(config.build_prefix, f)))
            if len(md5s) > 1:
                sys.exit("Error: Found multiple instances of %s: %s" % (link, file_names[link]))
            else:
                file_names[link].sort()
                print("Found multiple instances of %s (%s). "
                      "Choosing the first one." % (link, file_names[link]))
        return file_names[link][0]
    # Fall-through returns None after reporting the unresolvable link.
    print("Don't know how to find %s, skipping" % link)
def osx_ch_link(path, link):
    """Callback for macho.install_name_change: map install-name *link* of
    object *path* to an @rpath-based name, or None to leave it alone."""
    print("Fixing linking of %s in %s" % (link, path))
    link_loc = find_lib(link, path)
    if not link_loc:
        return
    lib_to_link = relpath(dirname(link_loc), 'lib')
    # path_to_lib = utils.relative(path[len(config.build_prefix) + 1:])
    # e.g., if
    # path = '/build_prefix/lib/some/stuff/libstuff.dylib'
    # link_loc = 'lib/things/libthings.dylib'
    # then
    # lib_to_link = 'things'
    # path_to_lib = '../..'
    # @rpath always means 'lib', link will be at
    # @rpath/lib_to_link/basename(link), like @rpath/things/libthings.dylib.
    # For when we can't use @rpath, @loader_path means the path to the library
    # ('path'), so from path to link is
    # @loader_path/path_to_lib/lib_to_link/basename(link), like
    # @loader_path/../../things/libthings.dylib.
    ret = '@rpath/%s/%s' % (lib_to_link, basename(link))
    # XXX: IF the above fails for whatever reason, the below can be used
    # TODO: This might contain redundant ..'s if link and path are both in
    # some subdirectory of lib.
    # ret = '@loader_path/%s/%s/%s' % (path_to_lib, lib_to_link, basename(link))
    ret = ret.replace('/./', '/')
    return ret
def mk_relative_osx(path, build_prefix=None):
    '''
    if build_prefix is None, then this is a standard conda build. The path
    and all dependencies are in the build_prefix.
    if package is built in develop mode, build_prefix is specified. Object
    specified by 'path' needs to relink runtime dependences to libs found in
    build_prefix/lib/. Also, in develop mode, 'path' is not in 'build_prefix'
    '''
    if build_prefix is None:
        assert path.startswith(config.build_prefix + '/')
    else:
        config.short_build_prefix = build_prefix
    assert sys.platform == 'darwin' and is_obj(path)
    # Rewrite dependency install names via the osx_ch_link callback.
    s = macho.install_name_change(path, osx_ch_link)
    names = macho.otool(path)
    if names:
        # Strictly speaking, not all object files have install names (e.g.,
        # bundles and executables do not). In that case, the first name here
        # will not be the install name (i.e., the id), but it isn't a problem,
        # because in that case it will be a no-op (with the exception of stub
        # files, which give an error, which is handled below).
        args = [
            'install_name_tool',
            '-id',
            join('@rpath', relpath(dirname(path),
                                   join(config.build_prefix, 'lib')), basename(names[0])),
            path,
        ]
        print(' '.join(args))
        p = Popen(args, stderr=PIPE)
        stdout, stderr = p.communicate()
        stderr = stderr.decode('utf-8')
        if "Mach-O dynamic shared library stub file" in stderr:
            print("Skipping Mach-O dynamic shared library stub file %s" % path)
            return
        else:
            print(stderr, file=sys.stderr)
            if p.returncode:
                raise RuntimeError("install_name_tool failed with exit status %d"
                                   % p.returncode)
        # Add an rpath to every executable to increase the chances of it
        # being found.
        args = [
            'install_name_tool',
            '-add_rpath',
            join('@loader_path', relpath(join(config.build_prefix, 'lib'),
                                         dirname(path)), '').replace('/./', '/'),
            path,
        ]
        print(' '.join(args))
        p = Popen(args, stderr=PIPE)
        stdout, stderr = p.communicate()
        stderr = stderr.decode('utf-8')
        if "Mach-O dynamic shared library stub file" in stderr:
            print("Skipping Mach-O dynamic shared library stub file %s\n" % path)
            return
        elif "would duplicate path, file already has LC_RPATH for:" in stderr:
            print("Skipping -add_rpath, file already has LC_RPATH set")
            return
        else:
            print(stderr, file=sys.stderr)
            if p.returncode:
                raise RuntimeError("install_name_tool failed with exit status %d"
                                   % p.returncode)
    if s:
        # Skip for stub files, which have to use binary_has_prefix_files to be
        # made relocatable.
        assert_relative_osx(path)
def mk_relative_linux(f, rpaths=('lib',)):
    """Set an $ORIGIN-relative RPATH on ELF file *f* (relative to the
    build prefix) using patchelf, one entry per directory in *rpaths*."""
    path = join(config.build_prefix, f)
    rpath = ':'.join('$ORIGIN/' + utils.relative(f, d) for d in rpaths)
    patchelf = external.find_executable('patchelf')
    print('patchelf: file: %s\n  setting rpath to: %s' % (path, rpath))
    # --force-rpath writes DT_RPATH instead of DT_RUNPATH.
    call([patchelf, '--force-rpath', '--set-rpath', rpath, path])
def assert_relative_osx(path):
    """Assert that no install name recorded in *path* still points into
    the build prefix (i.e. the object was made relocatable)."""
    prefix = config.build_prefix
    for install_name in macho.otool(path):
        assert not install_name.startswith(prefix), path
def mk_relative(m, f):
    """Dispatch binary relocation for prefix file *f* to the
    platform-specific implementation; never called on Windows."""
    assert sys.platform != 'win32'
    path = join(config.build_prefix, f)
    # Non-binary files need no relocation.
    if not is_obj(path):
        return
    if sys.platform.startswith('linux'):
        mk_relative_linux(f, rpaths=m.get_value('build/rpaths', ['lib']))
    elif sys.platform == 'darwin':
        mk_relative_osx(path)
def fix_permissions(files):
    """Chmod all prefix directories to 755 and add u+w to every packaged
    file (symlinks included, via lchmod)."""
    print("Fixing permissions")
    for root, dirs, unused_files in os.walk(config.build_prefix):
        for dn in dirs:
            lchmod(join(root, dn), int('755', 8))
    for f in files:
        path = join(config.build_prefix, f)
        st = os.lstat(path)
        lchmod(path, stat.S_IMODE(st.st_mode) | stat.S_IWUSR)  # chmod u+w
def post_build(m, files):
    """Run post-build fixups for metadata *m* over *files*: permissions,
    shebang rewriting, binary relocation and symlink checks."""
    print('number of files:', len(files))
    fix_permissions(files)
    # Everything below is Unix-only.
    if sys.platform == 'win32':
        return
    binary_relocation = bool(m.get_value('build/binary_relocation', True))
    if not binary_relocation:
        # Shebangs are still fixed below; only relocation is skipped.
        print("Skipping binary relocation logic")
    osx_is_app = bool(m.get_value('build/osx_is_app', False))
    for f in files:
        if f.startswith('bin/'):
            fix_shebang(f, osx_is_app=osx_is_app)
        if binary_relocation:
            mk_relative(m, f)
    check_symlinks(files)
def check_symlinks(files):
    """Make in-prefix absolute symlinks relative; error out on symlinks
    into the build root that will dangle after the build."""
    if readlink is False:
        return  # Not on Unix system
    msgs = []
    for f in files:
        path = join(config.build_prefix, f)
        if islink(path):
            link_path = readlink(path)
            real_link_path = realpath(path)
            if real_link_path.startswith(config.build_prefix):
                # If the path is in the build prefix, this is fine, but
                # the link needs to be relative
                if not link_path.startswith('.'):
                    # Don't change the link structure if it is already a
                    # relative link. It's possible that ..'s later in the path
                    # can result in a broken link still, but we'll assume that
                    # such crazy things don't happen.
                    print("Making absolute symlink %s -> %s relative" % (f, link_path))
                    os.unlink(path)
                    os.symlink(relpath(real_link_path, dirname(path)), path)
            else:
                # Symlinks to absolute paths on the system (like /usr) are fine.
                if real_link_path.startswith(config.croot):
                    msgs.append("%s is a symlink to a path that may not "
                                "exist after the build is completed (%s)" % (f, link_path))
    if msgs:
        for msg in msgs:
            print("Error: %s" % msg, file=sys.stderr)
        sys.exit(1)
def get_build_metadata(m):
    """Apply metadata overrides written by the build script into the work dir.

    A build can drop ``__conda_version__.txt``, ``__conda_buildnum__.txt``
    or ``__conda_buildstr__.txt`` into the work directory to override the
    package version, build number or build string in metadata *m*.
    """
    if exists(join(source.WORK_DIR, '__conda_version__.txt')):
        with open(join(source.WORK_DIR, '__conda_version__.txt')) as f:
            version = f.read().strip()
        print("Setting version from __conda_version__.txt: %s" % version)
        m.meta['package']['version'] = version
    if exists(join(source.WORK_DIR, '__conda_buildnum__.txt')):
        with open(join(source.WORK_DIR, '__conda_buildnum__.txt')) as f:
            build_number = f.read().strip()
        print("Setting build number from __conda_buildnum__.txt: %s" %
              build_number)
        m.meta['build']['number'] = build_number
    if exists(join(source.WORK_DIR, '__conda_buildstr__.txt')):
        with open(join(source.WORK_DIR, '__conda_buildstr__.txt')) as f:
            buildstr = f.read().strip()
        # Bug fix: this message previously said "Setting version ..." even
        # though it sets the build *string*.
        print("Setting build string from __conda_buildstr__.txt: %s" % buildstr)
        m.meta['build']['string'] = buildstr
| |
# util/_collections.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
import sys
import itertools
import weakref
import operator
from langhelpers import symbol
from compat import time_func, threading
# Shared, reusable immutable empty set.
EMPTY_SET = frozenset()
class NamedTuple(tuple):
    """A tuple subclass whose elements are also reachable as named
    attributes.  Instances remain pickleable.
    """

    def __new__(cls, vals, labels=None):
        instance = tuple.__new__(cls, vals)
        if labels:
            # Expose each labeled value as an instance attribute.
            for label, val in zip(labels, vals):
                instance.__dict__[label] = val
            instance._labels = labels
        return instance

    def keys(self):
        # None entries mark unnamed positions; they are not real keys.
        return [label for label in self._labels if label is not None]
class ImmutableContainer(object):
    """Mixin that makes a container read-only by aliasing the mutating
    dunder methods to a single helper that always raises TypeError."""
    def _immutable(self, *arg, **kw):
        raise TypeError("%s object is immutable" % self.__class__.__name__)
    __delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
    """A dict that raises TypeError on any attempt at mutation."""

    # Block every dict mutator with the shared raising helper.
    clear = pop = popitem = setdefault = \
        update = ImmutableContainer._immutable

    def __new__(cls, *args):
        # Population happens here, because __init__ below is a no-op
        # (ImmutableContainer blocks ordinary attribute mutation anyway).
        self = dict.__new__(cls)
        dict.__init__(self, *args)
        return self

    def __init__(self, *args):
        pass

    def __reduce__(self):
        # Pickle as a plain-dict payload fed back through the constructor.
        return immutabledict, (dict(self), )

    def union(self, d):
        """Return a new immutabledict combining self with *d*."""
        if not self:
            return immutabledict(d)
        merged = immutabledict(self)
        # Bypass the blocked update() via the base-class method.
        dict.update(merged, d)
        return merged

    def __repr__(self):
        return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
    """Provide a __getattr__/__setattr__ interface over a dict."""

    def __init__(self, data):
        # Write through __dict__ directly to bypass our own __setattr__.
        self.__dict__['_data'] = data

    # -- attribute protocol -------------------------------------------

    def __setattr__(self, key, object):
        self._data[key] = object

    def __getattr__(self, key):
        try:
            return self._data[key]
        except KeyError:
            raise AttributeError(key)

    # -- mapping protocol ---------------------------------------------

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return self._data.itervalues()

    def __add__(self, other):
        return list(self) + list(other)

    def __setitem__(self, key, object):
        self._data[key] = object

    def __getitem__(self, key):
        return self._data[key]

    def __delitem__(self, key):
        del self._data[key]

    def __contains__(self, key):
        return key in self._data

    # -- pickling -----------------------------------------------------

    def __getstate__(self):
        return {'_data': self.__dict__['_data']}

    def __setstate__(self, state):
        self.__dict__['_data'] = state['_data']

    # -- dict-like conveniences ---------------------------------------

    def as_immutable(self):
        """Return an immutable proxy for this :class:`.Properties`."""
        return ImmutableProperties(self._data)

    def update(self, value):
        self._data.update(value)

    def get(self, key, default=None):
        return self[key] if key in self else default

    def keys(self):
        return self._data.keys()

    def has_key(self, key):
        return key in self._data

    def clear(self):
        self._data.clear()
class OrderedProperties(Properties):
    """Provide a __getattr__/__setattr__ interface with an OrderedDict
    as backing store."""
    def __init__(self):
        # Uses this module's insertion-ordered OrderedDict as the backing dict.
        Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
    """Provide immutable dict/object attribute to an underlying dictionary.

    MRO note: ImmutableContainer is listed first so its raising
    __setattr__/__setitem__/__delitem__ override the Properties mutators.
    """
class OrderedDict(dict):
    """A dict that returns keys/values/items in the order they were added."""

    def __init__(self, ____sequence=None, **kwargs):
        # _list records the insertion order of keys.
        self._list = []
        if ____sequence is None:
            if kwargs:
                self.update(**kwargs)
        else:
            self.update(____sequence, **kwargs)

    def __setitem__(self, key, object):
        if key not in self:
            try:
                self._list.append(key)
            except AttributeError:
                # work around Python pickle loads() with
                # dict subclass (seems to ignore __setstate__?)
                self._list = [key]
        dict.__setitem__(self, key, object)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)

    def __iter__(self):
        return iter(self._list)

    def update(self, ____sequence=None, **kwargs):
        if ____sequence is not None:
            if hasattr(____sequence, 'keys'):
                # Mapping-like source: follow its own key order.
                for key in ____sequence.keys():
                    self.__setitem__(key, ____sequence[key])
            else:
                # Iterable of (key, value) pairs.
                for key, value in ____sequence:
                    self[key] = value
        if kwargs:
            self.update(kwargs)

    def setdefault(self, key, value):
        if key in self:
            return self.__getitem__(key)
        self.__setitem__(key, value)
        return value

    def keys(self):
        return list(self._list)

    def iterkeys(self):
        return iter(self.keys())

    def values(self):
        return list(map(self.__getitem__, self._list))

    def itervalues(self):
        return iter(self.values())

    def items(self):
        return list(zip(self._list, self.values()))

    def iteritems(self):
        return iter(self.items())

    def pop(self, key, *default):
        was_present = key in self
        value = dict.pop(self, key, *default)
        if was_present:
            self._list.remove(key)
        return value

    def popitem(self):
        key, value = dict.popitem(self)
        self._list.remove(key)
        return (key, value)

    def clear(self):
        self._list = []
        dict.clear(self)

    def sort(self, *arg, **kw):
        # Only the key order changes; the mapping itself is untouched.
        self._list.sort(*arg, **kw)

    def copy(self):
        return self.__copy__()

    def __copy__(self):
        return OrderedDict(self)
class OrderedSet(set):
    """A set that also remembers the insertion order of its elements."""

    def __init__(self, d=None):
        set.__init__(self)
        # _list mirrors the set contents in insertion order.
        self._list = []
        if d is not None:
            self.update(d)

    def add(self, element):
        if element in self:
            return
        self._list.append(element)
        set.add(self, element)

    def insert(self, pos, element):
        if element in self:
            return
        self._list.insert(pos, element)
        set.add(self, element)

    def remove(self, element):
        # set.remove raises KeyError for missing elements before _list
        # is touched, keeping both structures consistent.
        set.remove(self, element)
        self._list.remove(element)

    def discard(self, element):
        if element not in self:
            return
        self._list.remove(element)
        set.remove(self, element)

    def clear(self):
        set.clear(self)
        self._list = []

    def __getitem__(self, key):
        return self._list[key]

    def __iter__(self):
        return iter(self._list)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self._list)

    __str__ = __repr__

    def update(self, iterable):
        for element in iterable:
            if element not in self:
                self._list.append(element)
                set.add(self, element)
        return self

    __ior__ = update

    def union(self, other):
        combined = self.__class__(self)
        combined.update(other)
        return combined

    __or__ = union

    def __add__(self, other):
        return self.union(other)

    def intersection(self, other):
        members = set(other)
        return self.__class__(a for a in self if a in members)

    __and__ = intersection

    def symmetric_difference(self, other):
        members = set(other)
        result = self.__class__(a for a in self if a not in members)
        result.update(a for a in members if a not in self)
        return result

    __xor__ = symmetric_difference

    def difference(self, other):
        members = set(other)
        return self.__class__(a for a in self if a not in members)

    __sub__ = difference

    def intersection_update(self, other):
        members = set(other)
        set.intersection_update(self, members)
        self._list = [a for a in self._list if a in members]
        return self

    __iand__ = intersection_update

    def symmetric_difference_update(self, other):
        # NOTE: *other* must itself be an OrderedSet (other._list is read).
        set.symmetric_difference_update(self, other)
        self._list = [a for a in self._list if a in self]
        self._list += [a for a in other._list if a in self]
        return self

    __ixor__ = symmetric_difference_update

    def difference_update(self, other):
        set.difference_update(self, other)
        self._list = [a for a in self._list if a in self]
        return self

    __isub__ = difference_update
class IdentitySet(object):
    """A set that considers only object id() for uniqueness.

    This strategy has edge cases for builtin types- it's possible to have
    two 'foo' strings in one of these sets, for example. Use sparingly.

    Members are stored in ``self._members`` keyed by ``id(obj)``.
    """

    # set implementation used for the (id, obj) tuple working sets in the
    # binary operations below; OrderedIdentitySet overrides this.
    _working_set = set

    def __init__(self, iterable=None):
        self._members = dict()
        if iterable:
            for o in iterable:
                self.add(o)

    def add(self, value):
        self._members[id(value)] = value

    def __contains__(self, value):
        return id(value) in self._members

    def remove(self, value):
        """Remove *value*; raises KeyError if it is not a member."""
        del self._members[id(value)]

    def discard(self, value):
        """Remove *value* if present; no error otherwise."""
        try:
            self.remove(value)
        except KeyError:
            pass

    def pop(self):
        """Remove and return an arbitrary member."""
        try:
            pair = self._members.popitem()
            return pair[1]
        except KeyError:
            raise KeyError('pop from an empty set')

    def clear(self):
        self._members.clear()

    def __cmp__(self, other):
        raise TypeError('cannot compare sets using cmp()')

    def __eq__(self, other):
        if isinstance(other, IdentitySet):
            return self._members == other._members
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, IdentitySet):
            return self._members != other._members
        else:
            return True

    def issubset(self, iterable):
        other = type(self)(iterable)
        if len(self) > len(other):
            return False
        # any member of self missing from other disproves the subset relation
        for m in itertools.ifilterfalse(other._members.__contains__,
                                        self._members.iterkeys()):
            return False
        return True

    def __le__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.issubset(other)

    def __lt__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return len(self) < len(other) and self.issubset(other)

    def issuperset(self, iterable):
        other = type(self)(iterable)
        if len(self) < len(other):
            return False
        for m in itertools.ifilterfalse(self._members.__contains__,
                                        other._members.iterkeys()):
            return False
        return True

    def __ge__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.issuperset(other)

    def __gt__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return len(self) > len(other) and self.issuperset(other)

    def union(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        result._members.update(
            self._working_set(self._member_id_tuples()).union(_iter_id(iterable)))
        return result

    def __or__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.union(other)

    def update(self, iterable):
        self._members = self.union(iterable)._members

    def __ior__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.update(other)
        return self

    def difference(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        result._members.update(
            self._working_set(self._member_id_tuples()).difference(_iter_id(iterable)))
        return result

    def __sub__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.difference(other)

    def difference_update(self, iterable):
        self._members = self.difference(iterable)._members

    def __isub__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.difference_update(other)
        return self

    def intersection(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        result._members.update(
            self._working_set(self._member_id_tuples()).intersection(_iter_id(iterable)))
        return result

    def __and__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.intersection(other)

    def intersection_update(self, iterable):
        self._members = self.intersection(iterable)._members

    def __iand__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.intersection_update(other)
        return self

    def symmetric_difference(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        result._members.update(
            self._working_set(self._member_id_tuples()).symmetric_difference(_iter_id(iterable)))
        return result

    def _member_id_tuples(self):
        return ((id(v), v) for v in self._members.itervalues())

    def __xor__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.symmetric_difference(other)

    def symmetric_difference_update(self, iterable):
        self._members = self.symmetric_difference(iterable)._members

    def __ixor__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        # BUG FIX: previously called symmetric_difference() and discarded
        # the result, leaving self unchanged; the in-place operator must
        # mutate self.
        self.symmetric_difference_update(other)
        return self

    def copy(self):
        return type(self)(self._members.itervalues())

    __copy__ = copy

    def __len__(self):
        return len(self._members)

    def __iter__(self):
        return self._members.itervalues()

    def __hash__(self):
        raise TypeError('set objects are unhashable')

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self._members.values())
class OrderedIdentitySet(IdentitySet):
    """An IdentitySet whose members keep insertion order."""

    class _working_set(OrderedSet):
        # a testing pragma: exempt the OIDS working set from the test suite's
        # "never call the user's __hash__" assertions. this is a big hammer,
        # but it's safe here: IDS operates on (id, instance) tuples in the
        # working set.
        __sa_hash_exempt__ = True

    def __init__(self, iterable=None):
        IdentitySet.__init__(self)
        # ordered backing dict keeps iteration in insertion order
        self._members = OrderedDict()
        if iterable:
            for obj in iterable:
                self.add(obj)
if sys.version_info >= (2, 5):
    class PopulateDict(dict):
        """A dict which populates missing values via a creation function.

        Note the creation function takes a key, unlike
        collections.defaultdict.
        """
        def __init__(self, creator):
            self.creator = creator

        def __missing__(self, key):
            # store before returning so subsequent lookups hit the dict
            self[key] = value = self.creator(key)
            return value
else:
    class PopulateDict(dict):
        """A dict which populates missing values via a creation function."""
        def __init__(self, creator):
            self.creator = creator

        def __getitem__(self, key):
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                self[key] = value = self.creator(key)
                return value
# define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# These aliases exist so the concrete implementations can be swapped
# in one place without touching call sites.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
def unique_list(seq, hashfunc=None):
    """Return the elements of *seq* with duplicates removed, order kept.

    If *hashfunc* is given, it is applied to each element to derive the
    key used for duplicate detection.
    """
    seen = {}
    result = []
    if not hashfunc:
        for item in seq:
            if item not in seen:
                seen[item] = True
                result.append(item)
    else:
        for item in seq:
            key = hashfunc(item)
            if key not in seen:
                seen[key] = True
                result.append(item)
    return result
class UniqueAppender(object):
    """Appends items to a collection ensuring uniqueness.

    Additional appends() of the same object are ignored. Membership is
    determined by identity (``is a``) not equality (``==``).
    """

    def __init__(self, data, via=None):
        self.data = data
        self._unique = {}
        # pick the append callable: explicit attribute name, then
        # list-style .append, then set-style .add
        if via:
            self._data_appender = getattr(data, via)
        elif hasattr(data, 'append'):
            self._data_appender = data.append
        elif hasattr(data, 'add'):
            self._data_appender = data.add

    def append(self, item):
        """Append *item* unless an object with the same id() was seen."""
        key = id(item)
        if key not in self._unique:
            self._data_appender(item)
            self._unique[key] = True

    def __iter__(self):
        return iter(self.data)
def to_list(x, default=None):
    """Coerce *x* to a list: None -> *default*, scalar -> [x].

    Lists and tuples are returned unchanged.
    """
    if x is None:
        return default
    if isinstance(x, (list, tuple)):
        return x
    return [x]
def to_set(x):
    """Coerce *x* to a set: None -> empty set, scalar -> {x}.

    An existing set is returned unchanged.
    """
    if x is None:
        return set()
    if isinstance(x, set):
        return x
    if isinstance(x, (list, tuple)):
        return set(x)
    return set([x])
def to_column_set(x):
    """Coerce *x* to a column_set: None -> empty, scalar -> single-element."""
    if x is None:
        return column_set()
    if isinstance(x, column_set):
        return x
    return column_set(to_list(x))
def update_copy(d, _new=None, **kw):
    """Copy the given dict and update with the given values."""
    copied = d.copy()
    if _new:
        copied.update(_new)
    copied.update(**kw)
    return copied
def flatten_iterator(x):
    """Given an iterator of which further sub-elements may also be
    iterators, flatten the sub-elements into a single iterator.

    Strings are treated as atoms, never descended into.
    """
    for elem in x:
        if hasattr(elem, '__iter__') and not isinstance(elem, basestring):
            for sub in flatten_iterator(elem):
                yield sub
        else:
            yield elem
class WeakIdentityMapping(weakref.WeakKeyDictionary):
    """A WeakKeyDictionary with an object identity index.

    Adds a .by_id dictionary to a regular WeakKeyDictionary. Trades
    performance during mutation operations for accelerated lookups by id().
    The usual cautions about weak dictionaries and iteration also apply to
    this subclass.
    """
    # sentinel default for pop(); 'symbol' is presumably a module-level
    # unique-sentinel factory defined elsewhere in this module -- confirm
    _none = symbol('none')
    def __init__(self, ):
        weakref.WeakKeyDictionary.__init__(self)
        # id(key) -> value mirror of the weak mapping
        self.by_id = {}
        # id(key) -> _keyed_weakref, so dead keys can be purged from by_id
        self._weakrefs = {}
    def __setitem__(self, object, value):
        """Store *value* under *object*, updating the id() index."""
        oid = id(object)
        self.by_id[oid] = value
        if oid not in self._weakrefs:
            self._weakrefs[oid] = self._ref(object)
        weakref.WeakKeyDictionary.__setitem__(self, object, value)
    def __delitem__(self, object):
        """Remove *object* from the mapping and both side indexes."""
        del self._weakrefs[id(object)]
        del self.by_id[id(object)]
        weakref.WeakKeyDictionary.__delitem__(self, object)
    def setdefault(self, object, default=None):
        value = weakref.WeakKeyDictionary.setdefault(self, object, default)
        oid = id(object)
        # only index the entry when the default was actually inserted
        if value is default:
            self.by_id[oid] = default
            if oid not in self._weakrefs:
                self._weakrefs[oid] = self._ref(object)
        return value
    def pop(self, object, default=_none):
        if default is self._none:
            value = weakref.WeakKeyDictionary.pop(self, object)
        else:
            value = weakref.WeakKeyDictionary.pop(self, object, default)
        # drop the index entries only if the key was actually present
        if id(object) in self.by_id:
            del self._weakrefs[id(object)]
            del self.by_id[id(object)]
        return value
    def popitem(self):
        item = weakref.WeakKeyDictionary.popitem(self)
        oid = id(item[0])
        del self._weakrefs[oid]
        del self.by_id[oid]
        return item
    def clear(self):
        # Py2K
        # in 3k, MutableMapping calls popitem()
        self._weakrefs.clear()
        self.by_id.clear()
        # end Py2K
        weakref.WeakKeyDictionary.clear(self)
    def update(self, *a, **kw):
        # bulk update would bypass the id() index bookkeeping
        raise NotImplementedError
    def _cleanup(self, wr, key=None):
        # weakref callback: purge index entries when a key is collected;
        # AttributeError can occur during interpreter teardown
        if key is None:
            key = wr.key
        try:
            del self._weakrefs[key]
        except (KeyError, AttributeError):  # pragma: no cover
            pass  # pragma: no cover
        try:
            del self.by_id[key]
        except (KeyError, AttributeError):  # pragma: no cover
            pass  # pragma: no cover
    class _keyed_weakref(weakref.ref):
        # weakref that remembers the id() of its referent, so the
        # callback can still find the index entry after collection
        def __init__(self, object, callback):
            weakref.ref.__init__(self, object, callback)
            self.key = id(object)
    def _ref(self, object):
        return self._keyed_weakref(object, self._cleanup)
class LRUCache(dict):
    """Dictionary with 'squishy' removal of least
    recently used items.

    Entries are stored as ``[key, value, access_counter]`` records; eviction
    happens lazily once the size exceeds ``capacity * (1 + threshold)``.
    """

    def __init__(self, capacity=100, threshold=.5):
        self.capacity = capacity
        self.threshold = threshold
        self._counter = 0

    def _inc_counter(self):
        self._counter += 1
        return self._counter

    def __getitem__(self, key):
        record = dict.__getitem__(self, key)
        # bump recency on every successful read
        record[2] = self._inc_counter()
        return record[1]

    def values(self):
        return [record[1] for record in dict.values(self)]

    def setdefault(self, key, value):
        if key in self:
            return self[key]
        self[key] = value
        return value

    def __setitem__(self, key, value):
        record = dict.get(self, key)
        if record is None:
            dict.__setitem__(self, key, [key, value, self._inc_counter()])
        else:
            record[1] = value
        self._manage_size()

    def _manage_size(self):
        while len(self) > self.capacity + self.capacity * self.threshold:
            by_counter = sorted(dict.values(self),
                                key=operator.itemgetter(2),
                                reverse=True)
            for record in by_counter[self.capacity:]:
                try:
                    del self[record[0]]
                except KeyError:
                    # if we couldnt find a key, most
                    # likely some other thread broke in
                    # on us. loop around and try again
                    break
class ScopedRegistry(object):
    """A Registry that can store one or multiple instances of a single
    class on the basis of a "scope" function.

    The object implements ``__call__`` as the "getter", so by
    calling ``myregistry()`` the contained object is returned
    for the current scope.

    :param createfunc:
      a callable that returns a new object to be placed in the registry

    :param scopefunc:
      a callable that will return a key to store/retrieve an object.
    """

    def __init__(self, createfunc, scopefunc):
        """Construct a new :class:`.ScopedRegistry`.

        :param createfunc: A creation function that will generate
          a new value for the current scope, if none is present.

        :param scopefunc: A function that returns a hashable
          token representing the current scope (such as, current
          thread identifier).
        """
        self.createfunc = createfunc
        self.scopefunc = scopefunc
        self.registry = {}

    def __call__(self):
        key = self.scopefunc()
        if key in self.registry:
            return self.registry[key]
        # setdefault keeps this safe if another thread inserted meanwhile
        return self.registry.setdefault(key, self.createfunc())

    def has(self):
        """Return True if an object is present in the current scope."""
        return self.scopefunc() in self.registry

    def set(self, obj):
        """Set the value for the current scope."""
        self.registry[self.scopefunc()] = obj

    def clear(self):
        """Clear the current scope, if any."""
        self.registry.pop(self.scopefunc(), None)
class ThreadLocalRegistry(ScopedRegistry):
    """A :class:`.ScopedRegistry` that uses a ``threading.local()``
    variable for storage.
    """

    def __init__(self, createfunc):
        self.createfunc = createfunc
        self.registry = threading.local()

    def __call__(self):
        try:
            return self.registry.value
        except AttributeError:
            value = self.registry.value = self.createfunc()
            return value

    def has(self):
        """Return True if a value exists for the current thread."""
        return hasattr(self.registry, "value")

    def set(self, obj):
        """Store *obj* for the current thread."""
        self.registry.value = obj

    def clear(self):
        """Drop the current thread's value, if any."""
        try:
            del self.registry.value
        except AttributeError:
            pass
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
| |
import logging
import math
import types
from collections import deque, defaultdict
import networkx
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from angr.knowledge_plugins import Function
from . import Analysis, CFGEmulated
from ..errors import SimEngineError, SimMemoryError
# todo include an explanation of the algorithm
# todo include a method that detects any change other than constants
# todo use function names / string references where available
# module-level logger
l = logging.getLogger(name=__name__)
# basic block changes
# markers stored in Difference.type: DIFF_TYPE for structural changes,
# DIFF_VALUE for differing constant values
DIFF_TYPE = "type"
DIFF_VALUE = "value"
# exception for trying find basic block changes
class UnmatchedStatementsException(Exception):
    """Raised when two blocks' statements cannot be matched one-to-one."""
    pass
# statement difference classes
class Difference:
    """A single difference found while comparing two statements."""
    def __init__(self, diff_type, value_a, value_b):
        # DIFF_TYPE for structural mismatches; None for raw value
        # differences (later overwritten with the differing attribute's
        # name by compare_statement_dict)
        self.type = diff_type
        self.value_a = value_a  # value from the first statement
        self.value_b = value_b  # value from the second statement
class ConstantChange:
    """A constant that differs between two otherwise-matched blocks."""
    def __init__(self, offset, value_a, value_b):
        self.offset = offset      # instruction offset within the block
        self.value_a = value_a    # constant in the first block
        self.value_b = value_b    # constant in the second block
# helper methods
def _euclidean_dist(vector_a, vector_b):
"""
:param vector_a: A list of numbers.
:param vector_b: A list of numbers.
:returns: The euclidean distance between the two vectors.
"""
dist = 0
for (x, y) in zip(vector_a, vector_b):
dist += (x-y)*(x-y)
return math.sqrt(dist)
def _get_closest_matches(input_attributes, target_attributes):
    """
    :param input_attributes:  First dictionary of objects to attribute tuples.
    :param target_attributes: Second dictionary of blocks to attribute tuples.
    :returns: A dictionary mapping each object in input_attributes to the list
              of objects in target_attributes at minimal euclidean distance.
    """
    closest_matches = {}
    # for each object in the first set, collect all targets tied for the
    # smallest attribute distance
    for obj, attrs in input_attributes.items():
        best_dist = float('inf')
        best_matches = []
        for candidate, candidate_attrs in target_attributes.items():
            dist = _euclidean_dist(attrs, candidate_attrs)
            if dist < best_dist:
                best_dist = dist
                best_matches = [candidate]
            elif dist == best_dist:
                best_matches.append(candidate)
        closest_matches[obj] = best_matches
    return closest_matches
# from https://rosettacode.org/wiki/Levenshtein_distance
def _levenshtein_distance(s1, s2):
"""
:param s1: A list or string
:param s2: Another list or string
:returns: The levenshtein distance between the two
"""
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for index2, num2 in enumerate(s2):
new_distances = [index2 + 1]
for index1, num1 in enumerate(s1):
if num1 == num2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1+1],
new_distances[-1])))
distances = new_distances
return distances[-1]
def _normalized_levenshtein_distance(s1, s2, acceptable_differences):
"""
This function calculates the levenshtein distance but allows for elements in the lists to be different by any number
in the set acceptable_differences.
:param s1: A list.
:param s2: Another list.
:param acceptable_differences: A set of numbers. If (s2[i]-s1[i]) is in the set then they are considered equal.
:returns:
"""
if len(s1) > len(s2):
s1, s2 = s2, s1
acceptable_differences = set(-i for i in acceptable_differences)
distances = range(len(s1) + 1)
for index2, num2 in enumerate(s2):
new_distances = [index2 + 1]
for index1, num1 in enumerate(s1):
if num2 - num1 in acceptable_differences:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1+1],
new_distances[-1])))
distances = new_distances
return distances[-1]
def _is_better_match(x, y, matched_a, matched_b, attributes_dict_a, attributes_dict_b):
    """
    :param x: The first element of a possible match.
    :param y: The second element of a possible match.
    :param matched_a: The current matches for the first set.
    :param matched_b: The current matches for the second set.
    :param attributes_dict_a: The attributes for each element in the first set.
    :param attributes_dict_b: The attributes for each element in the second set.
    :returns: True/False -- whether (x, y) beats both elements' current matches.
    """
    attrs_x = attributes_dict_a[x]
    attrs_y = attributes_dict_b[y]
    candidate_dist = _euclidean_dist(attrs_x, attrs_y)
    # the candidate must be strictly closer than x's existing match...
    if x in matched_a:
        current_attrs = attributes_dict_b[matched_a[x]]
        if candidate_dist >= _euclidean_dist(attrs_x, current_attrs):
            return False
    # ...and strictly closer than y's existing match
    if y in matched_b:
        current_attrs = attributes_dict_a[matched_b[y]]
        if candidate_dist >= _euclidean_dist(attrs_y, current_attrs):
            return False
    return True
def differing_constants(block_a, block_b):
    """
    Compares two basic blocks and finds all the constants that differ from the first block to the second.

    :param block_a: The first block to compare.
    :param block_b: The second block to compare.
    :returns: Returns a list of differing constants in the form of ConstantChange, which has the offset in the
              block and the respective constants.
    :raises UnmatchedStatementsException: if the blocks differ structurally.
    """
    # compare VEX statements (minus instruction markers) plus the
    # block's "next" expression
    statements_a = [s for s in block_a.vex.statements if s.tag != "Ist_IMark"] + [block_a.vex.next]
    statements_b = [s for s in block_b.vex.statements if s.tag != "Ist_IMark"] + [block_b.vex.next]
    if len(statements_a) != len(statements_b):
        raise UnmatchedStatementsException("Blocks have different numbers of statements")
    start_1 = min(block_a.instruction_addrs)
    start_2 = min(block_b.instruction_addrs)
    changes = []
    # check statements
    current_offset = None
    for statement, statement_2 in zip(statements_a, statements_b):
        # sanity check
        if statement.tag != statement_2.tag:
            raise UnmatchedStatementsException("Statement tag has changed")
        # NOTE(review): IMarks are filtered out of statements_a/b above, so
        # this branch appears unreachable and current_offset stays None --
        # confirm against upstream before relying on ConstantChange.offset.
        if statement.tag == "Ist_IMark":
            if statement.addr - start_1 != statement_2.addr - start_2:
                raise UnmatchedStatementsException("Instruction length has changed")
            current_offset = statement.addr - start_1
            continue
        differences = compare_statement_dict(statement, statement_2)
        for d in differences:
            if d.type != DIFF_VALUE:
                raise UnmatchedStatementsException("Instruction has changed")
            else:
                changes.append(ConstantChange(current_offset, d.value_a, d.value_b))
    return changes
def compare_statement_dict(statement_1, statement_2):
    """Recursively compare two VEX statements/expressions.

    :param statement_1: A statement, expression, constant, or container.
    :param statement_2: The object to compare it against.
    :returns: A list of Difference objects describing type/value changes.
    """
    # should return whether or not the statement's type/effects changed
    # need to return the specific number that changed too
    if type(statement_1) != type(statement_2):
        return [Difference(DIFF_TYPE, None, None)]
    # None
    if statement_1 is None and statement_2 is None:
        return []
    # constants
    if isinstance(statement_1, (int, float, str, bytes)):
        if isinstance(statement_1, float) and math.isnan(statement_1) and math.isnan(statement_2):
            return []
        elif statement_1 == statement_2:
            return []
        else:
            # type is filled in by the caller with the attribute name
            return [Difference(None, statement_1, statement_2)]
    # tuples/lists
    if isinstance(statement_1, (tuple, list)):
        if len(statement_1) != len(statement_2):
            # BUG FIX: must return a list -- callers concatenate and iterate
            # the result, so a bare Difference would raise TypeError
            return [Difference(DIFF_TYPE, None, None)]
        differences = []
        for s1, s2 in zip(statement_1, statement_2):
            differences += compare_statement_dict(s1, s2)
        return differences
    # Yan's weird types
    differences = []
    for attr in statement_1.__slots__:
        # don't check arch, property, or methods
        if attr == "arch":
            continue
        if hasattr(statement_1.__class__, attr) and isinstance(getattr(statement_1.__class__, attr), property):
            continue
        if isinstance(getattr(statement_1, attr), types.MethodType):
            continue
        new_diffs = compare_statement_dict(getattr(statement_1, attr), getattr(statement_2, attr))
        # set the difference types
        for diff in new_diffs:
            if diff.type is None:
                diff.type = attr
        differences += new_diffs
    return differences
class NormalizedBlock:
    """A basic block together with any blocks that were merged into it by
    :class:`NormalizedFunction` (a block may span multiple calls)."""

    def __init__(self, block, function):
        addresses = [block.addr]
        # NOTE(review): merged_blocks/call_sites lookups here key by address,
        # while NormalizedFunction populates them keyed by graph node --
        # confirm which key type is intended.
        if block.addr in function.merged_blocks:
            addresses.extend(merged.addr for merged in function.merged_blocks[block.addr])

        self.addr = block.addr
        self.addresses = addresses
        self.statements = []
        self.all_constants = []
        self.operations = []
        self.call_targets = []
        self.blocks = []
        self.instruction_addrs = []

        if block.addr in function.call_sites:
            self.call_targets = function.call_sites[block.addr]

        self.jumpkind = None

        # aggregate the VEX contents of every constituent block, in order
        for address in addresses:
            sub_block = function.project.factory.block(address)
            irsb = sub_block.vex
            self.blocks.append(sub_block)
            self.instruction_addrs += sub_block.instruction_addrs
            self.statements += irsb.statements
            self.all_constants += irsb.all_constants
            self.operations += irsb.operations
            self.jumpkind = irsb.jumpkind  # last block's jumpkind wins

        self.size = sum(b.size for b in self.blocks)

    def __repr__(self):
        total = sum(b.size for b in self.blocks)
        return '<Normalized Block for %#x, %d bytes>' % (self.addr, total)
class NormalizedFunction:
    # a more normalized function
    def __init__(self, function: "Function"):
        """Build a normalized view of *function*: blocks ending in a call are
        merged with their unique fall-through successor, and call sites are
        indexed per (merged) graph node.

        :param function: The angr Function to normalize.
        """
        # start by copying the graph
        self.graph: networkx.DiGraph = function.graph.copy()
        self.project = function._function_manager._kb._project
        self.call_sites = dict()
        self.startpoint = function.startpoint
        self.merged_blocks = dict()
        self.orig_function = function
        # find nodes which end in call and combine them
        done = False
        while not done:
            done = True
            for node in self.graph.nodes():
                try:
                    bl = self.project.factory.block(node.addr)
                except (SimMemoryError, SimEngineError):
                    continue
                # merge if it ends with a single call, and the successor has only one predecessor and succ is after
                successors = list(self.graph.successors(node))
                if bl.vex.jumpkind == "Ijk_Call" and len(successors) == 1 and \
                        len(list(self.graph.predecessors(successors[0]))) == 1 and successors[0].addr > node.addr:
                    # add edges to the successors of its successor, and delete the original successors
                    succ = list(self.graph.successors(node))[0]
                    for s in self.graph.successors(succ):
                        self.graph.add_edge(node, s)
                    self.graph.remove_node(succ)
                    done = False
                    # add to merged blocks (keyed by graph node)
                    if node not in self.merged_blocks:
                        self.merged_blocks[node] = []
                    self.merged_blocks[node].append(succ)
                    if succ in self.merged_blocks:
                        self.merged_blocks[node] += self.merged_blocks[succ]
                        del self.merged_blocks[succ]
                    # stop iterating and start over
                    break
        # set up call sites
        for n in self.graph.nodes():
            call_targets = []
            if n.addr in self.orig_function.get_call_sites():
                call_targets.append(self.orig_function.get_call_target(n.addr))
            # BUG FIX: merged_blocks is keyed by graph node (see above) and
            # indexed with `n` on the next line, so the membership test must
            # also use `n`, not `n.addr` (which could never match and
            # silently dropped call sites of merged blocks).
            if n in self.merged_blocks:
                for block in self.merged_blocks[n]:
                    if block.addr in self.orig_function.get_call_sites():
                        call_targets.append(self.orig_function.get_call_target(block.addr))
            if len(call_targets) > 0:
                self.call_sites[n] = call_targets
class FunctionDiff:
"""
This class computes the a diff between two functions.
"""
def __init__(self, function_a: "Function", function_b: "Function", bindiff=None):
"""
:param function_a: The first angr Function object to diff.
:param function_b: The second angr Function object.
:param bindiff: An optional Bindiff object. Used for some extra normalization during basic block comparison.
"""
self._function_a = NormalizedFunction(function_a)
self._function_b = NormalizedFunction(function_b)
self._project_a = self._function_a.project
self._project_b = self._function_b.project
self._bindiff = bindiff
self._attributes_a = dict()
self._attributes_a = dict()
self._block_matches = set()
self._unmatched_blocks_from_a = set()
self._unmatched_blocks_from_b = set()
self._compute_diff()
@property
def probably_identical(self):
"""
:returns: Whether or not these two functions are identical.
"""
if len(self._unmatched_blocks_from_a | self._unmatched_blocks_from_b) > 0:
return False
for (a, b) in self._block_matches:
if not self.blocks_probably_identical(a, b):
return False
return True
@property
def identical_blocks(self):
"""
:returns: A list of block matches which appear to be identical
"""
identical_blocks = []
for (block_a, block_b) in self._block_matches:
if self.blocks_probably_identical(block_a, block_b):
identical_blocks.append((block_a, block_b))
return identical_blocks
@property
def differing_blocks(self):
"""
:returns: A list of block matches which appear to differ
"""
differing_blocks = []
for (block_a, block_b) in self._block_matches:
if not self.blocks_probably_identical(block_a, block_b):
differing_blocks.append((block_a, block_b))
return differing_blocks
@property
def blocks_with_differing_constants(self):
"""
:return: A list of block matches which appear to differ
"""
differing_blocks = []
diffs = dict()
for (block_a, block_b) in self._block_matches:
if self.blocks_probably_identical(block_a, block_b) and \
not self.blocks_probably_identical(block_a, block_b, check_constants=True):
differing_blocks.append((block_a, block_b))
for block_a, block_b in differing_blocks:
ba = NormalizedBlock(block_a, self._function_a)
bb = NormalizedBlock(block_b, self._function_b)
diffs[(block_a, block_b)] = FunctionDiff._block_diff_constants(ba, bb)
return diffs
    @property
    def block_matches(self):
        # set of (block_a, block_b) pairs matched between the two functions
        return self._block_matches
    @property
    def unmatched_blocks(self):
        # (blocks only in function a, blocks only in function b)
        return self._unmatched_blocks_from_a, self._unmatched_blocks_from_b
    @staticmethod
    def get_normalized_block(addr, function):
        """
        :param addr: Where to start the normalized block.
        :param function: A function containing the block address.
        :returns: A normalized basic block.
        """
        # NOTE(review): NormalizedBlock.__init__ reads `.addr` from its first
        # argument, which suggests it expects a block object rather than a
        # bare address -- confirm against callers.
        return NormalizedBlock(addr, function)
    def block_similarity(self, block_a, block_b):
        """
        :param block_a: The first block address.
        :param block_b: The second block address.
        :returns: The similarity of the basic blocks, normalized for the base address of the block and function
                  call addresses.
        """
        # handle sim procedure blocks
        if self._project_a.is_hooked(block_a) and self._project_b.is_hooked(block_b):
            # hooked blocks are identical iff the same sim procedure is used
            if self._project_a._sim_procedures[block_a] == self._project_b._sim_procedures[block_b]:
                return 1.0
            else:
                return 0.0
        try:
            block_a = NormalizedBlock(block_a, self._function_a)
        except (SimMemoryError, SimEngineError):
            block_a = None
        try:
            block_b = NormalizedBlock(block_b, self._function_b)
        except (SimMemoryError, SimEngineError):
            block_b = None
        # if both were None then they are assumed to be the same, if only one was the same they are assumed to differ
        if block_a is None and block_b is None:
            return 1.0
        elif block_a is None or block_b is None:
            return 0.0
        # get all elements for computing similarity
        tags_a = [s.tag for s in block_a.statements]
        tags_b = [s.tag for s in block_b.statements]
        consts_a = [c.value for c in block_a.all_constants]
        consts_b = [c.value for c in block_b.all_constants]
        all_registers_a = [s.offset for s in block_a.statements if hasattr(s, "offset")]
        all_registers_b = [s.offset for s in block_b.statements if hasattr(s, "offset")]
        jumpkind_a = block_a.jumpkind
        jumpkind_b = block_b.jumpkind
        # compute total distance as the sum of edit distances over each
        # feature sequence
        total_dist = 0
        total_dist += _levenshtein_distance(tags_a, tags_b)
        total_dist += _levenshtein_distance(block_a.operations, block_b.operations)
        total_dist += _levenshtein_distance(all_registers_a, all_registers_b)
        acceptable_differences = self._get_acceptable_constant_differences(block_a, block_b)
        total_dist += _normalized_levenshtein_distance(consts_a, consts_b, acceptable_differences)
        total_dist += 0 if jumpkind_a == jumpkind_b else 1
        # compute similarity: 1 minus distance normalized by the maximum
        # possible number of differing values
        num_values = max(len(tags_a), len(tags_b))
        num_values += max(len(consts_a), len(consts_b))
        num_values += max(len(block_a.operations), len(block_b.operations))
        num_values += 1  # jumpkind
        similarity = 1 - (float(total_dist) / num_values)
        return similarity
    def blocks_probably_identical(self, block_a, block_b, check_constants=False):
        """
        :param block_a: The first block address.
        :param block_b: The second block address.
        :param check_constants: Whether or not to require matching constants in blocks.
        :returns: Whether or not the blocks appear to be identical.
        """
        # handle sim procedure blocks
        if self._project_a.is_hooked(block_a) and self._project_b.is_hooked(block_b):
            return self._project_a._sim_procedures[block_a] == self._project_b._sim_procedures[block_b]
        try:
            block_a = NormalizedBlock(block_a, self._function_a)
        except (SimMemoryError, SimEngineError):
            block_a = None
        try:
            block_b = NormalizedBlock(block_b, self._function_b)
        except (SimMemoryError, SimEngineError):
            block_b = None
        # if both were None then they are assumed to be the same, if only one was None they are assumed to differ
        if block_a is None and block_b is None:
            return True
        elif block_a is None or block_b is None:
            return False
        # if they represent a different number of blocks they are not the same
        if len(block_a.blocks) != len(block_b.blocks):
            return False
        # check differing constants; computed even when check_constants is
        # False so that structural mismatches are detected via the exception
        try:
            diff_constants = FunctionDiff._block_diff_constants(block_a, block_b)
        except UnmatchedStatementsException:
            return False
        if not check_constants:
            return True
        # get values of differences that probably indicate no change
        acceptable_differences = self._get_acceptable_constant_differences(block_a, block_b)
        # todo match globals
        for c in diff_constants:
            if (c.value_a, c.value_b) in self._block_matches:
                # constants point to matched basic blocks
                continue
            if self._bindiff is not None and (c.value_a and c.value_b) in self._bindiff.function_matches:
                # constants point to matched functions
                continue
            # if both are in the binary we'll assume it's okay, although we should really match globals
            # TODO use global matches
            if self._project_a.loader.main_object.contains_addr(c.value_a) and \
                    self._project_b.loader.main_object.contains_addr(c.value_b):
                continue
            # if the difference is equal to the difference in block addr's or successor addr's we'll say it's also okay
            if c.value_b - c.value_a in acceptable_differences:
                continue
            # otherwise they probably are different
            return False
        # the blocks appear to be identical
        return True
@staticmethod
def _block_diff_constants(block_a, block_b):
diff_constants = []
for irsb_a, irsb_b in zip(block_a.blocks, block_b.blocks):
diff_constants += differing_constants(irsb_a, irsb_b)
return diff_constants
@staticmethod
def _compute_block_attributes(function: NormalizedFunction):
"""
:param function: A normalized function object.
:returns: A dictionary of basic block addresses to tuples of attributes.
"""
# The attributes we use are the distance form function start, distance from function exit and whether
# or not it has a subfunction call
distances_from_start = FunctionDiff._distances_from_function_start(function)
distances_from_exit = FunctionDiff._distances_from_function_exit(function)
call_sites = function.call_sites
attributes = {}
for block in function.graph.nodes():
if block in call_sites:
number_of_subfunction_calls = len(call_sites[block])
else:
number_of_subfunction_calls = 0
# there really shouldn't be blocks that can't be reached from the start, but there are for now
dist_start = distances_from_start[block] if block in distances_from_start else 10000
dist_exit = distances_from_exit[block] if block in distances_from_exit else 10000
attributes[block] = (dist_start, dist_exit, number_of_subfunction_calls)
return attributes
@staticmethod
def _distances_from_function_start(function: NormalizedFunction):
"""
:param function: A normalized Function object.
:returns: A dictionary of basic block addresses and their distance to the start of the function.
"""
return networkx.single_source_shortest_path_length(function.graph,
function.startpoint)
@staticmethod
def _distances_from_function_exit(function: NormalizedFunction):
"""
:param function: A normalized Function object.
:returns: A dictionary of basic block addresses and their distance to the exit of the function.
"""
reverse_graph: networkx.DiGraph = function.graph.reverse()
# we aren't guaranteed to have an exit from the function so explicitly add the node
reverse_graph.add_node("start")
found_exits = False
for n in function.graph.nodes():
if len(list(function.graph.successors(n))) == 0:
reverse_graph.add_edge("start", n)
found_exits = True
# if there were no exits (a function with a while 1) let's consider the block with the highest address to
# be the exit. This isn't the most scientific way, but since this case is pretty rare it should be okay
if not found_exits:
last = max(function.graph.nodes(), key=lambda x:x.addr)
reverse_graph.add_edge("start", last)
dists = networkx.single_source_shortest_path_length(reverse_graph, "start")
# remove temp node
del dists["start"]
# correct for the added node
for n in dists:
dists[n] -= 1
return dists
    def _compute_diff(self):
        """
        Computes the block-level diff of the two functions and saves the result
        on self: matched block pairs in ``self._block_matches`` and the leftover
        blocks in ``self._unmatched_blocks_from_a`` / ``_unmatched_blocks_from_b``.
        """
        # get the attributes for all blocks
        l.debug("Computing diff of functions: %s, %s",
                ("%#x" % self._function_a.startpoint.addr) if self._function_a.startpoint is not None else "None",
                ("%#x" % self._function_b.startpoint.addr) if self._function_b.startpoint is not None else "None"
                )
        self.attributes_a = self._compute_block_attributes(self._function_a)
        self.attributes_b = self._compute_block_attributes(self._function_b)
        # get the initial matches (mutually-closest attribute vectors)
        initial_matches = self._get_block_matches(self.attributes_a, self.attributes_b,
                                                  tiebreak_with_block_similarity=False)
        # Use a queue so we process matches in the order that they are found
        to_process = deque(initial_matches)
        # Keep track of which matches we've already added to the queue
        processed_matches = set((x, y) for (x, y) in initial_matches)
        # Keep a dict of current matches, which will be updated if better matches are found
        matched_a = dict()
        matched_b = dict()
        for (x, y) in processed_matches:
            matched_a[x] = y
            matched_b[y] = x
        # while queue is not empty
        while to_process:
            (block_a, block_b) = to_process.pop()
            l.debug("FunctionDiff: Processing (%#x, %#x)", block_a.addr, block_b.addr)
            # we could find new matches in the successors or predecessors of functions
            block_a_succ = list(self._function_a.graph.successors(block_a))
            block_b_succ = list(self._function_b.graph.successors(block_b))
            block_a_pred = list(self._function_a.graph.predecessors(block_a))
            block_b_pred = list(self._function_b.graph.predecessors(block_b))
            # propagate the difference in blocks as delta, so neighbors are
            # compared relative to the current pair's attribute offset
            delta = tuple((i-j) for i, j in zip(self.attributes_b[block_b], self.attributes_a[block_a]))
            # get possible new matches
            new_matches = []
            # if the blocks are identical then the successors should most likely be matched in the same order
            if self.blocks_probably_identical(block_a, block_b) and len(block_a_succ) == len(block_b_succ):
                ordered_succ_a = self._get_ordered_successors(self._project_a, block_a, block_a_succ)
                ordered_succ_b = self._get_ordered_successors(self._project_b, block_b, block_b_succ)
                new_matches.extend(zip(ordered_succ_a, ordered_succ_b))
            new_matches += self._get_block_matches(self.attributes_a, self.attributes_b, block_a_succ, block_b_succ,
                                                   delta, tiebreak_with_block_similarity=True)
            new_matches += self._get_block_matches(self.attributes_a, self.attributes_b, block_a_pred, block_b_pred,
                                                   delta, tiebreak_with_block_similarity=True)
            # for each of the possible new matches add it if it improves the matching
            for (x, y) in new_matches:
                if (x, y) not in processed_matches:
                    processed_matches.add((x, y))
                    l.debug("FunctionDiff: checking if (%#x, %#x) is better", x.addr, y.addr)
                    # if it's a better match than what we already have use it
                    if _is_better_match(x, y, matched_a, matched_b, self.attributes_a, self.attributes_b):
                        l.debug("FunctionDiff: adding possible match (%#x, %#x)", x.addr, y.addr)
                        # evict any previous matches of x and y so both maps stay
                        # consistent one-to-one mappings
                        if x in matched_a:
                            old_match = matched_a[x]
                            del matched_b[old_match]
                        if y in matched_b:
                            old_match = matched_b[y]
                            del matched_a[old_match]
                        matched_a[x] = y
                        matched_b[y] = x
                        to_process.appendleft((x, y))
        # reformat matches into a set of pairs
        self._block_matches = set((x, y) for (x, y) in matched_a.items())
        # get the unmatched blocks
        self._unmatched_blocks_from_a = set(x for x in self._function_a.graph.nodes() if x not in matched_a)
        self._unmatched_blocks_from_b = set(x for x in self._function_b.graph.nodes() if x not in matched_b)
@staticmethod
def _get_ordered_successors(project, block, succ):
try:
# add them in order of the vex
addr = block.addr
succ = set(succ)
ordered_succ = []
bl = project.factory.block(addr)
for x in bl.vex.all_constants:
if x in succ:
ordered_succ.append(x)
# add the rest (sorting might be better than no order)
for s in sorted(succ - set(ordered_succ), key=lambda x:x.addr):
ordered_succ.append(s)
return ordered_succ
except (SimMemoryError, SimEngineError):
return sorted(succ, key=lambda x:x.addr)
def _get_block_matches(self, attributes_a, attributes_b, filter_set_a=None, filter_set_b=None, delta=(0, 0, 0),
tiebreak_with_block_similarity=False):
"""
:param attributes_a: A dict of blocks to their attributes
:param attributes_b: A dict of blocks to their attributes
The following parameters are optional.
:param filter_set_a: A set to limit attributes_a to the blocks in this set.
:param filter_set_b: A set to limit attributes_b to the blocks in this set.
:param delta: An offset to add to each vector in attributes_a.
:returns: A list of tuples of matching objects.
"""
# get the attributes that are in the sets
if filter_set_a is None:
filtered_attributes_a = {k: v for k, v in attributes_a.items()}
else:
filtered_attributes_a = {k: v for k, v in attributes_a.items() if k in filter_set_a}
if filter_set_b is None:
filtered_attributes_b = {k: v for k, v in attributes_b.items()}
else:
filtered_attributes_b = {k: v for k, v in attributes_b.items() if k in filter_set_b}
# add delta
for k in filtered_attributes_a:
filtered_attributes_a[k] = tuple((i+j) for i, j in zip(filtered_attributes_a[k], delta))
for k in filtered_attributes_b:
filtered_attributes_b[k] = tuple((i+j) for i, j in zip(filtered_attributes_b[k], delta))
# get closest
closest_a = _get_closest_matches(filtered_attributes_a, filtered_attributes_b)
closest_b = _get_closest_matches(filtered_attributes_b, filtered_attributes_a)
if tiebreak_with_block_similarity:
# use block similarity to break ties in the first set
for a in closest_a:
if len(closest_a[a]) > 1:
best_similarity = 0
best = []
for x in closest_a[a]:
similarity = self.block_similarity(a, x)
if similarity > best_similarity:
best_similarity = similarity
best = [x]
elif similarity == best_similarity:
best.append(x)
closest_a[a] = best
# use block similarity to break ties in the second set
for b in closest_b:
if len(closest_b[b]) > 1:
best_similarity = 0
best = []
for x in closest_b[b]:
similarity = self.block_similarity(x, b)
if similarity > best_similarity:
best_similarity = similarity
best = [x]
elif similarity == best_similarity:
best.append(x)
closest_b[b] = best
# a match (x,y) is good if x is the closest to y and y is the closest to x
matches = []
for a in closest_a:
if len(closest_a[a]) == 1:
match = closest_a[a][0]
if len(closest_b[match]) == 1 and closest_b[match][0] == a:
matches.append((a, match))
return matches
def _get_acceptable_constant_differences(self, block_a, block_b):
# keep a set of the acceptable differences in constants between the two blocks
acceptable_differences = set()
acceptable_differences.add(0)
block_a_base = block_a.instruction_addrs[0]
block_b_base = block_b.instruction_addrs[0]
acceptable_differences.add(block_b_base - block_a_base)
# get matching successors
for target_a, target_b in zip(block_a.call_targets, block_b.call_targets):
# these can be none if we couldn't resolve the call target
if target_a is None or target_b is None:
continue
acceptable_differences.add(target_b - target_a)
acceptable_differences.add((target_b - block_b_base) - (target_a - block_a_base))
# get the difference between the data segments
# this is hackish
if ".bss" in self._project_a.loader.main_object.sections_map and \
".bss" in self._project_b.loader.main_object.sections_map:
bss_a = self._project_a.loader.main_object.sections_map[".bss"].min_addr
bss_b = self._project_b.loader.main_object.sections_map[".bss"].min_addr
acceptable_differences.add(bss_b - bss_a)
acceptable_differences.add((bss_b - block_b_base) - (bss_a - block_a_base))
return acceptable_differences
class BinDiff(Analysis):
    """
    Computes a diff between two binaries represented by angr Projects.

    Functions are matched between the binaries via PLT entries, symbol names
    and structural attributes; each matched pair can then be diffed
    block-by-block through :class:`FunctionDiff`.
    """
    def __init__(self, other_project, enable_advanced_backward_slicing=False, cfg_a=None, cfg_b=None):
        """
        :param other_project: The second project to diff against ``self.project``.
        :param enable_advanced_backward_slicing: Forwarded to CFGEmulated when the CFGs
                                                 are computed here.
        :param cfg_a: Optional pre-computed CFG for the first binary (skips recovery).
        :param cfg_b: Optional pre-computed CFG for the second binary.
        """
        l.debug("Computing cfg's")
        back_traversal = not enable_advanced_backward_slicing
        if cfg_a is None:
            self.cfg_a = self.project.analyses[CFGEmulated].prep()(context_sensitivity_level=1,
                                                                   keep_state=True,
                                                                   enable_symbolic_back_traversal=back_traversal,
                                                                   enable_advanced_backward_slicing=enable_advanced_backward_slicing)
            self.cfg_b = other_project.analyses[CFGEmulated].prep()(context_sensitivity_level=1,
                                                                    keep_state=True,
                                                                    enable_symbolic_back_traversal=back_traversal,
                                                                    enable_advanced_backward_slicing=enable_advanced_backward_slicing)
        else:
            self.cfg_a = cfg_a
            self.cfg_b = cfg_b
        l.debug("Done computing cfg's")
        self._p2 = other_project
        self._attributes_a = dict()
        # BUGFIX: this line previously re-initialized _attributes_a a second
        # time, leaving _attributes_b undefined.
        self._attributes_b = dict()
        self._function_diffs = dict()
        self.function_matches = set()
        self._unmatched_functions_from_a = set()
        self._unmatched_functions_from_b = set()
        self._compute_diff()

    def functions_probably_identical(self, func_a_addr, func_b_addr, check_consts=False):
        """
        Compare two functions and return True if they appear identical.

        :param func_a_addr: The address of the first function (in the first binary).
        :param func_b_addr: The address of the second function (in the second binary).
        :param check_consts: Also require the constants used by the blocks to match.
        :returns: Whether or not the functions appear to be identical.
        """
        # hooked functions are compared by their SimProcedure, not by code
        if self.cfg_a.project.is_hooked(func_a_addr) and self.cfg_b.project.is_hooked(func_b_addr):
            return self.cfg_a.project._sim_procedures[func_a_addr] == self.cfg_b.project._sim_procedures[func_b_addr]
        func_diff = self.get_function_diff(func_a_addr, func_b_addr)
        if check_consts:
            return func_diff.probably_identical_with_consts
        return func_diff.probably_identical

    @property
    def identical_functions(self):
        """
        :returns: A list of function matches that appear to be identical
        """
        return [(func_a, func_b) for (func_a, func_b) in self.function_matches
                if self.functions_probably_identical(func_a, func_b)]

    @property
    def differing_functions(self):
        """
        :returns: A list of function matches that appear to differ
        """
        return [(func_a, func_b) for (func_a, func_b) in self.function_matches
                if not self.functions_probably_identical(func_a, func_b)]

    # NOTE: unlike its siblings this is intentionally NOT a @property, to stay
    # backward compatible with callers that invoke it as a method.
    def differing_functions_with_consts(self):
        """
        :return: A list of function matches that appear to differ, including
                 ones that differ only by constants.
        """
        return [(func_a, func_b) for (func_a, func_b) in self.function_matches
                if not self.functions_probably_identical(func_a, func_b, check_consts=True)]

    @property
    def differing_blocks(self):
        """
        :returns: A list of block matches that appear to differ
        """
        differing_blocks = []
        for (func_a, func_b) in self.function_matches:
            differing_blocks.extend(self.get_function_diff(func_a, func_b).differing_blocks)
        return differing_blocks

    @property
    def identical_blocks(self):
        """
        :return: A list of all block matches that appear to be identical
        """
        identical_blocks = []
        for (func_a, func_b) in self.function_matches:
            identical_blocks.extend(self.get_function_diff(func_a, func_b).identical_blocks)
        return identical_blocks

    @property
    def blocks_with_differing_constants(self):
        """
        :return: A dict of block matches with differing constants to the tuple of constants
        """
        diffs = dict()
        for (func_a, func_b) in self.function_matches:
            diffs.update(self.get_function_diff(func_a, func_b).blocks_with_differing_constants)
        return diffs

    @property
    def unmatched_functions(self):
        """:return: The (from_a, from_b) sets of functions without a match."""
        return self._unmatched_functions_from_a, self._unmatched_functions_from_b

    def get_function_diff(self, function_addr_a, function_addr_b):
        """
        Get (and cache) the diff of two functions in the binaries.

        :param function_addr_a: The address of the first function (in the first binary)
        :param function_addr_b: The address of the second function (in the second binary)
        :returns: the FunctionDiff of the two functions
        """
        pair = (function_addr_a, function_addr_b)
        if pair not in self._function_diffs:
            function_a = self.cfg_a.kb.functions.function(function_addr_a)
            function_b = self.cfg_b.kb.functions.function(function_addr_b)
            self._function_diffs[pair] = FunctionDiff(function_a, function_b, self)
        return self._function_diffs[pair]

    @staticmethod
    def _compute_function_attributes(cfg):
        """
        :param cfg: An angr CFG object
        :returns: a dictionary of function addresses to tuples of attributes
                  (number of basic blocks, number of edges, number of subfunction calls)
        """
        attributes = dict()
        all_funcs = set(cfg.kb.callgraph.nodes())
        for function_addr in cfg.kb.functions:
            func = cfg.kb.functions.function(function_addr)
            # skip syscalls and functions which are None in the cfg
            if func is None or func.is_syscall:
                continue
            # (previous version had an unreachable `else` branch here: the None
            # case is already filtered out above)
            normalized_function = NormalizedFunction(func)
            number_of_basic_blocks = len(normalized_function.graph.nodes())
            number_of_edges = len(normalized_function.graph.edges())
            if function_addr in all_funcs:
                number_of_subfunction_calls = len(list(cfg.kb.callgraph.successors(function_addr)))
            else:
                number_of_subfunction_calls = 0
            attributes[function_addr] = (number_of_basic_blocks, number_of_edges, number_of_subfunction_calls)
        return attributes

    def _get_call_site_matches(self, func_a, func_b):
        """Derive candidate function matches from the call sites of already
        matched basic blocks of a matched function pair."""
        possible_matches = set()
        # Make sure those functions are not SimProcedures
        f_a = self.cfg_a.kb.functions.function(func_a)
        f_b = self.cfg_b.kb.functions.function(func_b)
        if f_a.startpoint is None or f_b.startpoint is None:
            return possible_matches
        fd = self.get_function_diff(func_a, func_b)
        basic_block_matches = fd.block_matches
        function_a = fd._function_a
        function_b = fd._function_b
        for (a, b) in basic_block_matches:
            if a in function_a.call_sites and b in function_b.call_sites:
                # add them in order
                for target_a, target_b in zip(function_a.call_sites[a], function_b.call_sites[b]):
                    possible_matches.add((target_a, target_b))
                # add them in reverse, since if a new call was added the ordering from each side
                # will remain constant until the change
                for target_a, target_b in zip(reversed(function_a.call_sites[a]),
                                              reversed(function_b.call_sites[b])):
                    possible_matches.add((target_a, target_b))
        return possible_matches

    def _get_plt_matches(self):
        """Match functions by their PLT entries / SimProcedure 'resolves' names."""
        plt_matches = []
        for name, addr in self.project.loader.main_object.plt.items():
            if name in self._p2.loader.main_object.plt:
                plt_matches.append((addr, self._p2.loader.main_object.plt[name]))
        # in the case of sim procedures the actual sim procedure might be in the interfunction graph, not the plt entry
        func_to_addr_a = dict()
        func_to_addr_b = dict()
        for (k, hook) in self.project._sim_procedures.items():
            if "resolves" in hook.kwargs:
                func_to_addr_a[hook.kwargs['resolves']] = k
        for (k, hook) in self._p2._sim_procedures.items():
            if "resolves" in hook.kwargs:
                func_to_addr_b[hook.kwargs['resolves']] = k
        for name, addr in func_to_addr_a.items():
            if name in func_to_addr_b:
                plt_matches.append((addr, func_to_addr_b[name]))
        # remove ones that aren't in the interfunction graph, because these seem to not be consistent
        all_funcs_a = set(self.cfg_a.kb.callgraph.nodes())
        all_funcs_b = set(self.cfg_b.kb.callgraph.nodes())
        plt_matches = [x for x in plt_matches if x[0] in all_funcs_a and x[1] in all_funcs_b]
        return plt_matches

    def _get_name_matches(self):
        """Match functions that carry the same (non-autogenerated) symbol name."""
        names_to_addrs_a = defaultdict(list)
        for f in self.cfg_a.functions.values():
            if not f.name.startswith("sub_"):
                names_to_addrs_a[f.name].append(f.addr)
        names_to_addrs_b = defaultdict(list)
        for f in self.cfg_b.functions.values():
            if not f.name.startswith("sub_"):
                names_to_addrs_b[f.name].append(f.addr)
        name_matches = []
        for name, addrs in names_to_addrs_a.items():
            if name in names_to_addrs_b:
                for addr_a, addr_b in zip(addrs, names_to_addrs_b[name]):
                    # if binary a and binary b have different numbers of functions with the same name, we will see them
                    # in unmatched functions in the end.
                    name_matches.append((addr_a, addr_b))
        return name_matches

    def _compute_diff(self):
        """Compute the function-level diff: seed matches from PLT/names/attributes,
        then propagate matches through the callgraph and matched call sites."""
        # get the attributes for all functions
        self.attributes_a = self._compute_function_attributes(self.cfg_a)
        self.attributes_b = self._compute_function_attributes(self.cfg_b)
        # get the initial matches
        initial_matches = self._get_plt_matches()
        initial_matches += self._get_name_matches()
        initial_matches += self._get_function_matches(self.attributes_a, self.attributes_b)
        for (a, b) in initial_matches:
            l.debug("Initially matched (%#x, %#x)", a, b)
        # Use a queue so we process matches in the order that they are found
        to_process = deque(initial_matches)
        # Keep track of which matches we've already added to the queue
        processed_matches = set((x, y) for (x, y) in initial_matches)
        # Keep a dict of current matches, which will be updated if better matches are found
        matched_a = dict()
        matched_b = dict()
        for (x, y) in processed_matches:
            matched_a[x] = y
            matched_b[y] = x
        callgraph_a_nodes = set(self.cfg_a.kb.callgraph.nodes())
        callgraph_b_nodes = set(self.cfg_b.kb.callgraph.nodes())
        # while queue is not empty
        while to_process:
            (func_a, func_b) = to_process.pop()
            l.debug("Processing (%#x, %#x)", func_a, func_b)
            # we could find new matches in the successors or predecessors of functions
            if not self.project.loader.main_object.contains_addr(func_a):
                continue
            if not self._p2.loader.main_object.contains_addr(func_b):
                continue
            func_a_succ = self.cfg_a.kb.callgraph.successors(func_a) if func_a in callgraph_a_nodes else []
            func_b_succ = self.cfg_b.kb.callgraph.successors(func_b) if func_b in callgraph_b_nodes else []
            func_a_pred = self.cfg_a.kb.callgraph.predecessors(func_a) if func_a in callgraph_a_nodes else []
            func_b_pred = self.cfg_b.kb.callgraph.predecessors(func_b) if func_b in callgraph_b_nodes else []
            # get possible new matches
            new_matches = set(self._get_function_matches(self.attributes_a, self.attributes_b,
                                                         func_a_succ, func_b_succ))
            new_matches |= set(self._get_function_matches(self.attributes_a, self.attributes_b,
                                                          func_a_pred, func_b_pred))
            # could also find matches as function calls of matched basic blocks
            new_matches.update(self._get_call_site_matches(func_a, func_b))
            # for each of the possible new matches add it if it improves the matching
            for (x, y) in new_matches:
                # skip None functions and syscalls; use dedicated names here so
                # we don't shadow the outer loop's func_a/func_b (previous
                # version reassigned them inside this loop)
                cand_a = self.cfg_a.kb.functions.function(x)
                if cand_a is None or cand_a.is_simprocedure or cand_a.is_syscall:
                    continue
                cand_b = self.cfg_b.kb.functions.function(y)
                if cand_b is None or cand_b.is_simprocedure or cand_b.is_syscall:
                    continue
                if (x, y) not in processed_matches:
                    processed_matches.add((x, y))
                    # if it's a better match than what we already have use it
                    l.debug("Checking function match %s, %s", hex(x), hex(y))
                    if _is_better_match(x, y, matched_a, matched_b, self.attributes_a, self.attributes_b):
                        l.debug("Adding potential match %s, %s", hex(x), hex(y))
                        if x in matched_a:
                            old_match = matched_a[x]
                            del matched_b[old_match]
                            l.debug("Removing previous match (%#x, %#x)", x, old_match)
                        if y in matched_b:
                            old_match = matched_b[y]
                            del matched_a[old_match]
                            l.debug("Removing previous match (%#x, %#x)", old_match, y)
                        matched_a[x] = y
                        matched_b[y] = x
                        to_process.appendleft((x, y))
        # reformat matches into a set of pairs
        self.function_matches = set()
        for x, y in matched_a.items():
            # only keep if the pair is in the binary ranges
            if self.project.loader.main_object.contains_addr(x) and self._p2.loader.main_object.contains_addr(y):
                self.function_matches.add((x, y))
        # get the unmatched functions
        self._unmatched_functions_from_a = set(x for x in self.attributes_a.keys() if x not in matched_a)
        self._unmatched_functions_from_b = set(x for x in self.attributes_b.keys() if x not in matched_b)
        # remove unneeded function diffs
        for (x, y) in dict(self._function_diffs):
            if (x, y) not in self.function_matches:
                del self._function_diffs[(x, y)]

    @staticmethod
    def _get_function_matches(attributes_a, attributes_b, filter_set_a=None, filter_set_b=None):
        """
        :param attributes_a: A dict of functions to their attributes
        :param attributes_b: A dict of functions to their attributes

        The following parameters are optional.

        :param filter_set_a: A set to limit attributes_a to the functions in this set.
        :param filter_set_b: A set to limit attributes_b to the functions in this set.
        :returns: A list of tuples of matching objects.
        """
        # get the attributes that are in the sets
        if filter_set_a is None:
            filtered_attributes_a = {k: v for k, v in attributes_a.items()}
        else:
            filtered_attributes_a = {k: v for k, v in attributes_a.items() if k in filter_set_a}
        if filter_set_b is None:
            filtered_attributes_b = {k: v for k, v in attributes_b.items()}
        else:
            filtered_attributes_b = {k: v for k, v in attributes_b.items() if k in filter_set_b}
        # get closest
        closest_a = _get_closest_matches(filtered_attributes_a, filtered_attributes_b)
        closest_b = _get_closest_matches(filtered_attributes_b, filtered_attributes_a)
        # a match (x,y) is good if x is the closest to y and y is the closest to x
        matches = []
        for a in closest_a:
            if len(closest_a[a]) == 1:
                match = closest_a[a][0]
                if len(closest_b[match]) == 1 and closest_b[match][0] == a:
                    matches.append((a, match))
        return matches
from angr.analyses import AnalysesHub
# Register so the analysis is reachable as `project.analyses.BinDiff(...)`.
AnalysesHub.register_default('BinDiff', BinDiff)
| |
from scipy.misc import imread, imresize, imsave, fromimage, toimage
from scipy.optimize import fmin_l_bfgs_b
import scipy.interpolate
import scipy.ndimage
import numpy as np
import time
import argparse
import warnings
from sklearn.feature_extraction.image import reconstruct_from_patches_2d, extract_patches_2d
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Conv2D, AveragePooling2D, MaxPooling2D
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
# Download URLs for pre-trained VGG16/VGG19 weights without the fully-connected
# top layers, in both Theano and TensorFlow kernel/dim orderings.
THEANO_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
TH_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels_notop.h5'
TF_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'

# Command-line interface: positional base image, one or more style images, and
# an output prefix, plus optional weighting/iteration knobs.
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
# nargs='+' allows blending several style images in one run
parser.add_argument('style_image_paths', metavar='ref', nargs='+', type=str,
                    help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')
parser.add_argument("--image_size", dest="img_size", default=400, type=int,
                    help='Minimum image size')
parser.add_argument("--content_weight", dest="content_weight", default=0.025, type=float,
                    help="Weight of content")
parser.add_argument("--style_weight", dest="style_weight", nargs='+', default=[1], type=float,
                    help="Weight of style, can be multiple for multiple styles")
parser.add_argument("--total_variation_weight", dest="tv_weight", default=8.5e-5, type=float,
                    help="Total Variation weight")
parser.add_argument("--style_scale", dest="style_scale", default=1.0, type=float,
                    help="Scale the weighing of the style")
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int,
                    help="Number of iterations")
parser.add_argument("--content_loss_type", default=0, type=int,
                    help='Can be one of 0, 1 or 2. Readme contains the required information of each mode.')
parser.add_argument("--content_layer", dest="content_layer", default="conv5_2", type=str,
                    help="Content layer used for content loss.")
parser.add_argument("--init_image", dest="init_image", default="content", type=str,
                    help="Initial image used to generate the final image. Options are 'content', 'noise', or 'gray'")
def _calc_patch_grid_dims(shape, patch_size, patch_stride):
x_w, x_h, x_c = shape
num_rows = 1 + (x_h - patch_size) // patch_stride
num_cols = 1 + (x_w - patch_size) // patch_stride
return num_rows, num_cols
def make_patch_grid(x, patch_size, patch_stride=1):
    '''Cut an image into an overlapping grid of square patches.

    x shape: (num_channels, rows, cols); result shape:
    (grid_rows, grid_cols, channels, patch_size, patch_size).

    NOTE(review): extract_patches_2d has no stride parameter, so the reshape
    below is only consistent when patch_stride == 1 — confirm before passing
    other strides.
    '''
    img = x.transpose(2, 1, 0)
    raw_patches = extract_patches_2d(img, (patch_size, patch_size))
    num_rows, num_cols = _calc_patch_grid_dims(img.shape, patch_size, patch_stride)
    channels = img.shape[2]
    grid = raw_patches.reshape((num_rows, num_cols, patch_size, patch_size, channels))
    return grid.transpose((0, 1, 4, 2, 3))
def combine_patches_grid(in_patches, out_shape):
    '''Reconstruct an image from a grid of `patches`.

    input shape: (rows, cols, channels, patch_row, patch_col); the result is
    transposed back to the (channels-first-reversed) layout used by
    make_patch_grid.
    '''
    rows, cols = in_patches.shape[:2]
    channels = in_patches.shape[-3]
    psize = in_patches.shape[-1]
    # flatten the grid: (num_patches, channels, p, p) -> (num_patches, p, p, channels)
    flat = np.reshape(in_patches, (rows * cols, channels, psize, psize))
    flat = np.transpose(flat, (0, 2, 3, 1))
    recon = reconstruct_from_patches_2d(flat, out_shape)
    return recon.transpose(2, 1, 0)
class PatchMatcher(object):
    '''A matcher of image patches inspired by the PatchMatch algorithm.
    image shape: (width, height, channels)

    Maintains, for every patch of the input grid, normalized (row, col)
    coordinates in [0, 1] into the target patch grid plus the similarity of
    that mapping; `update` improves the mapping by propagation and random
    search steps.
    '''
    def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
            num_propagation_steps=5, num_random_steps=5, random_max_radius=1.0, random_scale=0.5):
        # hyper-parameters of the search
        self.input_shape = input_shape
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        self.jump_size = jump_size
        self.num_propagation_steps = num_propagation_steps
        self.num_random_steps = num_random_steps
        self.random_max_radius = random_max_radius
        self.random_scale = random_scale
        self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
        self.target_patches = make_patch_grid(target_img, patch_size)
        self.target_patches_normed = self.normalize_patches(self.target_patches)
        # coords: (2, rows, cols), axis 0 is (row, col), values normalized to [0, 1]
        self.coords = np.random.uniform(0.0, 1.0, # TODO: switch to pixels
                                        (2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
        # NOTE(review): for a 3-tuple input_shape, input_shape[:2:-1] is an
        # empty slice, so this creates a 0-d array; it still broadcasts against
        # the (rows, cols) similarity arrays in eval_state — confirm intent
        # (perhaps input_shape[1::-1] was meant).
        self.similarity = np.zeros(input_shape[:2:-1], dtype ='float32')
        # smallest coordinate step between neighboring grid cells (note:
        # 'propagration' typo kept — the attribute is referenced by this name)
        self.min_propagration_row = 1.0 / self.num_input_rows
        self.min_propagration_col = 1.0 / self.num_input_cols
        self.delta_row = np.array([[[self.min_propagration_row]], [[0.0]]])
        self.delta_col = np.array([[[0.0]], [[self.min_propagration_col]]])

    def update(self, input_img, reverse_propagation=False):
        # Convenience wrapper: patchify + normalize the input, then run one round.
        input_patches = self.get_patches_for(input_img)
        self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)

    def update_with_patches(self, input_patches, reverse_propagation=False):
        # One full round: propagation steps followed by random search steps.
        self._propagate(input_patches, reverse_propagation=reverse_propagation)
        self._random_update(input_patches)

    def get_patches_for(self, img):
        # Cut `img` into this matcher's patch grid.
        return make_patch_grid(img, self.patch_size)

    def normalize_patches(self, patches):
        # L2-normalize each patch over (channels, patch_row, patch_col).
        # NOTE(review): an all-zero patch yields a zero norm and hence NaNs.
        norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
        return patches / norm

    def _propagate(self, input_patches, reverse_propagation=False):
        # Shift neighbors' coords by one grid step and keep whichever
        # candidate (row-shift or col-shift) scores best.
        if reverse_propagation:
            roll_direction = 1
        else:
            roll_direction = -1
        sign = float(roll_direction)
        for step_i in range(self.num_propagation_steps):
            new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
            coords_row, similarity_row = self.eval_state(new_coords, input_patches)
            new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
            coords_col, similarity_col = self.eval_state(new_coords, input_patches)
            self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)

    def _random_update(self, input_patches):
        # Random search with exponentially shrinking radius.
        for alpha in range(1, self.num_random_steps + 1): # NOTE this should actually stop when the move is < 1
            new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
            self.coords, self.similarity = self.eval_state(new_coords, input_patches)

    def eval_state(self, new_coords, input_patches):
        # Keep the new coordinates wherever they improve similarity.
        new_similarity = self.patch_similarity(input_patches, new_coords)
        delta_similarity = new_similarity - self.similarity
        coords = np.where(delta_similarity > 0, new_coords, self.coords)
        best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
        return coords, best_similarity

    def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
        # Element-wise choice between two candidate states by similarity.
        delta_similarity = similarity_a - similarity_b
        best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
        best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
        return best_coords, best_similarity

    def patch_similarity(self, source, coords):
        '''Check the similarity of the patches specified in coords.'''
        # dot product of normalized patches = cosine similarity per grid cell
        target_vals = self.lookup_coords(self.target_patches_normed, coords)
        err = source * target_vals
        return np.sum(err, axis=(2, 3, 4))

    def clip_coords(self, coords):
        # TODO: should this all be in pixel space?
        coords = np.clip(coords, 0.0, 1.0)
        return coords

    def lookup_coords(self, x, coords):
        # Map normalized [0, 1] coords to integer grid indices into x.
        x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
        i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
        return x[i_coords[0], i_coords[1]]

    def get_reconstruction(self, patches=None, combined=None):
        # Rebuild an image from the current mapping; the patch source is
        # `combined` (re-patchified), an explicit `patches` grid, or the target.
        if combined is not None:
            patches = make_patch_grid(combined, self.patch_size)
        if patches is None:
            patches = self.target_patches
        patches = self.lookup_coords(patches, self.coords)
        recon = combine_patches_grid(patches, self.input_shape)
        return recon

    def scale(self, new_shape, new_target_img):
        '''Create a new matcher of the given shape and replace its
        state with a scaled up version of the current matcher's state.
        '''
        new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
                patch_stride=self.patch_stride, jump_size=self.jump_size,
                num_propagation_steps=self.num_propagation_steps,
                num_random_steps=self.num_random_steps,
                random_max_radius=self.random_max_radius,
                random_scale=self.random_scale)
        new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
        # NOTE(review): similarity is resampled to coords' 3-D shape even though
        # similarity appears to be (rows, cols) or 0-d — confirm this is intended.
        new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
        return new_matcher
def congrid(a, newdims, method='linear', centre=False, minusone=False):
    '''Arbitrary resampling of source array to new dimension sizes.
    Currently only supports maintaining the same number of dimensions.
    To use 1-D arrays, first promote them to shape (x,1).
    Uses the same parameters and creates the same co-ordinate lookup points
    as IDL''s congrid routine, which apparently originally came from a VAX/VMS
    routine of the same name.
    method:
    neighbour - closest value from original data
    nearest and linear - uses n x 1-D interpolations using
                         scipy.interpolate.interp1d
    (see Numerical Recipes for validity of use of n 1-D interpolations)
    spline - uses ndimage.map_coordinates
    centre:
    True - interpolation points are at the centres of the bins
    False - points are at the front edge of the bin
    minusone:
    For example- inarray.shape = (i,j) & new dimensions = (x,y)
    False - inarray is resampled by factors of (i/x) * (j/y)
    True - inarray is resampled by (i-1)/(x-1) * (j-1)/(y-1)
    This prevents extrapolation one element beyond bounds of input array.
    '''
    # Work in float; np.cast was removed from modern NumPy, so use astype.
    if a.dtype not in (np.float64, np.float32):
        a = a.astype(float)
    m1 = int(minusone)
    ofs = int(centre) * 0.5
    old = np.array(a.shape)
    ndims = len(a.shape)
    if len(newdims) != ndims:
        print ("[congrid] dimensions error. "
               "This routine currently only support "
               "rebinning to the same number of dimensions.")
        return None
    newdims = np.asarray(newdims, dtype=float)
    dimlist = []
    if method == 'neighbour':
        # np.indices requires integer dims; hoist it out of the loop.
        grids = np.indices(newdims.astype(int))
        for i in range(ndims):
            dimlist.append((old[i] - m1) / (newdims[i] - m1)
                           * (grids[i] + ofs) - ofs)
        cd = np.array(dimlist).round().astype(int)
        # Fancy indexing with a list of arrays is deprecated; use a tuple.
        return a[tuple(cd)]
    elif method in ['nearest', 'linear']:
        # calculate new (fractional) coordinates along each axis
        for i in range(ndims):
            base = np.arange(newdims[i])
            dimlist.append((old[i] - m1) / (newdims[i] - m1)
                           * (base + ofs) - ofs)
        # specify old dims (np.float alias was removed; plain float works)
        olddims = [np.arange(i, dtype=float) for i in list(a.shape)]
        # first interpolation - for ndims = any (interp1d works on last axis)
        mint = scipy.interpolate.interp1d(olddims[-1], a, kind=method)
        newa = mint(dimlist[-1])
        # range() is lazy in Python 3 - materialise before concatenation
        trorder = [ndims - 1] + list(range(ndims - 1))
        for i in range(ndims - 2, -1, -1):
            newa = newa.transpose(trorder)
            mint = scipy.interpolate.interp1d(olddims[i], newa, kind=method)
            newa = mint(dimlist[i])
        if ndims > 1:
            # need one more transpose to return to original dimensions
            newa = newa.transpose(trorder)
        return newa
    elif method in ['spline']:
        nslices = [slice(0, j) for j in list(newdims)]
        newcoords = np.mgrid[nslices]
        # np.rank was removed from NumPy; np.ndim is the replacement.
        newcoords_dims = list(range(np.ndim(newcoords)))
        # make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
        newcoords_tr = newcoords.transpose(newcoords_dims)
        # makes a view that affects newcoords
        newcoords_tr += ofs
        deltas = (np.asarray(old) - m1) / (newdims - m1)
        newcoords_tr *= deltas
        newcoords_tr -= ofs
        return scipy.ndimage.map_coordinates(a, newcoords)
    else:
        print("Congrid error: Unrecognized interpolation type.\n",
              "Currently only \'neighbour\', \'nearest\',\'linear\',",
              "and \'spline\' are supported.")
        return None
# Parse CLI options and derive the module-level configuration used below.
args = parser.parse_args()
base_image_path = args.base_image_path
style_reference_image_paths = args.style_image_paths
style_image_paths = [path for path in args.style_image_paths]
result_prefix = args.result_prefix
content_weight = args.content_weight
total_variation_weight = args.tv_weight
# Target dimensions are filled in later by preprocess_image(..., load_dims=True).
img_width = img_height = 0
img_WIDTH = img_HEIGHT = 0
aspect_ratio = 0
read_mode = "color"
style_weights = []
# If the number of per-image style weights does not match the number of style
# images, spread the (scaled) total weight evenly over all style images.
if len(style_image_paths) != len(args.style_weight):
    weight_sum = sum(args.style_weight) * args.style_scale
    count = len(style_image_paths)
    for i in range(len(style_image_paths)):
        style_weights.append(weight_sum / count)
else:
    style_weights = [weight*args.style_scale for weight in args.style_weight]
def pooling_func(x):
    '''Halve spatial resolution with 2x2 max pooling (stride 2).'''
    # Swap in average pooling here to experiment with softer downsampling:
    # return AveragePooling2D((2, 2), strides=(2, 2))(x)
    return MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
#start proc_img
def preprocess_image(image_path, load_dims=False):
    '''Load an image and convert it to a (1, H, W, 3) float32 batch:
    resized to the target size, RGB->BGR, ImageNet channel means subtracted.

    When load_dims is True, the module-level target dimensions and aspect
    ratio are (re)computed from this image (done once for the base image).
    '''
    global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio
    mode = "RGB"
    # mode = "RGB" if read_mode == "color" else "L"
    img = imread(image_path, mode=mode)  # Prevents crashes due to PNG images (ARGB)
    if load_dims:
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = float(img_HEIGHT) / img_WIDTH
        img_width = args.img_size
        img_height = int(img_width * aspect_ratio)
    img = imresize(img, (img_width, img_height)).astype('float32')
    # RGB -> BGR, then subtract the VGG/ImageNet per-channel means
    img = img[:, :, ::-1]
    img[:, :, 0] -= 103.939
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 123.68
    if K.image_dim_ordering() == "th":
        img = img.transpose((2, 0, 1)).astype('float32')
    # Bug fix: the batch axis must be added for BOTH dim orderings. The
    # original expanded only under "th", so "tf" images came back 3-D and
    # broke the later axis-0 concatenation into (nb_tensors, H, W, 3).
    img = np.expand_dims(img, axis=0)
    return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
    '''Invert preprocess_image: reshape to H x W x 3, add the channel means
    back, BGR -> RGB, and clip to displayable uint8.'''
    if K.image_dim_ordering() == "th":
        x = x.reshape((3, img_width, img_height))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_width, img_height, 3))
    # Undo the mean subtraction applied during preprocessing.
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        x[:, :, channel] += mean
    # BGR -> RGB
    x = x[:, :, ::-1]
    return np.clip(x, 0, 255).astype('uint8')
# Preprocess the content image first (load_dims=True fixes the global target
# size), then every style image; all become constant Keras variables.
base_image = K.variable(preprocess_image(base_image_path, True))
style_reference_images = [K.variable(preprocess_image(path)) for path in style_image_paths]
# this will contain our generated image
combination_image = K.placeholder((1, img_width, img_height, 3)) # tensorflow
# Tensor order matters downstream: [content, style_1..style_n, combination].
image_tensors = [base_image]
for style_image_tensor in style_reference_images:
    image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)
nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2 # Content and Output image not considered
# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)
if K.image_dim_ordering() == "th":
    shape = (nb_tensors, 3, img_width, img_height)
else:
    shape = (nb_tensors, img_width, img_height, 3)
#build the model
model_input = Input(tensor=input_tensor, shape=shape)
# build the VGG16 network with our 3 images as input
x = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', name='conv1_1', padding='same')(model_input)
x = Conv2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)
x = pooling_func(x)
x = Conv2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)
# NOTE(review): Convolution2D is the legacy alias of Conv2D - same layer,
# inconsistent spelling kept as-is.
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)
x = pooling_func(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)
x = pooling_func(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv4_2', padding='same')(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv4_3', padding='same')(x)
x = pooling_func(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_1', padding='same')(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_2', padding='same')(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_3', padding='same')(x)
x = pooling_func(x)
model = Model(model_input, x)
# Load the pretrained no-top VGG16 weights matching the backend dim ordering.
if K.image_dim_ordering() == "th":
    weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
    weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
print("Weights Path: ", weights)
model.load_weights(weights)
print('Model loaded.')
# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
    '''Total variation loss: penalises differences between neighbouring
    pixels so the generated image stays locally coherent.

    Indexing assumes tf (channels-last) dim ordering.
    '''
    assert K.ndim(x) == 4
    core = x[:, :img_width - 1, :img_height - 1, :]
    row_shift = x[:, 1:, :img_height - 1, :]
    col_shift = x[:, :img_width - 1, 1:, :]
    a = K.square(core - row_shift)
    b = K.square(core - col_shift)
    return K.sum(K.pow(a + b, 1.25))
def make_patches(x, patch_size, patch_stride):
    from theano.tensor.nnet.neighbours import images2neibs
    '''Break image `x` up into a bunch of patches.

    Returns (patches, patches_norm) where patches has shape
    (num_patches, channels, patch_size, patch_size) and patches_norm holds
    the per-patch L2 norm with singleton dims kept for broadcasting.
    '''
    # images2neibs emits one row per neighbourhood, grouped per channel.
    patches = images2neibs(x,
        (patch_size, patch_size), (patch_stride, patch_stride),
        mode='valid')
    # neibs are sorted per-channel
    # regroup as (channels, num_patches, ps, ps) then move channels to axis 1
    patches = K.reshape(patches, (K.shape(x)[1], K.shape(patches)[0] // K.shape(x)[1], patch_size, patch_size))
    patches = K.permute_dimensions(patches, (1, 0, 2, 3))
    patches_norm = K.sqrt(K.sum(K.square(patches), axis=(1,2,3), keepdims=True))
    return patches, patches_norm
def find_patch_matches(comb, comb_norm, ref):
    '''For each patch in `comb`, return the index of the best matching
    patch in `ref` (normalised cross-correlation).'''
    # cross-correlation == convolution with spatially flipped kernels
    flipped_ref = ref[:, :, ::-1, ::-1]
    scores = K.conv2d(comb, flipped_ref, border_mode='valid')
    return K.argmax(scores / comb_norm, axis=1)
def mrf_loss(source, combination, patch_size=3, patch_stride=1):
    '''CNNMRF http://arxiv.org/pdf/1601.04589v1.pdf

    Markov-random-field style loss: for every patch of the combination
    feature map, find the closest source patch and penalise the squared
    distance to it.
    '''
    # extract patches from feature maps (add a batch axis first)
    source = K.expand_dims(source, 0)
    combination = K.expand_dims(combination, 0)
    # Bug fix: make_patches expects the tensors themselves. The original
    # passed `K.variable(...).eval` - an *uncalled* bound method, not a
    # tensor - which images2neibs cannot consume.
    combination_patches, combination_patches_norm = make_patches(combination, patch_size, patch_stride)
    source_patches, source_patches_norm = make_patches(source, patch_size, patch_stride)
    # find best patches and calculate loss
    patch_ids = find_patch_matches(combination_patches, combination_patches_norm, source_patches / source_patches_norm)
    best_source_patches = K.reshape(source_patches[patch_ids], K.shape(combination_patches))
    loss = K.sum(K.square(best_source_patches - combination_patches)) / patch_size ** 2
    return loss
def content_loss(base, combination):
    '''Auxiliary loss that keeps the "content" of the base image in the
    generated image: scaled sum of squared feature differences.

    args.content_loss_type selects the normalisation: 1 uses the CNNMRF-style
    1/(2*sqrt(channels)*sqrt(size)) factor, 2 uses 1/(channels*size), anything
    else leaves the loss unscaled.
    '''
    channel_dim = 0 if K.image_dim_ordering() == "th" else -1
    channels = K.shape(base)[channel_dim]
    size = img_width * img_height
    if args.content_loss_type == 1:
        scaling = 1 / (2. * channels ** 0.5 * size ** 0.5)
    elif args.content_loss_type == 2:
        scaling = 1 / (channels * size)
    else:
        scaling = 1.
    return scaling * K.sum(K.square(combination - base))
# combine these loss functions into a single scalar
loss = K.variable(0.)
layer_features = outputs_dict[args.content_layer] # 'conv5_2' or 'conv4_2'
# batch index 0 is the content image, the last index is the generated image
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[nb_tensors - 1, :, :, :]
loss += content_weight * content_loss(base_image_features,
                                      combination_features)
channel_index = -1
#Style Loss calculation
mrf_layers = ['conv3_1', 'conv4_1']
# feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
for layer_name in mrf_layers:
    output_features = outputs_dict[layer_name]
    shape = shape_dict[layer_name]
    combination_features = output_features[nb_tensors - 1, :, :, :]
    # batch indices 1 .. nb_tensors-2 are the style images
    style_features = output_features[1:nb_tensors - 1, :, :, :]
    sl = []
    for j in range(nb_style_images):
        sl.append(mrf_loss(style_features[j], combination_features))
    for j in range(nb_style_images):
        # each MRF layer contributes an equal share of that style's weight
        loss += (style_weights[j] / len(mrf_layers)) * sl[j]
loss += total_variation_weight * total_variation_loss(combination_image)
# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)
outputs = [loss]
# K.gradients may return a list/tuple or a single tensor depending on backend
if type(grads) in {list, tuple}:
    outputs += grads
else:
    outputs.append(grads)
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
    '''Evaluate the Keras graph once for flattened pixels `x`; return
    (loss, gradients flattened to float64) as scipy.optimize expects.'''
    batch = x.reshape((1, img_width, img_height, 3))
    outs = f_outputs([batch])
    loss_value = outs[0]
    grad_outs = outs[1:]
    if len(grad_outs) == 1:
        grad_values = grad_outs[0].flatten().astype('float64')
    else:
        grad_values = np.array(grad_outs).flatten().astype('float64')
    return loss_value, grad_values
# # this Evaluator class makes it possible
# # to compute loss and gradients in one pass
# # while retrieving them via two separate functions,
# # "loss" and "grads". This is done because scipy.optimize
# # requires separate functions for loss and gradients,
# # but computing them separately would be inefficient.
class Evaluator(object):
    '''Caches the result of a single combined loss+gradient evaluation so
    scipy.optimize - which wants separate loss/grads callables - only pays
    for one pass through the network per iteration. `loss` must be called
    before `grads`, which then clears the cache.
    '''
    def __init__(self):
        self.loss_value = None
        # NOTE(review): attribute name differs from `grad_values` used by
        # loss()/grads() below; kept as-is to preserve behaviour.
        self.grads_values = None

    def loss(self, x):
        assert self.loss_value is None
        self.loss_value, self.grad_values = eval_loss_and_grads(x)
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        cached = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return cached
evaluator = Evaluator()
# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
# Choose the starting image: the content image, random noise, or a user file.
if "content" in args.init_image or "gray" in args.init_image:
    x = preprocess_image(base_image_path, True)
elif "noise" in args.init_image:
    x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.
    if K.image_dim_ordering() == "th":
        x = x.transpose((0, 3, 1, 2))
else:
    print("Using initial image : ", args.init_image)
    x = preprocess_image(args.init_image)
num_iter = args.num_iter
prev_min_val = -1
# for scaled_img in img_pyramid:
# image_tensors = [base_image]
# for style_image_tensor in style_reference_images:
# image_tensors.append(style_image_tensor)
# image_tensors.append(combination_image)
# input_tensor = K.concatenate(image_tensors, axis=0)
# model_input = Input(tensor=input_tensor, shape=shape)
# NOTE(review): `scaled_imgs` is not defined in this part of the file - this
# pyramid loop looks like work in progress; confirm it is bound before running.
for img_scale in scaled_imgs:
    image_tensors = [base_image]
    for style_image_tensor in style_reference_images:
        image_tensors.append(style_image_tensor)
    image_tensors.append(combination_image)
    # combine the various images into a single Keras tensor
    input_tensor = K.concatenate(image_tensors, axis=0)
    # build the model
    model_input = Input(tensor=input_tensor, shape=shape)
    for i in range(num_iter):
        print("Starting iteration %d of %d" % ((i + 1), num_iter))
        start_time = time.time()
        # One L-BFGS step (capped at 20 function evaluations) over flat pixels.
        x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)
        if prev_min_val == -1:
            prev_min_val = min_val
        improvement = (prev_min_val - min_val) / prev_min_val * 100
        print('Current loss value:', min_val, " Improvement : %0.3f" % improvement, "%")
        prev_min_val = min_val
        # save current generated image
        img = deprocess_image(x.copy())
        # Restore the original aspect ratio before saving.
        img_ht = int(img_width * aspect_ratio)
        print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
        img = imresize(img, (img_width, img_ht), interp="bilinear")
        fname = result_prefix + '_at_iteration_%d.png' % (i + 1)
        imsave(fname, img)
        end_time = time.time()
        print('Image saved as', fname)
        print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))
| |
import json
import logging
from functools import lru_cache
import pyvat
from django.conf import settings
from django.contrib import auth
from django.core import exceptions as django_exceptions
from django.core.validators import RegexValidator
from django.db import models as django_models
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions, serializers
from waldur_core.core import fields as core_fields
from waldur_core.core import models as core_models
from waldur_core.core import serializers as core_serializers
from waldur_core.core.clean_html import clean_html
from waldur_core.core.fields import MappedChoiceField
from waldur_core.media.serializers import ProtectedMediaSerializerMixin
from waldur_core.structure import models
from waldur_core.structure import permissions as structure_permissions
from waldur_core.structure.exceptions import (
ServiceBackendError,
ServiceBackendNotImplemented,
)
from waldur_core.structure.filters import filter_visible_users
from waldur_core.structure.managers import filter_queryset_for_user
from waldur_core.structure.models import CUSTOMER_DETAILS_FIELDS
from waldur_core.structure.registry import get_resource_type, get_service_type
User = auth.get_user_model()
logger = logging.getLogger(__name__)
def get_options_serializer_class(service_type):
    """Return the options serializer subclass registered for service_type.

    Raises StopIteration when no subclass matches (same as the previous
    `next()`-based implementation).
    """
    matches = (
        cls
        for cls in ServiceOptionsSerializer.get_subclasses()
        if get_service_type(cls) == service_type
    )
    return next(matches)
@lru_cache
def get_resource_serializer_class(resource_type):
    """Find the resource serializer registered for resource_type.

    Returns None when no subclass matches; results are memoized.
    """
    for cls in BaseResourceSerializer.get_subclasses():
        if (
            get_resource_type(cls.Meta.model) == resource_type
            and get_service_type(cls) is not None
        ):
            return cls
    return None
class PermissionFieldFilteringMixin:
    """
    Mixin allowing to filter related fields.
    In order to constrain the list of entities that can be used
    as a value for the field:
    1. Make sure that the entity in question has corresponding
       Permission class defined.
    2. Implement `get_filtered_field_names()` method
       in the class that this mixin is mixed into and return
       the field in question from that method.
    """
    def get_fields(self):
        fields = super().get_fields()
        try:
            user = self.context['request'].user
        except (KeyError, AttributeError):
            # No request in context (e.g. schema generation) - nothing to filter.
            return fields
        for field_name in self.get_filtered_field_names():
            field = fields.get(field_name)
            if field is None:  # field could be not required by user
                continue
            field.queryset = filter_queryset_for_user(field.queryset, user)
        return fields

    def get_filtered_field_names(self):
        raise NotImplementedError(
            'Implement get_filtered_field_names() to return list of filtered fields'
        )
class PermissionListSerializer(serializers.ListSerializer):
    """
    Allows to filter related queryset by user.
    Counterpart of PermissionFieldFilteringMixin.
    In order to use it set Meta.list_serializer_class. Example:
    >>> class PermissionProjectSerializer(BasicProjectSerializer):
    >>>     class Meta(BasicProjectSerializer.Meta):
    >>>         list_serializer_class = PermissionListSerializer
    >>>
    >>> class CustomerSerializer(serializers.HyperlinkedModelSerializer):
    >>>     projects = PermissionProjectSerializer(many=True, read_only=True)
    """
    def to_representation(self, data):
        try:
            user = self.context['request'].user
        except (KeyError, AttributeError):
            # No request available - serialize the data unfiltered.
            return super().to_representation(data)
        if isinstance(data, (django_models.Manager, django_models.query.QuerySet)):
            data = filter_queryset_for_user(data.all(), user)
        return super().to_representation(data)
class BasicUserSerializer(serializers.HyperlinkedModelSerializer):
    """Minimal user representation: hyperlink, UUID and display names."""
    class Meta:
        model = User
        fields = (
            'url',
            'uuid',
            'username',
            'full_name',
            'native_name',
        )
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
class BasicProjectSerializer(core_serializers.BasicInfoSerializer):
    """Basic-info serializer bound to the Project model."""
    class Meta(core_serializers.BasicInfoSerializer.Meta):
        model = models.Project
class PermissionProjectSerializer(BasicProjectSerializer):
    """BasicProjectSerializer whose `many=True` output is filtered per-user
    via PermissionListSerializer."""
    class Meta(BasicProjectSerializer.Meta):
        list_serializer_class = PermissionListSerializer
class ProjectTypeSerializer(serializers.HyperlinkedModelSerializer):
    """Read/write serializer for project types (uuid, url, name, description)."""
    class Meta:
        model = models.ProjectType
        fields = ('uuid', 'url', 'name', 'description')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid', 'view_name': 'project_type-detail'},
        }
class ProjectDetailsSerializerMixin(serializers.Serializer):
    """Shared field-level validation for project description and end date."""

    def validate_description(self, value):
        # Strip surrounding whitespace and sanitize embedded HTML.
        return clean_html(value.strip())

    def validate_end_date(self, end_date):
        # An empty/None end date is allowed; only past dates are rejected.
        if not end_date:
            return end_date
        if end_date < timezone.datetime.today().date():
            raise serializers.ValidationError(
                {'end_date': _('Cannot be earlier than the current date.')}
            )
        return end_date
class ProjectSerializer(
    ProjectDetailsSerializerMixin,
    core_serializers.RestrictedSerializerMixin,
    PermissionFieldFilteringMixin,
    core_serializers.AugmentedSerializerMixin,
    serializers.HyperlinkedModelSerializer,
):
    """Full project serializer with flattened customer/type info and
    permission-aware filtering of the `customer` field."""
    class Meta:
        model = models.Project
        fields = (
            'url',
            'uuid',
            'name',
            'customer',
            'customer_uuid',
            'customer_name',
            'customer_native_name',
            'customer_abbreviation',
            'description',
            'created',
            'type',
            'type_name',
            'type_uuid',
            'backend_id',
            'end_date',
            'oecd_fos_2007_code',
        )
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'customer': {'lookup_field': 'uuid'},
            'type': {'lookup_field': 'uuid', 'view_name': 'project_type-detail'},
        }
        # Flattened read-only fields pulled from related objects.
        related_paths = {
            'customer': ('uuid', 'name', 'native_name', 'abbreviation'),
            'type': ('name', 'uuid'),
        }
    @staticmethod
    def eager_load(queryset, request=None):
        # Limit the query to the columns actually serialized above.
        related_fields = (
            'uuid',
            'name',
            'created',
            'description',
            'customer__uuid',
            'customer__name',
            'customer__native_name',
            'customer__abbreviation',
        )
        return queryset.select_related('customer').only(*related_fields)
    def get_filtered_field_names(self):
        return ('customer',)
    def validate(self, attrs):
        # On update the customer cannot change, so take it from the instance.
        customer = (
            attrs.get('customer') if not self.instance else self.instance.customer
        )
        end_date = attrs.get('end_date')
        if end_date:
            # NOTE(review): presumably raises for non-owners - relies on
            # structure_permissions.is_owner; confirm its failure behaviour.
            structure_permissions.is_owner(self.context['request'], None, customer)
        return attrs
class CountrySerializerMixin(serializers.Serializer):
    """Adds an optional `country` choice field (optionally narrowed by the
    WALDUR_CORE['COUNTRIES'] setting) plus a read-only display name."""
    COUNTRIES = core_fields.COUNTRIES
    # Narrow the choice list when the deployment whitelists countries.
    if settings.WALDUR_CORE.get('COUNTRIES'):
        COUNTRIES = [
            item for item in COUNTRIES if item[0] in settings.WALDUR_CORE['COUNTRIES']
        ]
    country = serializers.ChoiceField(
        required=False, choices=COUNTRIES, allow_blank=True
    )
    country_name = serializers.ReadOnlyField(source='get_country_display')
class CustomerSerializer(
    ProtectedMediaSerializerMixin,
    CountrySerializerMixin,
    core_serializers.RestrictedSerializerMixin,
    core_serializers.AugmentedSerializerMixin,
    serializers.HyperlinkedModelSerializer,
):
    """Organization (customer) serializer: nested projects and user lists,
    flattened division info, staff-only fields and VAT validation."""
    projects = PermissionProjectSerializer(many=True, read_only=True)
    owners = BasicUserSerializer(source='get_owners', many=True, read_only=True)
    support_users = BasicUserSerializer(
        source='get_support_users', many=True, read_only=True
    )
    service_managers = BasicUserSerializer(
        source='get_service_managers', many=True, read_only=True
    )
    display_name = serializers.ReadOnlyField(source='get_display_name')
    # Flattened (read-only) attributes of the customer's division hierarchy.
    division_name = serializers.ReadOnlyField(source='division.name')
    division_uuid = serializers.ReadOnlyField(source='division.uuid')
    division_parent_name = serializers.ReadOnlyField(source='division.parent.name')
    division_parent_uuid = serializers.ReadOnlyField(source='division.parent.uuid')
    division_type_name = serializers.ReadOnlyField(source='division.type.name')
    division_type_uuid = serializers.ReadOnlyField(source='division.type.uuid')
    class Meta:
        model = models.Customer
        fields = (
            'url',
            'uuid',
            'created',
            'division',
            'division_name',
            'division_uuid',
            'division_parent_name',
            'division_parent_uuid',
            'division_type_name',
            'division_type_uuid',
            'display_name',
            'projects',
            'owners',
            'support_users',
            'service_managers',
            'backend_id',
            'image',
            'default_tax_percent',
            'accounting_start_date',
        ) + CUSTOMER_DETAILS_FIELDS
        # Writable only by staff; forced read-only for everyone else below.
        staff_only_fields = (
            'access_subnets',
            'accounting_start_date',
            'default_tax_percent',
            'agreement_number',
            'domain',
            'division',
        )
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'division': {'lookup_field': 'uuid'},
        }
    def get_fields(self):
        # Make staff-only fields read-only for non-staff requests.
        fields = super(CustomerSerializer, self).get_fields()
        try:
            request = self.context['view'].request
            user = request.user
        except (KeyError, AttributeError):
            return fields
        if not user.is_staff:
            for field_name in set(CustomerSerializer.Meta.staff_only_fields) & set(
                fields.keys()
            ):
                fields[field_name].read_only = True
        return fields
    def create(self, validated_data):
        user = self.context['request'].user
        if 'domain' not in validated_data:
            # Staff can specify domain name on organization creation
            validated_data['domain'] = user.organization
        return super(CustomerSerializer, self).create(validated_data)
    @staticmethod
    def eager_load(queryset, request=None):
        return queryset.prefetch_related('projects')
    def validate(self, attrs):
        # VAT number (if given) is format-checked and, when the number or
        # country changed, verified against the EU VIES service via pyvat.
        country = attrs.get('country')
        vat_code = attrs.get('vat_code')
        if vat_code:
            # Check VAT format
            if not pyvat.is_vat_number_format_valid(vat_code, country):
                raise serializers.ValidationError(
                    {'vat_code': _('VAT number has invalid format.')}
                )
            # Check VAT number in EU VAT Information Exchange System
            # if customer is new or either VAT number or country of the customer has changed
            if (
                not self.instance
                or self.instance.vat_code != vat_code
                or self.instance.country != country
            ):
                check_result = pyvat.check_vat_number(vat_code, country)
                if check_result.is_valid:
                    attrs['vat_name'] = check_result.business_name
                    attrs['vat_address'] = check_result.business_address
                    if not attrs.get('contact_details'):
                        attrs['contact_details'] = attrs['vat_address']
                elif check_result.is_valid is False:
                    raise serializers.ValidationError(
                        {'vat_code': _('VAT number is invalid.')}
                    )
                else:
                    # is_valid is None: the lookup itself failed.
                    logger.debug(
                        'Unable to check VAT number %s for country %s. Error message: %s',
                        vat_code,
                        country,
                        check_result.log_lines,
                    )
                    raise serializers.ValidationError(
                        {'vat_code': _('Unable to check VAT number.')}
                    )
        return attrs
class NestedCustomerSerializer(
    core_serializers.AugmentedSerializerMixin,
    core_serializers.HyperlinkedRelatedModelSerializer,
):
    """Compact customer reference (uuid + url) for embedding in other payloads."""
    class Meta:
        model = models.Customer
        fields = ('uuid', 'url')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
class NestedProjectSerializer(
    core_serializers.AugmentedSerializerMixin,
    core_serializers.HyperlinkedRelatedModelSerializer,
):
    """Compact project reference (uuid + url) for embedding in other payloads."""
    class Meta:
        model = models.Project
        fields = ('uuid', 'url')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
class NestedProjectPermissionSerializer(serializers.ModelSerializer):
    """Project permission rendered from the project's point of view:
    project link/uuid/name plus the role and a link to the permission itself."""
    url = serializers.HyperlinkedRelatedField(
        source='project',
        lookup_field='uuid',
        view_name='project-detail',
        queryset=models.Project.objects.all(),
    )
    uuid = serializers.ReadOnlyField(source='project.uuid')
    name = serializers.ReadOnlyField(source='project.name')
    permission = serializers.HyperlinkedRelatedField(
        source='pk',
        view_name='project_permission-detail',
        queryset=models.ProjectPermission.objects.all(),
    )
    class Meta:
        model = models.ProjectPermission
        fields = ['url', 'uuid', 'name', 'role', 'permission', 'expiration_time']
class CustomerUserSerializer(serializers.ModelSerializer):
    """User serialized in the context of one customer (expects
    `customer` in the serializer context): role, permission link and
    the user's project permissions within that customer."""
    role = serializers.ReadOnlyField()
    is_service_manager = serializers.ReadOnlyField()
    expiration_time = serializers.ReadOnlyField(source='perm.expiration_time')
    permission = serializers.HyperlinkedRelatedField(
        source='perm.pk', view_name='customer_permission-detail', read_only=True,
    )
    projects = NestedProjectPermissionSerializer(many=True, read_only=True)
    class Meta:
        model = User
        fields = [
            'url',
            'uuid',
            'username',
            'full_name',
            'email',
            'role',
            'permission',
            'projects',
            'is_service_manager',
            'expiration_time',
        ]
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
    def to_representation(self, user):
        # Attach per-customer permission data onto the user instance so the
        # declared read-only fields above can resolve their sources.
        customer = self.context['customer']
        permission = models.CustomerPermission.objects.filter(
            customer=customer, user=user, is_active=True
        ).first()
        projects = models.ProjectPermission.objects.filter(
            project__customer=customer, user=user, is_active=True
        )
        is_service_manager = customer.has_user(
            user, role=models.CustomerRole.SERVICE_MANAGER
        )
        setattr(user, 'perm', permission)
        setattr(user, 'role', permission and permission.role)
        setattr(user, 'projects', projects)
        setattr(user, 'is_service_manager', is_service_manager)
        return super(CustomerUserSerializer, self).to_representation(user)
class ProjectUserSerializer(serializers.ModelSerializer):
    """User serialized in the context of one project (expects `project` in
    the serializer context): role, permission link and expiration time."""
    role = serializers.ReadOnlyField()
    expiration_time = serializers.ReadOnlyField(source='perm.expiration_time')
    permission = serializers.HyperlinkedRelatedField(
        source='perm.pk',
        view_name='project_permission-detail',
        queryset=models.ProjectPermission.objects.all(),
    )
    class Meta:
        model = User
        fields = [
            'url',
            'uuid',
            'username',
            'full_name',
            'email',
            'role',
            'permission',
            'expiration_time',
        ]
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
    def to_representation(self, user):
        # Attach the active project permission (if any) onto the user so the
        # declared fields above can resolve their sources.
        project = self.context['project']
        permission = models.ProjectPermission.objects.filter(
            project=project, user=user, is_active=True
        ).first()
        setattr(user, 'perm', permission)
        setattr(user, 'role', permission and permission.role)
        return super(ProjectUserSerializer, self).to_representation(user)
class BasePermissionSerializer(
    core_serializers.AugmentedSerializerMixin, serializers.HyperlinkedModelSerializer
):
    """Shared base for permission serializers: flattens key user attributes
    (username, names, uuid, email) next to the user link."""
    class Meta:
        fields = (
            'user',
            'user_full_name',
            'user_native_name',
            'user_username',
            'user_uuid',
            'user_email',
        )
        related_paths = {
            'user': ('username', 'full_name', 'native_name', 'uuid', 'email'),
        }
class BasicCustomerPermissionSerializer(BasePermissionSerializer):
    """Compact customer permission: role plus flattened customer info.

    NOTE(review): unlike CustomerPermissionSerializer, `fields` here does NOT
    append BasePermissionSerializer.Meta.fields - presumably intentional for
    the 'basic' variant; confirm before relying on user fields being absent.
    """
    class Meta(BasePermissionSerializer.Meta):
        model = models.CustomerPermission
        fields = (
            'url',
            'pk',
            'role',
            'customer_uuid',
            'customer_name',
            'customer_native_name',
            'customer_abbreviation',
        )
        related_paths = dict(
            customer=('name', 'native_name', 'abbreviation', 'uuid'),
            **BasePermissionSerializer.Meta.related_paths
        )
        extra_kwargs = {
            'customer': {
                'view_name': 'customer-detail',
                'lookup_field': 'uuid',
                'queryset': models.Customer.objects.all(),
            }
        }
class CustomerPermissionSerializer(
    PermissionFieldFilteringMixin, BasePermissionSerializer
):
    """Full customer permission serializer: grants a role on a customer to a
    user, with uniqueness and expiration-time validation."""
    class Meta(BasePermissionSerializer.Meta):
        model = models.CustomerPermission
        fields = (
            'url',
            'pk',
            'role',
            'created',
            'expiration_time',
            'created_by',
            'customer',
            'customer_uuid',
            'customer_name',
            'customer_native_name',
            'customer_abbreviation',
        ) + BasePermissionSerializer.Meta.fields
        related_paths = dict(
            customer=('name', 'native_name', 'abbreviation', 'uuid'),
            **BasePermissionSerializer.Meta.related_paths
        )
        # Immutable after creation; grants are revoked/recreated, not edited.
        protected_fields = ('customer', 'role', 'user', 'created_by', 'created')
        extra_kwargs = {
            'user': {
                'view_name': 'user-detail',
                'lookup_field': 'uuid',
                'queryset': User.objects.all(),
            },
            'created_by': {
                'view_name': 'user-detail',
                'lookup_field': 'uuid',
                'read_only': True,
            },
            'customer': {
                'view_name': 'customer-detail',
                'lookup_field': 'uuid',
                'queryset': models.Customer.objects.all(),
            },
        }
    def validate(self, data):
        # Only enforce uniqueness on creation; protected_fields prevents
        # customer/user changes on update anyway.
        if not self.instance:
            customer = data['customer']
            user = data['user']
            if customer.has_user(user):
                raise serializers.ValidationError(
                    _('The fields customer and user must make a unique set.')
                )
        return data
    def create(self, validated_data):
        customer = validated_data['customer']
        user = validated_data['user']
        role = validated_data['role']
        expiration_time = validated_data.get('expiration_time')
        created_by = self.context['request'].user
        # NOTE: `_` here locally shadows the gettext alias (harmless in this scope).
        permission, _ = customer.add_user(user, role, created_by, expiration_time)
        return permission
    def validate_expiration_time(self, value):
        if value is not None and value < timezone.now():
            raise serializers.ValidationError(
                _('Expiration time should be greater than current time.')
            )
        return value
    def get_filtered_field_names(self):
        return ('customer',)
class CustomerPermissionLogSerializer(CustomerPermissionSerializer):
    """Same payload as CustomerPermissionSerializer, routed to the
    permission-log detail view."""
    class Meta(CustomerPermissionSerializer.Meta):
        view_name = 'customer_permission_log-detail'
class CustomerPermissionReviewSerializer(
    core_serializers.AugmentedSerializerMixin, serializers.HyperlinkedModelSerializer
):
    """Read-mostly serializer for customer permission reviews with flattened
    reviewer and customer info; lifecycle fields are read-only."""
    class Meta:
        model = models.CustomerPermissionReview
        view_name = 'customer_permission_review-detail'
        fields = (
            'url',
            'uuid',
            'reviewer_full_name',
            'reviewer_uuid',
            'customer_uuid',
            'customer_name',
            'is_pending',
            'created',
            'closed',
        )
        read_only_fields = (
            'is_pending',
            'closed',
        )
        related_paths = {
            'reviewer': ('full_name', 'uuid'),
            'customer': ('name', 'uuid'),
        }
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
class ProjectPermissionSerializer(
    PermissionFieldFilteringMixin, BasePermissionSerializer
):
    """Full project permission serializer: grants a role on a project to a
    user, with uniqueness and expiration-time validation. Mirrors
    CustomerPermissionSerializer for the project scope."""
    customer_name = serializers.ReadOnlyField(source='project.customer.name')
    class Meta(BasePermissionSerializer.Meta):
        model = models.ProjectPermission
        fields = (
            'url',
            'pk',
            'role',
            'created',
            'expiration_time',
            'created_by',
            'project',
            'project_uuid',
            'project_name',
            'customer_name',
        ) + BasePermissionSerializer.Meta.fields
        related_paths = dict(
            project=('name', 'uuid'), **BasePermissionSerializer.Meta.related_paths
        )
        # Immutable after creation; grants are revoked/recreated, not edited.
        protected_fields = ('project', 'role', 'user', 'created_by', 'created')
        extra_kwargs = {
            'user': {
                'view_name': 'user-detail',
                'lookup_field': 'uuid',
                'queryset': User.objects.all(),
            },
            'created_by': {
                'view_name': 'user-detail',
                'lookup_field': 'uuid',
                'read_only': True,
            },
            'project': {
                'view_name': 'project-detail',
                'lookup_field': 'uuid',
                'queryset': models.Project.objects.all(),
            },
        }
    def validate(self, data):
        # Only enforce uniqueness on creation; protected_fields prevents
        # project/user changes on update anyway.
        if not self.instance:
            project = data['project']
            user = data['user']
            if project.has_user(user):
                raise serializers.ValidationError(
                    _('The fields project and user must make a unique set.')
                )
        return data
    def create(self, validated_data):
        project = validated_data['project']
        user = validated_data['user']
        role = validated_data['role']
        expiration_time = validated_data.get('expiration_time')
        created_by = self.context['request'].user
        # NOTE: `_` here locally shadows the gettext alias (harmless in this scope).
        permission, _ = project.add_user(user, role, created_by, expiration_time)
        return permission
    def validate_expiration_time(self, value):
        if value is not None and value < timezone.now():
            raise serializers.ValidationError(
                _('Expiration time should be greater than current time.')
            )
        return value
    def get_filtered_field_names(self):
        return ('project',)
class BasicProjectPermissionSerializer(BasePermissionSerializer):
    """Compact, read-oriented view of a project permission (used when
    embedding a user's permissions into other payloads)."""

    customer_name = serializers.ReadOnlyField(source='project.customer.name')

    class Meta(BasePermissionSerializer.Meta):
        model = models.ProjectPermission
        fields = (
            'url',
            'pk',
            'role',
            'project_uuid',
            'project_name',
            'customer_name',
        )
        related_paths = dict(
            project=('name', 'uuid'), **BasePermissionSerializer.Meta.related_paths
        )
        extra_kwargs = {
            'project': {
                'view_name': 'project-detail',
                'lookup_field': 'uuid',
                'queryset': models.Project.objects.all(),
            }
        }
class ProjectPermissionLogSerializer(ProjectPermissionSerializer):
    """Variant of ProjectPermissionSerializer routed through the
    permission-log detail view; only the hyperlink view name differs."""

    class Meta(ProjectPermissionSerializer.Meta):
        view_name = 'project_permission_log-detail'
class UserSerializer(
    core_serializers.AugmentedSerializerMixin, serializers.HyperlinkedModelSerializer
):
    """Full user profile serializer, including permissions and auth token.

    Field visibility is adjusted per requesting user in ``get_fields()``:
    staff-only fields are hidden or locked, the token is only shown to the
    user herself, and detail fields are frozen for externally-registered
    users.
    """

    email = serializers.EmailField()
    # Write-only flag; ``validate()`` turns it into ``agreement_date``.
    agree_with_policy = serializers.BooleanField(
        write_only=True,
        required=False,
        help_text=_('User must agree with the policy to register.'),
    )
    competence = serializers.ChoiceField(
        choices=settings.WALDUR_CORE.get('USER_COMPETENCE_LIST', []),
        allow_blank=True,
        required=False,
    )
    token = serializers.ReadOnlyField(source='auth_token.key')
    customer_permissions = serializers.SerializerMethodField()
    project_permissions = serializers.SerializerMethodField()
    requested_email = serializers.SerializerMethodField()
    full_name = serializers.CharField(max_length=200, required=False)

    def get_customer_permissions(self, user):
        # Active customer roles of the user, rendered with the basic serializer.
        permissions = models.CustomerPermission.objects.filter(
            user=user, is_active=True
        ).select_related('customer')
        serializer = BasicCustomerPermissionSerializer(
            instance=permissions, many=True, context=self.context
        )
        return serializer.data

    def get_project_permissions(self, user):
        # Active project roles of the user, rendered with the basic serializer.
        permissions = models.ProjectPermission.objects.filter(
            user=user, is_active=True
        ).select_related('project')
        serializer = BasicProjectPermissionSerializer(
            instance=permissions, many=True, context=self.context
        )
        return serializer.data

    def get_requested_email(self, user):
        # Pending e-mail change request, if any; implicitly None otherwise.
        try:
            requested_email = core_models.ChangeEmailRequest.objects.get(user=user)
            return requested_email.email
        except core_models.ChangeEmailRequest.DoesNotExist:
            pass

    class Meta:
        model = User
        fields = (
            'url',
            'uuid',
            'username',
            'full_name',
            'native_name',
            'job_title',
            'email',
            'phone_number',
            'organization',
            'civil_number',
            'description',
            'is_staff',
            'is_active',
            'is_support',
            'token',
            'token_lifetime',
            'registration_method',
            'date_joined',
            'agree_with_policy',
            'agreement_date',
            'preferred_language',
            'competence',
            'customer_permissions',
            'project_permissions',
            'requested_email',
            'affiliations',
            'first_name',
            'last_name',
        )
        read_only_fields = (
            'uuid',
            'civil_number',
            'registration_method',
            'date_joined',
            'agreement_date',
            'customer_permissions',
            'project_permissions',
            'affiliations',
            'first_name',
            'last_name',
        )
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }
        protected_fields = ('email',)

    def get_fields(self):
        """Tailor the field set to the requesting user's privileges."""
        fields = super(UserSerializer, self).get_fields()
        try:
            request = self.context['view'].request
            user = request.user
        except (KeyError, AttributeError):
            # No request in context (e.g. schema generation): expose defaults.
            return fields
        if user.is_anonymous:
            return fields
        if not user.is_staff:
            protected_fields = ('is_active', 'is_staff', 'is_support', 'description')
            if user.is_support:
                # Support staff may see but not edit these flags.
                for field in protected_fields:
                    if field in fields:
                        fields[field].read_only = True
            else:
                # Regular users do not see them at all.
                for field in protected_fields:
                    if field in fields:
                        del fields[field]
        if not self._can_see_token(user):
            del fields['token']
            del fields['token_lifetime']
        if request.method in ('PUT', 'PATCH'):
            fields['username'].read_only = True
            protected_methods = settings.WALDUR_CORE[
                'PROTECT_USER_DETAILS_FOR_REGISTRATION_METHODS'
            ]
            if (
                user.registration_method
                and user.registration_method in protected_methods
            ):
                # Details managed by the external registration source are
                # frozen here so they cannot drift from that source.
                detail_fields = (
                    'full_name',
                    'native_name',
                    'job_title',
                    'email',
                    'phone_number',
                    'organization',
                )
                for field in detail_fields:
                    fields[field].read_only = True
        return fields

    def _can_see_token(self, user):
        # Nobody apart from the user herself can see her token.
        # User can see the token either via details view or /api/users/me
        if isinstance(self.instance, list) and len(self.instance) == 1:
            return self.instance[0] == user
        else:
            return self.instance == user

    def validate(self, attrs):
        agree_with_policy = attrs.pop('agree_with_policy', False)
        # Only prompt for agreement while the user has not yet agreed.
        if self.instance and not self.instance.agreement_date:
            if not agree_with_policy:
                raise serializers.ValidationError(
                    {'agree_with_policy': _('User must agree with the policy.')}
                )
            else:
                attrs['agreement_date'] = timezone.now()
        # Convert validation error from Django to DRF
        # https://github.com/tomchristie/django-rest-framework/issues/2145
        try:
            user = User(id=getattr(self.instance, 'id', None), **attrs)
            user.clean()
        except django_exceptions.ValidationError as error:
            raise exceptions.ValidationError(error.message_dict)
        return attrs
class UserEmailChangeSerializer(serializers.Serializer):
    """Payload carrying a single e-mail address for a change-email request."""

    email = serializers.EmailField()
class PasswordSerializer(serializers.Serializer):
    """Validates a new password: at least 7 characters, containing at least
    one digit and one latin letter."""

    password = serializers.CharField(
        min_length=7,
        validators=[
            RegexValidator(
                regex=r'\d', message=_('Ensure this field has at least one digit.'),
            ),
            RegexValidator(
                regex='[a-zA-Z]',
                message=_('Ensure this field has at least one latin letter.'),
            ),
        ],
    )
class SshKeySerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for SSH public keys.

    Validation enforces a single-line key, a computable fingerprint, and
    global fingerprint uniqueness.
    """

    user_uuid = serializers.ReadOnlyField(source='user.uuid')

    class Meta:
        model = core_models.SshPublicKey
        fields = (
            'url',
            'uuid',
            'name',
            'public_key',
            'fingerprint',
            'user_uuid',
            'is_shared',
            'type',
        )
        read_only_fields = ('fingerprint', 'is_shared')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    def validate_name(self, value):
        # Normalise surrounding whitespace only; the name is otherwise free-form.
        return value.strip()

    def validate_public_key(self, value):
        value = value.strip()
        if len(value.splitlines()) > 1:
            raise serializers.ValidationError(
                _('Key is not valid: it should be single line.')
            )
        try:
            fingerprint = core_models.get_ssh_key_fingerprint(value)
        except (IndexError, TypeError):
            # Malformed key material: fingerprint computation fails.
            raise serializers.ValidationError(
                _('Key is not valid: cannot generate fingerprint from it.')
            )
        if core_models.SshPublicKey.objects.filter(fingerprint=fingerprint).exists():
            raise serializers.ValidationError(
                _('Key with same fingerprint already exists.')
            )
        return value
class MoveProjectSerializer(serializers.Serializer):
    """Payload naming the target customer a project is moved to."""

    customer = NestedCustomerSerializer(
        queryset=models.Customer.objects.all(), required=True, many=False
    )
class ServiceOptionsSerializer(serializers.Serializer):
    """Base serializer for per-service-type option sets.

    Subclasses declare ``Meta.secret_fields`` — option names hidden from
    non-privileged readers (consumed by ServiceSettingsSerializer).
    """

    class Meta:
        secret_fields = ()

    @classmethod
    def get_subclasses(cls):
        # Depth-first walk yielding every transitive subclass.
        for subclass in cls.__subclasses__():
            yield from subclass.get_subclasses()
            yield subclass
class ServiceSettingsSerializer(
    PermissionFieldFilteringMixin,
    core_serializers.RestrictedSerializerMixin,
    core_serializers.AugmentedSerializerMixin,
    serializers.HyperlinkedModelSerializer,
):
    """Serializer for service settings.

    On reads it strips secret options from non-privileged users; on writes
    it validates the ``options`` dict with the per-type options serializer.
    """

    customer_native_name = serializers.ReadOnlyField(source='customer.native_name')
    # Expose state as its human-readable name instead of the stored integer.
    state = MappedChoiceField(
        choices=[(v, k) for k, v in core_models.StateMixin.States.CHOICES],
        choice_mappings={v: k for k, v in core_models.StateMixin.States.CHOICES},
        read_only=True,
    )
    scope = core_serializers.GenericRelatedField(
        related_models=models.BaseResource.get_all_models(),
        required=False,
        allow_null=True,
    )
    options = serializers.DictField()

    class Meta:
        model = models.ServiceSettings
        fields = (
            'url',
            'uuid',
            'name',
            'type',
            'state',
            'error_message',
            'shared',
            'customer',
            'customer_name',
            'customer_native_name',
            'terms_of_services',
            'scope',
            'options',
        )
        protected_fields = ('type', 'customer')
        read_only_fields = ('shared', 'state', 'error_message')
        related_paths = ('customer',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'customer': {'lookup_field': 'uuid'},
        }

    def get_filtered_field_names(self):
        # Used by PermissionFieldFilteringMixin to scope the queryset.
        return ('customer',)

    @staticmethod
    def eager_load(queryset, request=None):
        # Avoid N+1 queries on the customer relation.
        return queryset.select_related('customer')

    def get_fields(self):
        fields = super(ServiceSettingsSerializer, self).get_fields()
        method = self.context['view'].request.method
        if method == 'GET' and 'options' in fields:
            # On reads, swap the writable DictField for a method field so
            # secret options can be filtered out (see get_options).
            fields['options'] = serializers.SerializerMethodField('get_options')
        return fields

    def get_options(self, service):
        options = {
            'backend_url': service.backend_url,
            'username': service.username,
            'password': service.password,
            'domain': service.domain,
            'token': service.token,
            **service.options,
        }
        request = self.context['request']
        # Staff and customer owners see everything, secrets included.
        if request.user.is_staff:
            return options
        if service.customer and service.customer.has_user(
            request.user, models.CustomerRole.OWNER
        ):
            return options
        # Everyone else gets the options with secret fields stripped.
        options_serializer_class = get_options_serializer_class(service.type)
        secret_fields = options_serializer_class.Meta.secret_fields
        return {k: v for (k, v) in options.items() if k not in secret_fields}

    def validate(self, attrs):
        if 'options' not in attrs:
            return attrs
        # 'type' is protected, so on update it comes from the instance.
        service_type = self.instance and self.instance.type or attrs['type']
        options_serializer_class = get_options_serializer_class(service_type)
        options_serializer = options_serializer_class(
            instance=self.instance, data=attrs['options'], context=self.context
        )
        options_serializer.is_valid(raise_exception=True)
        service_options = options_serializer.validated_data
        attrs.update(service_options)
        self._validate_settings(models.ServiceSettings(**attrs))
        return attrs

    def _validate_settings(self, service_settings):
        # Probe the backend with the candidate settings before saving.
        try:
            backend = service_settings.get_backend()
            backend.validate_settings()
        except ServiceBackendError as e:
            raise serializers.ValidationError(_('Wrong settings: %s.') % e)
        except ServiceBackendNotImplemented:
            # Backends without validation support are accepted as-is.
            pass
class BasicResourceSerializer(serializers.Serializer):
    """Minimal read-only view of a resource: uuid, name and type string."""

    uuid = serializers.ReadOnlyField()
    name = serializers.ReadOnlyField()
    resource_type = serializers.SerializerMethodField()

    def get_resource_type(self, resource):
        return get_resource_type(resource)
class ManagedResourceSerializer(BasicResourceSerializer):
    """Basic resource view extended with project and customer identifiers."""

    project_name = serializers.ReadOnlyField(source='project.name')
    project_uuid = serializers.ReadOnlyField(source='project.uuid')
    customer_uuid = serializers.ReadOnlyField(source='project.customer.uuid')
    customer_name = serializers.ReadOnlyField(source='project.customer.name')
class TagList(list):
    """List subclass whose string form is a JSON array.

    Used as the final representation of tag lists so they render as JSON
    rather than as a Python list literal.
    """

    def __str__(self):
        # Serialise the contained tags directly as a JSON list.
        return json.dumps(list(self))
class TagSerializer(serializers.Serializer):
    """
    This serializer updates tags field using django-taggit API.
    """

    # Sentinel distinguishing "tags absent" from an explicit tags value.
    _NO_TAGS = object()

    def create(self, validated_data):
        tags = validated_data.pop('tags', self._NO_TAGS)
        instance = super(TagSerializer, self).create(validated_data)
        if tags is not self._NO_TAGS:
            instance.tags.set(*tags)
        return instance

    def update(self, instance, validated_data):
        tags = validated_data.pop('tags', self._NO_TAGS)
        instance = super(TagSerializer, self).update(instance, validated_data)
        if tags is not self._NO_TAGS:
            instance.tags.set(*tags)
        return instance
class TagListSerializerField(serializers.Field):
    """Serializer field representing tags as a JSON list of strings."""

    child = serializers.CharField()
    default_error_messages = {
        'not_a_list': _('Expected a list of items but got type "{input_type}".'),
        'invalid_json': _(
            'Invalid json list. A tag list submitted in string form must be valid json.'
        ),
        'not_a_str': _('All list items must be of string type.'),
    }

    def to_internal_value(self, value):
        # Accept either a JSON-encoded string or a native list.
        if isinstance(value, str):
            if not value:
                value = '[]'  # empty string means "no tags"
            try:
                value = json.loads(value)
            except ValueError:
                self.fail('invalid_json')
        if not isinstance(value, list):
            self.fail('not_a_list', input_type=type(value).__name__)
        for s in value:
            if not isinstance(s, str):
                self.fail('not_a_str')
            # Run the CharField validators over every individual tag.
            self.child.run_validation(s)
        return value

    def get_attribute(self, instance):
        """
        Fetch tags from cache defined in TagMixin.
        """
        return instance.get_tags()

    def to_representation(self, value):
        # Wrap in TagList so str() of the result is a JSON array.
        if not isinstance(value, TagList):
            value = TagList(value)
        return value
class BaseResourceSerializer(
    core_serializers.RestrictedSerializerMixin,
    PermissionFieldFilteringMixin,
    core_serializers.AugmentedSerializerMixin,
    TagSerializer,
    serializers.HyperlinkedModelSerializer,
):
    """Common serializer for all resource models.

    Subclasses set ``Meta.model``; the service-settings queryset is narrowed
    to the subclass's service type on creation (see ``get_fields``).
    """

    state = serializers.ReadOnlyField(source='get_state_display')
    project = serializers.HyperlinkedRelatedField(
        queryset=models.Project.objects.all(),
        view_name='project-detail',
        lookup_field='uuid',
    )
    project_name = serializers.ReadOnlyField(source='project.name')
    project_uuid = serializers.ReadOnlyField(source='project.uuid')
    service_name = serializers.ReadOnlyField(source='service_settings.name')
    service_settings = serializers.HyperlinkedRelatedField(
        queryset=models.ServiceSettings.objects.all(),
        view_name='servicesettings-detail',
        lookup_field='uuid',
    )
    service_settings_uuid = serializers.ReadOnlyField(source='service_settings.uuid')
    service_settings_state = serializers.ReadOnlyField(
        source='service_settings.human_readable_state'
    )
    service_settings_error_message = serializers.ReadOnlyField(
        source='service_settings.error_message'
    )
    # Customer is derived from the project, never set directly.
    customer = serializers.HyperlinkedRelatedField(
        source='project.customer',
        view_name='customer-detail',
        read_only=True,
        lookup_field='uuid',
    )
    customer_name = serializers.ReadOnlyField(source='project.customer.name')
    customer_abbreviation = serializers.ReadOnlyField(
        source='project.customer.abbreviation'
    )
    customer_native_name = serializers.ReadOnlyField(
        source='project.customer.native_name'
    )
    created = serializers.DateTimeField(read_only=True)
    resource_type = serializers.SerializerMethodField()
    tags = TagListSerializerField(required=False)
    access_url = serializers.SerializerMethodField()

    class Meta:
        model = NotImplemented
        fields = (
            'url',
            'uuid',
            'name',
            'description',
            'service_name',
            'service_settings',
            'service_settings_uuid',
            'service_settings_state',
            'service_settings_error_message',
            'project',
            'project_name',
            'project_uuid',
            'customer',
            'customer_name',
            'customer_native_name',
            'customer_abbreviation',
            'tags',
            'error_message',
            'error_traceback',
            'resource_type',
            'state',
            'created',
            'modified',
            'backend_id',
            'access_url',
        )
        protected_fields = (
            'project',
            'service_settings',
        )
        read_only_fields = ('error_message', 'error_traceback', 'backend_id')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    def get_filtered_field_names(self):
        return ('project', 'service_settings')

    def get_resource_type(self, obj):
        return get_resource_type(obj)

    def get_resource_fields(self):
        # Concrete model field names; used to drop virtual inputs in create().
        return [f.name for f in self.Meta.model._meta.get_fields()]

    # an optional generic URL for accessing a resource
    def get_access_url(self, obj):
        return obj.get_access_url()

    def get_fields(self):
        fields = super(BaseResourceSerializer, self).get_fields()
        # skip validation on object update
        if not self.instance:
            service_type = get_service_type(self.Meta.model)
            if (
                'service_settings' in fields
                and not fields['service_settings'].read_only
            ):
                # Only settings of the matching service type may be chosen.
                queryset = fields['service_settings'].queryset.filter(type=service_type)
                fields['service_settings'].queryset = queryset
        return fields

    @transaction.atomic
    def create(self, validated_data):
        data = validated_data.copy()
        fields = self.get_resource_fields()
        # Remove `virtual` properties which ain't actually belong to the model
        data = {key: value for key, value in data.items() if key in fields}
        resource = super(BaseResourceSerializer, self).create(data)
        resource.increase_backend_quotas_usage()
        return resource

    @classmethod
    def get_subclasses(cls):
        # Depth-first walk; abstract serializers (model left as
        # NotImplemented) are traversed but not yielded.
        for subclass in cls.__subclasses__():
            yield from subclass.get_subclasses()
            if subclass.Meta.model != NotImplemented:
                yield subclass
class BaseResourceActionSerializer(BaseResourceSerializer):
    """Resource serializer for action endpoints: the project and service
    settings of an existing resource cannot be changed."""

    project = serializers.HyperlinkedRelatedField(
        view_name='project-detail', lookup_field='uuid', read_only=True,
    )
    service_settings = serializers.HyperlinkedRelatedField(
        view_name='servicesettings-detail', lookup_field='uuid', read_only=True,
    )

    class Meta(BaseResourceSerializer.Meta):
        pass
class SshPublicKeySerializerMixin(serializers.HyperlinkedModelSerializer):
    """Adds a write-only ``ssh_public_key`` field whose queryset is limited
    to keys the requesting user may use."""

    ssh_public_key = serializers.HyperlinkedRelatedField(
        view_name='sshpublickey-detail',
        lookup_field='uuid',
        queryset=core_models.SshPublicKey.objects.all(),
        required=False,
        write_only=True,
    )

    def get_fields(self):
        fields = super(SshPublicKeySerializerMixin, self).get_fields()
        if 'request' in self.context:
            user = self.context['request'].user
            ssh_public_key = fields.get('ssh_public_key')
            if ssh_public_key:
                if not user.is_staff:
                    # Non-staff may pick keys of users visible to them,
                    # plus any shared keys.
                    visible_users = list(filter_visible_users(User.objects.all(), user))
                    subquery = Q(user__in=visible_users) | Q(is_shared=True)
                    ssh_public_key.queryset = ssh_public_key.queryset.filter(subquery)
        return fields
class VirtualMachineSerializer(SshPublicKeySerializerMixin, BaseResourceSerializer):
    """Base serializer for VM resources: hardware specs and IPs are
    backend-reported (read-only); user data and SSH key are create-only."""

    external_ips = serializers.ListField(
        child=serializers.IPAddressField(protocol='ipv4'), read_only=True,
    )
    internal_ips = serializers.ListField(
        child=serializers.IPAddressField(protocol='ipv4'), read_only=True,
    )

    class Meta(BaseResourceSerializer.Meta):
        fields = BaseResourceSerializer.Meta.fields + (
            'start_time',
            'cores',
            'ram',
            'disk',
            'min_ram',
            'min_disk',
            'ssh_public_key',
            'user_data',
            'external_ips',
            'internal_ips',
            'latitude',
            'longitude',
            'key_name',
            'key_fingerprint',
            'image_name',
        )
        read_only_fields = BaseResourceSerializer.Meta.read_only_fields + (
            'start_time',
            'cores',
            'ram',
            'disk',
            'min_ram',
            'min_disk',
            'external_ips',
            'internal_ips',
            'latitude',
            'longitude',
            'key_name',
            'key_fingerprint',
            'image_name',
        )
        protected_fields = BaseResourceSerializer.Meta.protected_fields + (
            'user_data',
            'ssh_public_key',
        )

    def create(self, validated_data):
        # Snapshot the image name so it survives image deletion/rename.
        if 'image' in validated_data:
            validated_data['image_name'] = validated_data['image'].name
        return super(VirtualMachineSerializer, self).create(validated_data)
class BasePropertySerializer(
    core_serializers.AugmentedSerializerMixin, serializers.HyperlinkedModelSerializer,
):
    """Abstract base for service property serializers; subclasses must
    set ``Meta.model``."""

    class Meta:
        model = NotImplemented
class DivisionSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for organisational divisions, flattening type and parent
    identifiers from the related objects."""

    type = serializers.ReadOnlyField(source='type.name')
    parent_uuid = serializers.ReadOnlyField(source='parent.uuid')
    parent_name = serializers.ReadOnlyField(source='parent.type.name')

    class Meta:
        model = models.Division
        fields = ('uuid', 'url', 'name', 'type', 'parent_uuid', 'parent_name', 'parent')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'parent': {'lookup_field': 'uuid'},
        }
class DivisionTypesSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for division types (uuid, url and name only)."""

    class Meta:
        model = models.DivisionType
        fields = (
            'uuid',
            'url',
            'name',
        )
        extra_kwargs = {
            'url': {'lookup_field': 'uuid', 'view_name': 'division-type-detail'},
        }
| |
from omgeo.processor import _Processor
import re
class _PreProcessor(_Processor):
    """Takes, processes, and returns a geocoding.places.PlaceQuery object."""

    def process(self, pq):
        # Abstract hook: concrete preprocessors transform (or reject) the
        # PlaceQuery and must override this method.
        raise NotImplementedError(
            'PreProcessor subclasses must implement process().')
class ReplaceRangeWithNumber(_PreProcessor):
    """
    Class to take only the first part of an address range
    or hyphenated house number to use for geocoding.

    This affects the query and address PlaceQuery attributes.

    =============================== ========================================
    Input                           Output
    =============================== ========================================
    ``4109-4113 Main St``           ``4109 Main St``
    ``4109-13 Main St``             ``4109 Main St``
    ``322-1/2 Water Street``        ``322 Water Street``
    ``123-2 Maple Lane``            ``123 Maple Lane``
    ``272-B Greenough St, 19127``   ``272 Greenough St, 19127``
    ``272 Greenough St 19127-1112`` ``272 Greenough St 19127-1112``
    ``19127-1112``                  ``19127-1112`` (not affected)
    ``76-20 34th Ave, Queens NY``   ``76 34th Ave, Queens NY`` (see warning)
    =============================== ========================================

    .. warning::

        This may cause problems with addresses presented in the
        hyphenated Queens-style format, where the part before the
        hyphen indicates the cross street, and the part after
        indicates the house number.
    """
    #: Regular expression to represent ranges like:
    #: * 789-791
    #: * 789-91
    #: * 201A-201B
    #: * 201A-B
    # FIX: raw string -- '\d'/'\s' in a plain string are invalid escape
    # sequences and raise DeprecationWarning/SyntaxWarning on modern Python.
    RE_STREET_NUMBER = re.compile(r'(^\d+\w*-\d*\w*)\s', re.IGNORECASE)

    def replace_range(self, addr_str):
        """Truncate a leading house-number range to its first component."""
        match = self.RE_STREET_NUMBER.match(addr_str)
        if match is not None:
            old = match.group(1)
            new = old.split('-', 1)[0]
            # Replace only the first occurrence so later hyphenated parts
            # (e.g. a trailing ZIP+4) are left alone.
            addr_str = addr_str.replace(old, new, 1)
        return addr_str

    def process(self, pq):
        """
        :arg PlaceQuery pq: PlaceQuery instance
        :returns: PlaceQuery instance with truncated address range / number
        """
        pq.query = self.replace_range(pq.query)
        pq.address = self.replace_range(pq.address)
        return pq
class ParseSingleLine(_PreProcessor):
    """
    Split a single-line ``query`` into postcode, address, and city parts.

    Adapted from `Cicero Live <http://azavea.com/packages/azavea_cicero/blocks/cicero_live/view.js>`_
    """
    # Some Regexes (raw strings -- the originals contained invalid escape
    # sequences such as '\W' in plain strings):
    # Secondary-unit designators that carry a number ("Ste 402", "# 3", ...).
    re_unit_numbered = re.compile(
        r'(su?i?te|p\W*[om]\W*b(?:ox)?|(?:ap|dep)(?:ar)?t(?:me?nt)?|ro*m|flo*r?|uni?t|'
        r'bu?i?ldi?n?g|ha?nga?r|lo?t|pier|slip|spa?ce?|stop|tra?i?le?r|bo?x|no\.?)\s+|#',
        re.IGNORECASE)
    # Secondary-address designators without a number ("Basement", "Rear", ...).
    re_unit_not_numbered = re.compile(
        r'ba?se?me?n?t|fro?nt|lo?bby|lowe?r|off?i?ce?|pe?n?t?ho?u?s?e?|rear|side|uppe?r',
        re.IGNORECASE)
    re_UK_postcode = re.compile(
        r'[A-Z]{1,2}[0-9R][0-9A-Z]? *[0-9][A-Z]{0,2}', re.IGNORECASE)
    re_blank = re.compile(r'\s')

    def _comma_join(self, left, right):
        # Append `right` to `left`, inserting ", " only when left is non-empty.
        if left == '':
            return right
        else:
            return '%s, %s' % (left, right)

    def process(self, pq):
        """
        :arg PlaceQuery pq: PlaceQuery instance
        :returns: PlaceQuery instance with :py:attr:`query`
                  converted to individual elements
        """
        if pq.query != '':
            postcode = address = city = ''  # define the vars we'll use
            # global regex postcode search, pop off last result
            postcode_matches = self.re_UK_postcode.findall(pq.query)
            if len(postcode_matches) > 0:
                postcode = postcode_matches[-1]
            query_parts = [part.strip() for part in pq.query.split(',')]
            # FIX: compare string contents with `!=`, not the `is not`
            # identity test (identity with a literal is a SyntaxWarning and
            # an implementation detail of interning).
            # NOTE(review): `postcode` is used as a regex pattern below;
            # matches of re_UK_postcode are alphanumerics/spaces, so no
            # metacharacter escaping is needed -- confirm if the pattern
            # ever changes.
            if postcode != '' and re.search(postcode, query_parts[0]):
                # if postcode is in the first part of query_parts, there are
                # probably no commas; get just the part before the postcode
                part_before_postcode = query_parts[0].split(postcode)[0].strip()
                if self.re_blank.search(part_before_postcode) is None:
                    address = part_before_postcode
                else:
                    address = query_parts[0]  # perhaps it isn't really a postcode (apt num, etc)
            else:
                address = query_parts[0]  # no postcode to worry about
            for part in query_parts[1:]:
                part = part.strip()
                if postcode != '' and re.search(postcode, part) is not None:
                    part = part.replace(postcode, '').strip()  # if postcode is in part, remove it
                if self.re_unit_numbered.search(part) is not None:
                    # test to see if part is secondary address, like "Ste 402"
                    address = self._comma_join(address, part)
                elif self.re_unit_not_numbered.search(part) is not None:
                    # ! might cause problems if 'Lower' or 'Upper' is in the city name
                    # test to see if part is secondary address, like "Basement"
                    address = self._comma_join(address, part)
                else:
                    city = self._comma_join(city, part)  # it's probably a city (or "City, County")
            # set pq parts if they aren't already set (we don't want to overwrite explicit params)
            if pq.postal == '': pq.postal = postcode
            if pq.address == '': pq.address = address
            if pq.city == '': pq.city = city
        return pq
class CountryPreProcessor(_PreProcessor):
    """
    Used to filter acceptable countries
    and standardize country names or codes.
    """

    def __init__(self, acceptable_countries=None, country_map=None):
        """
        :arg list acceptable_countries: A list of acceptable countries.
            None is used to indicate that all countries are acceptable.
            (default ``[]``)

            An empty string is also an acceptable country. To require
            a country, use the `RequireCountry` preprocessor.

        :arg dict country_map: A map of the input PlaceQuery.country property
            to the country value accepted by the geocoding service.

            For example, suppose that the geocoding service recognizes
            'GB', but not 'UK' -- and 'US', but not 'USA'::

                country_map = {'UK':'GB', 'USA':'US'}
        """
        # None means "accept everything"; an empty list is the stored default.
        self.acceptable_countries = acceptable_countries if acceptable_countries is not None else []
        self.country_map = country_map if country_map is not None else {}

    def process(self, pq):
        """
        :arg PlaceQuery pq: PlaceQuery instance
        :returns: modified PlaceQuery, or ``False`` if country is not acceptable.
        """
        # Map country, but don't let map overwrite
        # (a country that is already acceptable is left untouched).
        if pq.country not in self.acceptable_countries and pq.country in self.country_map:
            pq.country = self.country_map[pq.country]
        # Empty country and empty acceptance list both mean "accept".
        if pq.country != '' and \
           self.acceptable_countries != [] and \
           pq.country not in self.acceptable_countries:
            return False
        return pq

    def __repr__(self):
        return '<%s: Accept %s mapped as %s>' % (self.__class__.__name__,
                                                 self.acceptable_countries, self.country_map)
class CancelIfRegexInAttr(_PreProcessor):
    """
    Return False if given regex is found in ANY of the given
    PlaceQuery attributes, otherwise return original PlaceQuery instance.

    In the event that a given attribute does not exist in the given
    PlaceQuery, no exception will be raised.
    """

    def __init__(self, regex, attrs, ignorecase=True):
        """
        :arg str regex: a regex string to match (represents what you do *not* want)
        :arg attrs: a list or tuple of strings of attribute names to look through
        :arg bool ignorecase: set to ``False`` for a case-sensitive match (default ``True``)
        """
        regex_type = type(regex)
        # BUG FIX: the original check was inverted ("if isinstance(...)"),
        # which rejected every valid str pattern and contradicted its own
        # error message. Raise only when regex is NOT a string.
        if not isinstance(regex, str):
            raise Exception('First param "regex" must be a regex of type'
                            ' str, not %s.' % regex_type)
        attrs_type = type(attrs)
        if attrs_type not in (list, tuple):
            raise Exception('Second param "attrs" must be a list or tuple'
                            ' of PlaceQuery attributes, not %s.' % attrs_type)
        if any(not isinstance(attr, str) for attr in attrs):
            raise Exception('All given PlaceQuery attributes must be strings.')
        self.attrs = attrs
        # BUG FIX: store the flag -- __repr__ reads self.ignorecase, which
        # was never assigned and raised AttributeError.
        self.ignorecase = ignorecase
        if ignorecase:
            self.regex = re.compile(regex, re.IGNORECASE)
        else:
            self.regex = re.compile(regex)

    def process(self, pq):
        """Return ``False`` when the pattern matches the start of any
        configured attribute; otherwise return the PlaceQuery unchanged."""
        attrs = [getattr(pq, attr) for attr in self.attrs if hasattr(pq, attr)]
        if any(self.regex.match(attr) is not None for attr in attrs):
            return False  # if a match is found
        return pq

    def __repr__(self):
        case_sensitive = 'insensitive' if self.ignorecase else 'sensitive'
        return '<%s: Break if %s in %s (case %s)>' % (self.__class__.__name__,
                                                      self.regex, self.attrs,
                                                      case_sensitive)
class CancelIfPOBox(_PreProcessor):
    """Reject queries whose address or query text starts with a PO Box
    (delegates the matching to CancelIfRegexInAttr)."""

    def process(self, pq):
        """
        :arg PlaceQuery pq: PlaceQuery instance
        :returns: ``False`` if the address is starts with any variation of "PO Box".
                  Otherwise, return original :py:class:`PlaceQuery`.
        """
        # Matches e.g. "PO BOX 123", "P.O. Box 123", "POB 123" at the start.
        regex = r'^\s*P\.?\s*O\.?\s*B\.?O?X?[\s\d]'
        return CancelIfRegexInAttr(regex, ('address', 'query')).process(pq)
class RequireCountry(_PreProcessor):
    """
    Reject PlaceQuery instances without a country unless a default
    country was configured, in which case the default is filled in.
    """

    def __init__(self, default_country=''):
        """
        :arg str default_country: default country to use if there is
            no country set in the PlaceQuery instance sent to this processor.
            If this argument is not set or empty and PlaceQuery instance does
            not have a country (pq.country == ''), the processor will return
            False and the PlaceQuery will be rejected during geocoding.
            (default ``''``)
        """
        self.default_country = default_country

    def process(self, pq):
        """
        :arg PlaceQuery pq: PlaceQuery instance
        :returns: One of the three following values:
                  * unmodified PlaceQuery instance if pq.country is not empty
                  * PlaceQuery instance with pq.country changed to default country.
                  * ``False`` if pq.country is empty and self.default_country == ''.
        """
        # A non-blank country passes through untouched.
        if pq.country.strip() != '':
            return pq
        # Blank country: reject unless a fallback was configured.
        if self.default_country == '':
            return False
        pq.country = self.default_country
        return pq
| |
#!/usr/bin/env python
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import shutil
import socket
import subprocess
import threading
import time
import shlex
import sys
import config as nodepool_config
import exceptions
import provider_manager
import stats
import zk
MINS = 60           # seconds per minute
HOURS = 60 * MINS   # seconds per hour

IMAGE_TIMEOUT = 6 * HOURS  # How long to wait for an image save
SUSPEND_WAIT_TIME = 30     # How long to wait between checks for
                           # ZooKeeper connectivity if it disappears.

# HP Cloud requires qemu compat with 0.10. That version works elsewhere,
# so just hardcode it for all qcow2 building
DEFAULT_QEMU_IMAGE_COMPAT_OPTIONS = "--qemu-img-options 'compat=0.10'"
class DibImageFile(object):
    '''
    API for locating locally built DIB image files, and a value object
    describing each file found.  A build is identified by a unique image
    id and may exist on disk in several formats, one file per extension.
    '''

    def __init__(self, image_id, extension=None):
        self.image_id = image_id
        self.extension = extension
        # Checksum values and file paths are filled in lazily by to_path().
        self.md5 = None
        self.md5_file = None
        self.sha256 = None
        self.sha256_file = None

    @staticmethod
    def from_path(path):
        # File names follow "<image_id>.<extension>"; split on the final
        # dot only, so ids containing dots survive.
        image_id, extension = os.path.basename(path).rsplit('.', 1)
        return DibImageFile(image_id, extension)

    @staticmethod
    def from_image_id(images_dir, image_id):
        '''Return all image files in images_dir matching image_id.'''
        found = []
        for entry in os.listdir(images_dir):
            if not os.path.isfile(os.path.join(images_dir, entry)):
                continue
            candidate = DibImageFile.from_path(entry)
            if candidate.image_id == image_id:
                found.append(candidate)
        return found

    @staticmethod
    def from_images_dir(images_dir):
        '''Return one DibImageFile per entry in images_dir.'''
        return [DibImageFile.from_path(entry) for entry in os.listdir(images_dir)]

    def to_path(self, images_dir, with_extension=True):
        '''Build the on-disk path and pick up sibling checksum files.'''
        out_path = os.path.join(images_dir, self.image_id)
        if with_extension:
            if self.extension is None:
                raise exceptions.BuilderError(
                    'Cannot specify image extension of None'
                )
            out_path = out_path + '.' + self.extension

        md5_path = '%s.%s' % (out_path, 'md5')
        md5 = self._checksum(md5_path)
        if md5:
            self.md5_file = md5_path
            self.md5 = md5[0:32]

        sha256_path = '%s.%s' % (out_path, 'sha256')
        sha256 = self._checksum(sha256_path)
        if sha256:
            self.sha256_file = sha256_path
            self.sha256 = sha256[0:64]

        return out_path

    def _checksum(self, filename):
        # Raw text of the checksum file, or None when it does not exist.
        if not os.path.isfile(filename):
            return None
        with open(filename, 'r') as f:
            return f.read()
class BaseWorker(threading.Thread):
    """Common scaffolding for builder worker threads: config path, polling
    interval, ZooKeeper handle, and a run flag toggled by shutdown()."""

    def __init__(self, config_path, interval, zk):
        super(BaseWorker, self).__init__()
        self.log = logging.getLogger("nodepool.builder.BaseWorker")
        self.daemon = True
        self._running = False
        self._config = None
        self._config_path = config_path
        self._interval = interval
        self._zk = zk
        self._hostname = socket.gethostname()
        self._statsd = stats.get_client()

    def _checkForZooKeeperChanges(self, new_config):
        """Point the ZK connection at the new server set when it changed."""
        if self._config.zookeeper_servers == new_config.zookeeper_servers:
            return
        self.log.debug("Detected ZooKeeper server changes")
        self._zk.resetHosts(new_config.zookeeper_servers.values())

    @property
    def running(self):
        return self._running

    def shutdown(self):
        self._running = False
class CleanupWorker(BaseWorker):
    '''
    The janitor of nodepool-builder that will remove images from providers
    and any local DIB builds.
    '''
    def __init__(self, name, config_path, interval, zk):
        super(CleanupWorker, self).__init__(config_path, interval, zk)
        self.log = logging.getLogger("nodepool.builder.CleanupWorker.%s" % name)
        self.name = 'CleanupWorker.%s' % name

    def _buildUploadRecencyTable(self):
        '''
        Builds a table for each image of the most recent uploads to each
        provider.

        Example)

            image1:
                providerA: [ (build_id, upload_id, upload_time), ... ]
                providerB: [ (build_id, upload_id, upload_time), ... ]
            image2:
                providerC: [ (build_id, upload_id, upload_time), ... ]
        '''
        self._rtable = {}
        for image in self._zk.getImageNames():
            self._rtable[image] = {}
            for build in self._zk.getBuilds(image, zk.READY):
                for provider in self._zk.getBuildProviders(image, build.id):
                    if provider not in self._rtable[image]:
                        self._rtable[image][provider] = []
                    # Up to 2 per build; the global cap to 2 per provider
                    # happens in the sort/truncate pass below.
                    uploads = self._zk.getMostRecentBuildImageUploads(
                        2, image, build.id, provider, zk.READY)
                    for upload in uploads:
                        self._rtable[image][provider].append(
                            (build.id, upload.id, upload.state_time)
                        )

        # Sort uploads by state_time (upload time) and keep the 2 most recent
        for i in self._rtable.keys():
            for p in self._rtable[i].keys():
                self._rtable[i][p].sort(key=lambda x: x[2], reverse=True)
                self._rtable[i][p] = self._rtable[i][p][:2]

    def _isRecentUpload(self, image, provider, build_id, upload_id):
        '''
        Search for an upload for a build within the recency table.

        :returns: True if the (build_id, upload_id) pair is one of the
            recent uploads recorded by _buildUploadRecencyTable().
        '''
        provider = self._rtable[image].get(provider)
        if not provider:
            return False
        for b_id, u_id, u_time in provider:
            if build_id == b_id and upload_id == u_id:
                return True
        return False

    def _inProgressUpload(self, upload):
        '''
        Determine if an upload is in progress.

        An upload in the UPLOADING state whose lock is held by another
        worker is considered in progress.
        '''
        if upload.state != zk.UPLOADING:
            return False
        try:
            with self._zk.imageUploadLock(upload.image_name, upload.build_id,
                                          upload.provider_name,
                                          blocking=False):
                pass
        except exceptions.ZKLockException:
            # Could not acquire the lock: someone is actively uploading.
            return True
        return False

    def _removeDibItem(self, filename):
        # Best-effort removal of one local DIB artifact; a missing file
        # is not an error, anything else is re-raised.
        if filename is None:
            return
        try:
            os.remove(filename)
            self.log.info("Removed DIB file %s" % filename)
        except OSError as e:
            if e.errno != 2:    # No such file or directory
                raise e

    def _deleteLocalBuild(self, image, build_id, builder):
        '''
        Remove expired image build from local disk.

        :param str image: Name of the image whose build we are deleting.
        :param str build_id: ID of the build we want to delete.
        :param str builder: hostname of the build.

        :returns: True if files were deleted, False if none were found.
        '''
        base = "-".join([image, build_id])
        files = DibImageFile.from_image_id(self._config.imagesdir, base)
        if not files:
            # NOTE(pabelanger): It is possible we don't have any files because
            # diskimage-builder failed. So, check to see if we have the correct
            # builder so we can removed the data from zookeeper.
            if builder == self._hostname:
                return True
            return False

        self.log.info("Doing cleanup for %s:%s" % (image, build_id))

        manifest_dir = None

        for f in files:
            filename = f.to_path(self._config.imagesdir, True)
            if not manifest_dir:
                path, ext = filename.rsplit('.', 1)
                manifest_dir = path + ".d"
            # NOTE(review): map() is eager only on Python 2 (this file uses
            # execfile, so it is py2); a py3 port must use a for loop here.
            map(self._removeDibItem, [filename, f.md5_file, f.sha256_file])

        try:
            shutil.rmtree(manifest_dir)
            self.log.info("Removed DIB manifest %s" % manifest_dir)
        except OSError as e:
            if e.errno != 2:    # No such file or directory
                raise e

        return True

    def _cleanupProvider(self, provider, image, build_id):
        # Delete every upload of this build that is not one of the
        # recent uploads we want to keep.
        all_uploads = self._zk.getUploads(image, build_id, provider.name)
        for upload in all_uploads:
            if self._isRecentUpload(image, provider.name, build_id, upload.id):
                continue
            self._deleteUpload(upload)

    def _cleanupObsoleteProviderUploads(self, provider, image, build_id):
        # Delete all uploads of an image to a provider that no longer
        # configures that image.
        image_names_for_provider = provider.images.keys()
        if image in image_names_for_provider:
            # This image is in use for this provider
            return

        all_uploads = self._zk.getUploads(image, build_id, provider.name)
        for upload in all_uploads:
            self._deleteUpload(upload)

    def _deleteUpload(self, upload):
        # Two-phase delete: first mark the upload DELETING in ZooKeeper,
        # then remove the cloud image and finally the ZK record.
        deleted = False

        if upload.state != zk.DELETING:
            if not self._inProgressUpload(upload):
                data = zk.ImageUpload()
                data.state = zk.DELETING
                self._zk.storeImageUpload(upload.image_name, upload.build_id,
                                          upload.provider_name, data,
                                          upload.id)
                deleted = True

        if upload.state == zk.DELETING or deleted:
            manager = self._config.provider_managers[upload.provider_name]
            try:
                # It is possible we got this far, but don't actually have an
                # external_name. This could mean that zookeeper and cloud
                # provider are some how out of sync.
                if upload.external_name:
                    base = "-".join([upload.image_name, upload.build_id])
                    self.log.info("Deleting image build %s from %s" %
                                  (base, upload.provider_name))
                    manager.deleteImage(upload.external_name)
            except Exception:
                self.log.exception(
                    "Unable to delete image %s from %s:",
                    upload.external_name, upload.provider_name)
            else:
                # Only remove the ZK record once the provider-side delete
                # succeeded; otherwise we retry next cleanup pass.
                self._zk.deleteUpload(upload.image_name, upload.build_id,
                                      upload.provider_name, upload.id)

    def _inProgressBuild(self, build, image):
        '''
        Determine if a DIB build is in progress.
        '''
        if build.state != zk.BUILDING:
            return False
        try:
            with self._zk.imageBuildLock(image, blocking=False):
                # An additional state check is needed to make sure it hasn't
                # changed on us. If it has, then let's pretend a build is
                # still in progress so that it is checked again later with
                # its new build state.
                b = self._zk.getBuild(image, build.id)
                if b.state != zk.BUILDING:
                    return True
                pass
        except exceptions.ZKLockException:
            # Lock held elsewhere: the build is actively in progress.
            return True
        return False

    def _cleanup(self):
        '''
        Clean up builds on disk and in providers.
        '''
        known_providers = self._config.providers.values()
        image_names = self._zk.getImageNames()

        self._buildUploadRecencyTable()

        for image in image_names:
            try:
                self._cleanupImage(known_providers, image)
            except Exception:
                self.log.exception("Exception cleaning up image %s:", image)

    def _filterLocalBuilds(self, image, builds):
        '''Return the subset of builds that are local'''
        ret = []
        for build in builds:
            base = "-".join([image, build.id])
            files = DibImageFile.from_image_id(self._config.imagesdir, base)
            if files:
                ret.append(build)
        return ret

    def _cleanupCurrentProviderUploads(self, provider, image, build_id):
        '''
        Remove cruft from a current build.

        Current builds (the ones we want to keep) are treated special since
        we want to remove any ZK nodes for uploads that failed exceptionally
        hard (i.e., we could not set the state to FAILED and they remain as
        UPLOADING), and we also want to remove any uploads that have been
        marked for deleting.
        '''
        cruft = self._zk.getUploads(image, build_id, provider,
                                    states=[zk.UPLOADING, zk.DELETING])
        for upload in cruft:
            if (upload.state == zk.UPLOADING and
                not self._inProgressUpload(upload)
            ):
                self.log.info("Removing failed upload record: %s" % upload)
                self._zk.deleteUpload(image, build_id, provider, upload.id)
            elif upload.state == zk.DELETING:
                self.log.info("Removing deleted upload and record: %s" % upload)
                self._deleteUpload(upload)

    def _cleanupImage(self, known_providers, image):
        '''
        Clean up one image.
        '''
        # Get the list of all builds, then work from that so that we
        # have a consistent view of the data.
        all_builds = self._zk.getBuilds(image)
        # Keep the two most recent READY builds.
        builds_to_keep = set([b for b in sorted(all_builds, reverse=True,
                                                key=lambda y: y.state_time)
                              if b.state==zk.READY][:2])
        local_builds = set(self._filterLocalBuilds(image, all_builds))
        diskimage = self._config.diskimages.get(image)
        if not diskimage and not local_builds:
            # This builder is and was not responsible for this image,
            # so ignore it.
            return
        # Remove any local builds that are not in use.
        if not diskimage or (diskimage and not diskimage.in_use):
            builds_to_keep -= local_builds
            # TODO(jeblair): When all builds for an image which is not
            # in use are deleted, the image znode should be deleted as
            # well.

        for build in all_builds:
            # Start by deleting any uploads that are no longer needed
            # because this image has been removed from a provider
            # (since this should be done regardless of the build
            # state).
            for provider in known_providers:
                try:
                    self._cleanupObsoleteProviderUploads(provider, image,
                                                         build.id)
                    if build in builds_to_keep:
                        self._cleanupCurrentProviderUploads(provider.name,
                                                            image,
                                                            build.id)
                except Exception:
                    self.log.exception("Exception cleaning up uploads "
                                       "of build %s of image %s in "
                                       "provider %s:",
                                       build, image, provider)

            # If the build is in the delete state, we will try to
            # delete the entire thing regardless.
            if build.state != zk.DELETING:
                # If it is in any other state, we will only delete it
                # if it is older than the most recent two ready
                # builds, or is in the building state but not actually
                # building.
                if build in builds_to_keep:
                    continue
                elif self._inProgressBuild(build, image):
                    continue

            for provider in known_providers:
                try:
                    self._cleanupProvider(provider, image, build.id)
                except Exception:
                    self.log.exception("Exception cleaning up build %s "
                                       "of image %s in provider %s:",
                                       build, image, provider)

            uploads_exist = False
            for p in self._zk.getBuildProviders(image, build.id):
                if self._zk.getImageUploadNumbers(image, build.id, p):
                    uploads_exist = True
                    break

            if not uploads_exist:
                if build.state != zk.DELETING:
                    with self._zk.imageBuildNumberLock(
                        image, build.id, blocking=False
                    ):
                        build.state = zk.DELETING
                        self._zk.storeBuild(image, build, build.id)

                # Release the lock here so we can delete the build znode
                if self._deleteLocalBuild(image, build.id, build.builder):
                    if not self._zk.deleteBuild(image, build.id):
                        self.log.error("Unable to delete build %s because"
                                       " uploads still remain.", build)

    def run(self):
        '''
        Start point for the CleanupWorker thread.
        '''
        self._running = True
        while self._running:
            # Don't do work if we've lost communication with the ZK cluster
            while self._zk and (self._zk.suspended or self._zk.lost):
                self.log.info("ZooKeeper suspended. Waiting")
                time.sleep(SUSPEND_WAIT_TIME)

            try:
                self._run()
            except Exception:
                self.log.exception("Exception in CleanupWorker:")
                time.sleep(10)

            time.sleep(self._interval)

        provider_manager.ProviderManager.stopProviders(self._config)

    def _run(self):
        '''
        Body of run method for exception handling purposes.
        '''
        new_config = nodepool_config.loadConfig(self._config_path)
        if not self._config:
            self._config = new_config

        self._checkForZooKeeperChanges(new_config)
        provider_manager.ProviderManager.reconfigure(self._config, new_config,
                                                     use_taskmanager=False)
        self._config = new_config

        self._cleanup()
class BuildWorker(BaseWorker):
    '''
    Worker thread that runs diskimage-builder for scheduled rebuilds
    and manual build requests found in ZooKeeper.
    '''
    def __init__(self, name, config_path, interval, zk, dib_cmd):
        super(BuildWorker, self).__init__(config_path, interval, zk)
        self.log = logging.getLogger("nodepool.builder.BuildWorker.%s" % name)
        self.name = 'BuildWorker.%s' % name
        # Command used to invoke diskimage-builder (disk-image-create).
        self.dib_cmd = dib_cmd

    def _running_under_virtualenv(self):
        # NOTE: borrowed from pip:locations.py
        if hasattr(sys, 'real_prefix'):
            return True
        elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
            return True
        return False

    def _activate_virtualenv(self):
        """Run as a pre-exec function to activate current virtualenv

        If we are invoked directly as /path/ENV/nodepool-builer (as
        done by an init script, for example) then /path/ENV/bin will
        not be in our $PATH, meaning we can't find disk-image-create.

        Apart from that, dib also needs to run in an activated
        virtualenv so it can find utils like dib-run-parts.  Run this
        before exec of dib to ensure the current virtualenv (if any)
        is activated.
        """
        if self._running_under_virtualenv():
            activate_this = os.path.join(sys.prefix, "bin", "activate_this.py")
            if not os.path.exists(activate_this):
                raise exceptions.BuilderError("Running in a virtualenv, but "
                                              "cannot find: %s" % activate_this)
            # execfile is Python-2-only; this file targets py2.
            execfile(activate_this, dict(__file__=activate_this))

    def _checkForScheduledImageUpdates(self):
        '''
        Check every DIB image to see if it has aged out and needs rebuilt.
        '''
        for diskimage in self._config.diskimages.values():
            # Check if we've been told to shutdown
            # or if ZK connection is suspended
            if not self.running or self._zk.suspended or self._zk.lost:
                return
            try:
                self._checkImageForScheduledImageUpdates(diskimage)
            except Exception:
                self.log.exception("Exception checking for scheduled "
                                   "update of diskimage %s",
                                   diskimage.name)

    def _checkImageForScheduledImageUpdates(self, diskimage):
        '''
        Check one DIB image to see if it needs to be rebuilt.

        .. note:: It's important to lock the image build before we check
            the state time and then build to eliminate any race condition.
        '''
        # Check if diskimage builds are paused.
        if diskimage.pause:
            return

        if not diskimage.image_types:
            # We don't know what formats to build.
            return

        now = int(time.time())
        builds = self._zk.getMostRecentBuilds(1, diskimage.name, zk.READY)

        # If there is no build for this image, or it has aged out
        # or if the current build is missing an image type from
        # the config file, start a new build.
        if (not builds
            or (now - builds[0].state_time) >= diskimage.rebuild_age
            or not set(builds[0].formats).issuperset(diskimage.image_types)
        ):
            try:
                with self._zk.imageBuildLock(diskimage.name, blocking=False):
                    # To avoid locking each image repeatedly, we have an
                    # second, redundant check here to verify that a new
                    # build didn't appear between the first check and the
                    # lock acquisition. If it's not the same build as
                    # identified in the first check above, assume another
                    # BuildWorker created the build for us and continue.
                    builds2 = self._zk.getMostRecentBuilds(1, diskimage.name, zk.READY)
                    if builds2 and builds[0].id != builds2[0].id:
                        return

                    self.log.info("Building image %s" % diskimage.name)

                    data = zk.ImageBuild()
                    data.state = zk.BUILDING
                    data.builder = self._hostname

                    bnum = self._zk.storeBuild(diskimage.name, data)
                    data = self._buildImage(bnum, diskimage)
                    self._zk.storeBuild(diskimage.name, data, bnum)
            except exceptions.ZKLockException:
                # Lock is already held. Skip it.
                pass

    def _checkForManualBuildRequest(self):
        '''
        Query ZooKeeper for any manual image build requests.
        '''
        for diskimage in self._config.diskimages.values():
            # Check if we've been told to shutdown
            # or if ZK connection is suspended
            if not self.running or self._zk.suspended or self._zk.lost:
                return
            try:
                self._checkImageForManualBuildRequest(diskimage)
            except Exception:
                self.log.exception("Exception checking for manual "
                                   "update of diskimage %s",
                                   diskimage)

    def _checkImageForManualBuildRequest(self, diskimage):
        '''
        Query ZooKeeper for a manual image build request for one image.
        '''
        # Check if diskimage builds are paused.
        if diskimage.pause:
            return

        # Reduce use of locks by adding an initial check here and
        # a redundant check after lock acquisition.
        if not self._zk.hasBuildRequest(diskimage.name):
            return

        try:
            with self._zk.imageBuildLock(diskimage.name, blocking=False):
                # Redundant check
                if not self._zk.hasBuildRequest(diskimage.name):
                    return

                self.log.info(
                    "Manual build request for image %s" % diskimage.name)

                data = zk.ImageBuild()
                data.state = zk.BUILDING
                data.builder = self._hostname

                bnum = self._zk.storeBuild(diskimage.name, data)
                data = self._buildImage(bnum, diskimage)
                self._zk.storeBuild(diskimage.name, data, bnum)

                # Remove request on a successful build
                if data.state == zk.READY:
                    self._zk.removeBuildRequest(diskimage.name)

        except exceptions.ZKLockException:
            # Lock is already held. Skip it.
            pass

    def _buildImage(self, build_id, diskimage):
        '''
        Run the external command to build the diskimage.

        :param str build_id: The ID for the build (used in image filename).
        :param diskimage: The diskimage as retrieved from our config file.

        :returns: An ImageBuild object of build-related data.

        :raises: BuilderError if we failed to execute the build command.
        '''
        base = "-".join([diskimage.name, build_id])
        image_file = DibImageFile(base)
        filename = image_file.to_path(self._config.imagesdir, False)

        env = os.environ.copy()
        env['DIB_RELEASE'] = diskimage.release
        env['DIB_IMAGE_NAME'] = diskimage.name
        env['DIB_IMAGE_FILENAME'] = filename

        # Note we use a reference to the nodepool config here so
        # that whenever the config is updated we get up to date
        # values in this thread.
        if self._config.elementsdir:
            env['ELEMENTS_PATH'] = self._config.elementsdir

        # send additional env vars if needed
        for k, v in diskimage.env_vars.items():
            env[k] = v

        img_elements = diskimage.elements
        img_types = ",".join(diskimage.image_types)

        qemu_img_options = ''
        if 'qcow2' in img_types:
            qemu_img_options = DEFAULT_QEMU_IMAGE_COMPAT_OPTIONS

        cmd = ('%s -x -t %s --checksum --no-tmpfs %s -o %s %s' %
               (self.dib_cmd, img_types, qemu_img_options, filename,
                img_elements))

        log = logging.getLogger("nodepool.image.build.%s" %
                                (diskimage.name,))

        self.log.info('Running %s' % cmd)

        try:
            p = subprocess.Popen(
                shlex.split(cmd),
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                # Ensure dib can find its helpers when we run inside a venv.
                preexec_fn=self._activate_virtualenv,
                env=env)
        except OSError as e:
            raise exceptions.BuilderError(
                "Failed to exec '%s'. Error: '%s'" % (cmd, e.strerror)
            )

        # Stream dib's combined stdout/stderr into the per-image logger
        # until EOF (readline returns '').
        while True:
            ln = p.stdout.readline()
            log.info(ln.strip())
            if not ln:
                break

        p.wait()

        # It's possible the connection to the ZK cluster could have been
        # interrupted during the build. If so, wait for it to return.
        # It could transition directly from SUSPENDED to CONNECTED, or go
        # through the LOST state before CONNECTED.
        while self._zk.suspended or self._zk.lost:
            self.log.info("ZooKeeper suspended during build. Waiting")
            time.sleep(SUSPEND_WAIT_TIME)

        build_data = zk.ImageBuild()
        build_data.builder = self._hostname

        if self._zk.didLoseConnection:
            self.log.info("ZooKeeper lost while building %s" % diskimage.name)
            self._zk.resetLostFlag()
            build_data.state = zk.FAILED
        elif p.returncode:
            self.log.info("DIB failed creating %s" % diskimage.name)
            build_data.state = zk.FAILED
        else:
            self.log.info("DIB image %s is built" % diskimage.name)
            build_data.state = zk.READY
            build_data.formats = img_types.split(",")

            if self._statsd:
                # record stats on the size of each image we create
                for ext in img_types.split(','):
                    key = 'nodepool.dib_image_build.%s.%s.size' % (diskimage.name, ext)
                    # A bit tricky because these image files may be sparse
                    # files; we only want the true size of the file for
                    # purposes of watching if we've added too much stuff
                    # into the image. Note that st_blocks is defined as
                    # 512-byte blocks by stat(2)
                    size = os.stat("%s.%s" % (filename, ext)).st_blocks * 512
                    self.log.debug("%s created image %s.%s (size: %d) " %
                                   (diskimage.name, filename, ext, size))
                    self._statsd.gauge(key, size)

        return build_data

    def run(self):
        '''
        Start point for the BuildWorker thread.
        '''
        self._running = True
        while self._running:
            # Don't do work if we've lost communication with the ZK cluster
            while self._zk and (self._zk.suspended or self._zk.lost):
                self.log.info("ZooKeeper suspended. Waiting")
                time.sleep(SUSPEND_WAIT_TIME)

            try:
                self._run()
            except Exception:
                self.log.exception("Exception in BuildWorker:")
                time.sleep(10)

            time.sleep(self._interval)

    def _run(self):
        '''
        Body of run method for exception handling purposes.
        '''
        # NOTE: For the first iteration, we expect self._config to be None
        new_config = nodepool_config.loadConfig(self._config_path)
        if not self._config:
            self._config = new_config

        self._checkForZooKeeperChanges(new_config)
        self._config = new_config

        self._checkForScheduledImageUpdates()
        self._checkForManualBuildRequest()
class UploadWorker(BaseWorker):
    '''
    Worker thread that uploads locally built, READY image builds to the
    configured providers.
    '''
    def __init__(self, name, config_path, interval, zk):
        super(UploadWorker, self).__init__(config_path, interval, zk)
        self.log = logging.getLogger("nodepool.builder.UploadWorker.%s" % name)
        self.name = 'UploadWorker.%s' % name

    def _reloadConfig(self):
        '''
        Reload the nodepool configuration file.
        '''
        new_config = nodepool_config.loadConfig(self._config_path)
        if not self._config:
            self._config = new_config

        self._checkForZooKeeperChanges(new_config)
        provider_manager.ProviderManager.reconfigure(self._config, new_config,
                                                     use_taskmanager=False)
        self._config = new_config

    def _uploadImage(self, build_id, upload_id, image_name, images, provider):
        '''
        Upload a local DIB image build to a provider.

        :param str build_id: Unique ID of the image build to upload.
        :param str upload_id: Unique ID of the upload.
        :param str image_name: Name of the diskimage.
        :param list images: A list of DibImageFile objects from this build
            that available for uploading.
        :param provider: The provider from the parsed config file.

        :returns: An ImageUpload object holding the final upload state.
        '''
        start_time = time.time()
        timestamp = int(start_time)

        # Select the image file whose format the provider accepts.
        image = None
        for i in images:
            if provider.image_type == i.extension:
                image = i
                break

        if not image:
            raise exceptions.BuilderInvalidCommandError(
                "Unable to find image file of type %s for id %s to upload" %
                (provider.image_type, build_id)
            )

        self.log.debug("Found image file of type %s for image id: %s" %
                       (image.extension, image.image_id))

        filename = image.to_path(self._config.imagesdir, with_extension=True)

        # Minimal stand-in object carrying just the attributes the
        # hostname template references (image.name / image.id).
        dummy_image = type('obj', (object,),
                           {'name': image_name, 'id': image.image_id})

        ext_image_name = provider.template_hostname.format(
            provider=provider, image=dummy_image,
            timestamp=str(timestamp)
        )

        self.log.info("Uploading DIB image build %s from %s to %s" %
                      (build_id, filename, provider.name))

        manager = self._config.provider_managers[provider.name]
        provider_image = provider.images.get(image_name)
        if provider_image is None:
            raise exceptions.BuilderInvalidCommandError(
                "Could not find matching provider image for %s" % image_name
            )

        meta = provider_image.meta.copy()
        meta['nodepool_build_id'] = build_id
        meta['nodepool_upload_id'] = upload_id

        try:
            external_id = manager.uploadImage(
                ext_image_name, filename,
                image_type=image.extension,
                meta=meta,
                md5=image.md5,
                sha256=image.sha256,
            )
        except Exception:
            # Report failure through the returned state; the caller
            # stores this record in ZooKeeper.
            self.log.exception("Failed to upload image %s to provider %s" %
                               (image_name, provider.name))
            data = zk.ImageUpload()
            data.state = zk.FAILED
            return data

        if self._statsd:
            # Record upload duration (ms) and count per image/provider.
            dt = int((time.time() - start_time) * 1000)
            key = 'nodepool.image_update.%s.%s' % (image_name,
                                                   provider.name)
            self._statsd.timing(key, dt)
            self._statsd.incr(key)

        base = "-".join([image_name, build_id])
        self.log.info("Image build %s in %s is ready" %
                      (base, provider.name))

        data = zk.ImageUpload()
        data.state = zk.READY
        data.external_id = external_id
        data.external_name = ext_image_name
        return data

    def _checkForProviderUploads(self):
        '''
        Check for any image builds that need to be uploaded to providers.

        If we find any builds in the 'ready' state that haven't been uploaded
        to providers, do the upload if they are available on the local disk.
        '''
        for provider in self._config.providers.values():
            for image in provider.images.values():
                uploaded = False

                # Check if we've been told to shutdown
                # or if ZK connection is suspended
                if not self.running or self._zk.suspended or self._zk.lost:
                    return
                try:
                    uploaded = self._checkProviderImageUpload(provider, image)
                except Exception:
                    self.log.exception("Error uploading image %s "
                                       "to provider %s:",
                                       image.name, provider.name)

                # NOTE: Due to the configuration file disagreement issue
                # (the copy we have may not be current), if we took the time
                # to attempt to upload an image, let's short-circuit this loop
                # to give us a chance to reload the configuration file.
                if uploaded:
                    return

    def _checkProviderImageUpload(self, provider, image):
        '''
        The main body of _checkForProviderUploads. This encapsulates
        checking whether an image for a provider should be uploaded
        and performing the upload. It is a separate function so that
        exception handling can treat all provider-image uploads
        indepedently.

        :returns: True if an upload was attempted, False otherwise.
        '''
        # Check if image uploads are paused.
        if provider.images.get(image.name).pause:
            return False

        # Search for the most recent 'ready' image build
        builds = self._zk.getMostRecentBuilds(1, image.name,
                                              zk.READY)
        if not builds:
            return False

        build = builds[0]

        # Search for locally built images. The image name and build
        # sequence ID is used to name the image.
        local_images = DibImageFile.from_image_id(
            self._config.imagesdir, "-".join([image.name, build.id]))
        if not local_images:
            return False

        # See if this image has already been uploaded
        upload = self._zk.getMostRecentBuildImageUploads(
            1, image.name, build.id, provider.name, zk.READY)
        if upload:
            return False

        # See if this provider supports the available image formats
        if provider.image_type not in build.formats:
            return False

        try:
            with self._zk.imageUploadLock(
                image.name, build.id, provider.name,
                blocking=False
            ):
                # Verify once more that it hasn't been uploaded since the
                # last check.
                upload = self._zk.getMostRecentBuildImageUploads(
                    1, image.name, build.id, provider.name, zk.READY)
                if upload:
                    return False

                # NOTE: Due to the configuration file disagreement issue
                # (the copy we have may not be current), we try to verify
                # that another thread isn't trying to delete this build just
                # before we upload.
                b = self._zk.getBuild(image.name, build.id)
                if b.state == zk.DELETING:
                    return False

                # New upload number with initial state 'uploading'
                data = zk.ImageUpload()
                data.state = zk.UPLOADING
                upnum = self._zk.storeImageUpload(
                    image.name, build.id, provider.name, data)

                data = self._uploadImage(build.id, upnum, image.name,
                                         local_images, provider)

                # Set final state
                self._zk.storeImageUpload(image.name, build.id,
                                          provider.name, data, upnum)
                return True
        except exceptions.ZKLockException:
            # Lock is already held. Skip it.
            return False

    def run(self):
        '''
        Start point for the UploadWorker thread.
        '''
        self._running = True
        while self._running:
            # Don't do work if we've lost communication with the ZK cluster
            while self._zk and (self._zk.suspended or self._zk.lost):
                self.log.info("ZooKeeper suspended. Waiting")
                time.sleep(SUSPEND_WAIT_TIME)

            try:
                self._reloadConfig()
                self._checkForProviderUploads()
            except Exception:
                self.log.exception("Exception in UploadWorker:")
                time.sleep(10)

            time.sleep(self._interval)

        provider_manager.ProviderManager.stopProviders(self._config)
class NodePoolBuilder(object):
    '''
    Main class for the Nodepool Builder.

    The builder has the responsibility to:

        * Start and maintain the working state of each worker thread.
    '''
    log = logging.getLogger("nodepool.builder.NodePoolBuilder")

    def __init__(self, config_path, num_builders=1, num_uploaders=4):
        '''
        Initialize the NodePoolBuilder object.

        :param str config_path: Path to configuration file.
        :param int num_builders: Number of build workers to start.
        :param int num_uploaders: Number of upload workers to start.
        '''
        self._config_path = config_path
        self._config = None
        self._num_builders = num_builders
        self._build_workers = []
        self._num_uploaders = num_uploaders
        self._upload_workers = []
        self._janitor = None
        self._running = False

        # Worker poll intervals, in seconds.
        self.cleanup_interval = 60
        self.build_interval = 10
        self.upload_interval = 10
        self.dib_cmd = 'disk-image-create'
        self.zk = None

        # This lock is needed because the run() method is started in a
        # separate thread of control, which can return before the scheduler
        # has completed startup. We need to avoid shutting down before the
        # startup process has completed.
        self._start_lock = threading.Lock()

    #=======================================================================
    # Private methods
    #=======================================================================

    def _getAndValidateConfig(self):
        '''Load the config file and verify required builder settings.'''
        config = nodepool_config.loadConfig(self._config_path)
        if not config.zookeeper_servers.values():
            raise RuntimeError('No ZooKeeper servers specified in config.')
        if not config.imagesdir:
            raise RuntimeError('No images-dir specified in config.')
        return config

    #=======================================================================
    # Public methods
    #=======================================================================

    def start(self):
        '''
        Start the builder.

        The builder functionality is encapsulated within threads run
        by the NodePoolBuilder. This starts the needed sub-threads
        which will run forever until we tell them to stop.
        '''
        with self._start_lock:
            if self._running:
                raise exceptions.BuilderError('Cannot start, already running.')

            self._config = self._getAndValidateConfig()
            self._running = True

            # All worker threads share a single ZooKeeper instance/connection.
            self.zk = zk.ZooKeeper()
            self.zk.connect(self._config.zookeeper_servers.values())

            self.log.debug('Starting listener for build jobs')

            # Create build and upload worker objects
            for i in range(self._num_builders):
                w = BuildWorker(i, self._config_path, self.build_interval,
                                self.zk, self.dib_cmd)
                w.start()
                self._build_workers.append(w)

            for i in range(self._num_uploaders):
                w = UploadWorker(i, self._config_path, self.upload_interval,
                                 self.zk)
                w.start()
                self._upload_workers.append(w)

            self._janitor = CleanupWorker(0, self._config_path,
                                          self.cleanup_interval, self.zk)
            self._janitor.start()

            # Wait until all threads are running. Otherwise, we have a race
            # on the worker _running attribute if shutdown() is called before
            # run() actually begins.
            while not all([
                x.running for x in (self._build_workers
                                    + self._upload_workers
                                    + [self._janitor])
            ]):
                time.sleep(0)

    def stop(self):
        '''
        Stop the builder.

        Signal the sub threads to begin the shutdown process. We don't
        want this method to return until the scheduler has successfully
        stopped all of its own threads.
        '''
        with self._start_lock:
            self.log.debug("Stopping. NodePoolBuilder shutting down workers")
            for worker in (self._build_workers
                           + self._upload_workers
                           + [self._janitor]
                           ):
                worker.shutdown()

        self._running = False

        self.log.debug('Waiting for jobs to complete')

        # Do not exit until all of our owned threads exit.
        for worker in (self._build_workers
                       + self._upload_workers
                       + [self._janitor]
                       ):
            worker.join()

        self.log.debug('Terminating ZooKeeper connection')
        self.zk.disconnect()

        self.log.debug('Stopping providers')
        provider_manager.ProviderManager.stopProviders(self._config)
        self.log.debug('Finished stopping')
| |
from __future__ import print_function, unicode_literals
import datetime
import os
import random
import mock
import pytest
from aspen.utils import utcnow
from gratipay.billing.instruments import CreditCard
from gratipay.exceptions import (
UsernameIsEmpty,
UsernameTooLong,
UsernameAlreadyTaken,
UsernameContainsInvalidCharacters,
UsernameIsRestricted,
BadAmount,
)
from gratipay.models.exchange_route import ExchangeRoute
from gratipay.models.participant import (
LastElsewhere, NeedConfirmation, NonexistingElsewhere, Participant
)
from gratipay.testing import Harness, D,P,T
# TODO: Test that accounts elsewhere are not considered claimed by default
class TestNeedConfirmation(Harness):
    '''
    Exhaustively checks the truthiness of NeedConfirmation over all
    eight combinations of its three boolean arguments. Only
    (False, False, False) and (False, True, False) are expected to be
    falsy; every other combination requires confirmation.
    '''
    def test_need_confirmation1(self):
        assert not NeedConfirmation(False, False, False)

    def test_need_confirmation2(self):
        assert NeedConfirmation(False, False, True)

    def test_need_confirmation3(self):
        assert not NeedConfirmation(False, True, False)

    def test_need_confirmation4(self):
        assert NeedConfirmation(False, True, True)

    def test_need_confirmation5(self):
        assert NeedConfirmation(True, False, False)

    def test_need_confirmation6(self):
        assert NeedConfirmation(True, False, True)

    def test_need_confirmation7(self):
        assert NeedConfirmation(True, True, False)

    def test_need_confirmation8(self):
        assert NeedConfirmation(True, True, True)
class TestParticipant(Harness):
    '''
    Tests for Participant equality and for deleting accounts elsewhere
    (twitter/github/bountysource), using three claimed participants.
    '''
    def setUp(self):
        Harness.setUp(self)
        # alice, bob and carl are claimed participants, each with a
        # twitter account elsewhere; stored as attributes on self.
        for username in ['alice', 'bob', 'carl']:
            p = self.make_participant(username, claimed_time='now', elsewhere='twitter')
            setattr(self, username, p)

    def test_comparison(self):
        # Comparisons with None are intentional here: they exercise the
        # __eq__/__ne__ implementations rather than identity checks.
        assert self.alice == self.alice
        assert not (self.alice != self.alice)
        assert self.alice != self.bob
        assert not (self.alice == self.bob)
        assert self.alice != None
        assert not (self.alice == None)

    def test_delete_elsewhere_last(self):
        # Cannot delete the only account elsewhere.
        with pytest.raises(LastElsewhere):
            self.alice.delete_elsewhere('twitter', self.alice.id)

    def test_delete_elsewhere_last_signin(self):
        # A non-signin account (bountysource) doesn't count: twitter is
        # still the last sign-in account and cannot be deleted.
        self.make_elsewhere('bountysource', self.alice.id, 'alice')
        with pytest.raises(LastElsewhere):
            self.alice.delete_elsewhere('twitter', self.alice.id)

    def test_delete_elsewhere_nonsignin(self):
        g = self.make_elsewhere('bountysource', 1, 'alice')
        alice = self.alice
        alice.take_over(g)
        accounts = alice.get_accounts_elsewhere()
        assert accounts['twitter'] and accounts['bountysource']
        alice.delete_elsewhere('bountysource', 1)
        accounts = alice.get_accounts_elsewhere()
        assert accounts['twitter'] and accounts.get('bountysource') is None

    def test_delete_elsewhere_nonexisting(self):
        with pytest.raises(NonexistingElsewhere):
            self.alice.delete_elsewhere('github', 1)

    def test_delete_elsewhere(self):
        g = self.make_elsewhere('github', 1, 'alice')
        alice = self.alice
        alice.take_over(g)

        # test preconditions
        accounts = alice.get_accounts_elsewhere()
        assert accounts['twitter'] and accounts['github']

        # do the thing
        alice.delete_elsewhere('twitter', alice.id)

        # unit test
        accounts = alice.get_accounts_elsewhere()
        assert accounts.get('twitter') is None and accounts['github']
class Tests(Harness):
    """Unit tests for Participant: usernames, payment instructions, teams,
    giving, credit cards, dues, age, unclaimed resolution, archiving, and
    team membership.
    """

    def random_restricted_username(self):
        """Return a random restricted username for testing.

        Entries beginning with '%' or '~' are path patterns rather than
        literal usernames, so they are skipped.
        """
        from gratipay import RESTRICTED_USERNAMES
        random_item = random.choice(RESTRICTED_USERNAMES)
        while any(map(random_item.startswith, ('%', '~'))):
            random_item = random.choice(RESTRICTED_USERNAMES)
        return random_item

    def setUp(self):
        Harness.setUp(self)
        self.participant = self.make_participant('user1')  # Our protagonist

    def test_claiming_participant(self):
        # BUGFIX: this previously read `datetime.timedelta`, but this module
        # never imports `datetime` (see the import block at the top), which
        # raised NameError. Import the stdlib name locally instead.
        from datetime import timedelta
        now = utcnow()
        self.participant.set_as_claimed()
        actual = self.participant.claimed_time - now
        expected = timedelta(seconds=0.1)
        assert actual < expected

    def test_changing_username_successfully(self):
        self.participant.change_username('user2')
        actual = P('user2')
        assert self.participant == actual

    def test_changing_username_to_nothing(self):
        with self.assertRaises(UsernameIsEmpty):
            self.participant.change_username('')

    def test_changing_username_to_all_spaces(self):
        with self.assertRaises(UsernameIsEmpty):
            self.participant.change_username(' ')

    def test_changing_username_strips_spaces(self):
        self.participant.change_username(' aaa ')
        actual = P('aaa')
        assert self.participant == actual

    def test_changing_username_returns_the_new_username(self):
        returned = self.participant.change_username(' foo bar baz ')
        assert returned == 'foo bar baz', returned

    def test_changing_username_to_too_long(self):
        with self.assertRaises(UsernameTooLong):
            self.participant.change_username('123456789012345678901234567890123')

    def test_changing_username_to_already_taken(self):
        self.make_participant('user2')
        with self.assertRaises(UsernameAlreadyTaken):
            self.participant.change_username('user2')

    def test_changing_username_to_already_taken_is_case_insensitive(self):
        self.make_participant('UsEr2')
        with self.assertRaises(UsernameAlreadyTaken):
            self.participant.change_username('uSeR2')

    def test_changing_username_to_invalid_characters(self):
        with self.assertRaises(UsernameContainsInvalidCharacters):
            self.participant.change_username(u"\u2603") # Snowman

    def test_changing_username_to_restricted_name(self):
        username = self.random_restricted_username()
        with self.assertRaises(UsernameIsRestricted):
            self.participant.change_username(username)
        # Restricted names correspond to actual paths under the www root.
        assert os.path.exists(self.client.www_root + '/' + username)

    # id

    def test_participant_gets_a_long_id(self):
        # Python 2: bigint ids surface as `long`.
        actual = type(self.make_participant('alice').id)
        assert actual == long

    # set_payment_instruction - spi

    def test_spi_sets_payment_instruction(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team()
        alice.set_payment_instruction(team, '1.00')
        actual = alice.get_payment_instruction(team)['amount']
        assert actual == D('1.00')

    def test_spi_returns_a_dict(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team()
        actual = alice.set_payment_instruction(team, '1.00')
        assert isinstance(actual, dict)
        assert isinstance(actual['amount'], D)
        assert actual['amount'] == 1

    def test_spi_allows_up_to_a_thousand(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team()
        alice.set_payment_instruction(team, '1000.00')

    def test_spi_doesnt_allow_a_penny_more(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team()
        self.assertRaises(BadAmount, alice.set_payment_instruction, team, '1000.01')

    def test_spi_allows_a_zero_payment_instruction(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team()
        alice.set_payment_instruction(team, '0.00')

    def test_spi_doesnt_allow_a_penny_less(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team()
        self.assertRaises(BadAmount, alice.set_payment_instruction, team, '-0.01')

    def test_spi_is_free_rider_defaults_to_none(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        assert alice.is_free_rider is None

    def test_spi_sets_is_free_rider_to_false(self):
        # Paying the Gratipay team itself clears free-rider status.
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        gratipay = self.make_team('Gratipay', owner=self.make_participant('Gratipay').username)
        alice.set_payment_instruction(gratipay, '0.01')
        assert alice.is_free_rider is False
        assert P('alice').is_free_rider is False

    def test_spi_resets_is_free_rider_to_null(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        gratipay = self.make_team('Gratipay', owner=self.make_participant('Gratipay').username)
        alice.set_payment_instruction(gratipay, '0.00')
        assert alice.is_free_rider is None
        assert P('alice').is_free_rider is None

    def test_spi_sets_id_fields(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team()
        actual = alice.set_payment_instruction(team, '1.00')
        assert actual['participant_id'] == alice.id
        assert actual['team_id'] == team.id

    # get_teams - gt

    def test_get_teams_gets_teams(self):
        self.make_team(is_approved=True)
        picard = P('picard')
        assert [t.slug for t in picard.get_teams()] == ['TheEnterprise']

    def test_get_teams_can_get_only_approved_teams(self):
        self.make_team(is_approved=True)
        picard = P('picard')
        self.make_team('The Stargazer', owner=picard, is_approved=False)
        assert [t.slug for t in picard.get_teams(only_approved=True)] == ['TheEnterprise']

    def test_get_teams_can_get_only_open_teams(self):
        self.make_team()
        picard = P('picard')
        self.make_team('The Stargazer', owner=picard, is_closed=True)
        assert [t.slug for t in picard.get_teams(only_open=True)] == ['TheEnterprise']

    def test_get_teams_can_get_all_teams(self):
        self.make_team(is_approved=True)
        picard = P('picard')
        self.make_team('The Stargazer', owner=picard, is_approved=False)
        self.make_team('The Trident', owner=picard, is_approved=False, is_closed=True)
        assert [t.slug for t in picard.get_teams()] == \
                                          ['TheEnterprise', 'TheStargazer', 'TheTrident']

    # giving

    def test_giving_only_includes_funded_payment_instructions(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        bob = self.make_participant('bob', claimed_time='now')
        carl = self.make_participant('carl', claimed_time='now', last_bill_result="Fail!")
        team = self.make_team(is_approved=True)
        alice.set_payment_instruction(team, '3.00')  # The only funded tip
        bob.set_payment_instruction(team, '5.00')
        carl.set_payment_instruction(team, '7.00')
        assert alice.giving == D('3.00')
        assert bob.giving == D('0.00')
        assert carl.giving == D('0.00')
        funded_tip = self.db.one("SELECT * FROM payment_instructions WHERE is_funded ORDER BY id")
        assert funded_tip.participant_id == alice.id

    def test_giving_only_includes_the_latest_payment_instruction(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team(is_approved=True)
        alice.set_payment_instruction(team, '12.00')
        alice.set_payment_instruction(team, '4.00')
        assert alice.giving == D('4.00')

    @mock.patch('braintree.PaymentMethod.delete')
    def test_giving_is_updated_when_credit_card_is_updated(self, btd):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='fail')
        team = self.make_team(is_approved=True)
        alice.set_payment_instruction(team, '5.00')  # Not funded, failing card
        assert alice.giving == D('0.00')
        assert T(team.slug).receiving == D('0.00')
        # Alice updates her card..
        ExchangeRoute.from_network(alice, 'braintree-cc').invalidate()
        ExchangeRoute.insert(alice, 'braintree-cc', '/cards/bar')
        assert alice.giving == D('5.00')
        assert T(team.slug).receiving == D('5.00')

    @mock.patch('braintree.PaymentMethod.delete')
    def test_giving_is_updated_when_credit_card_fails(self, btd):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team(is_approved=True)
        alice.set_payment_instruction(team, '5.00')  # funded
        assert alice.giving == D('5.00')
        assert T(team.slug).receiving == D('5.00')
        assert P(team.owner).taking == D('5.00')
        ExchangeRoute.from_network(alice, 'braintree-cc').update_error("Card expired")
        assert P('alice').giving == D('0.00')
        assert T(team.slug).receiving == D('0.00')
        assert P(team.owner).taking == D('0.00')

    # credit_card_expiring

    def test_credit_card_expiring_no_card(self):
        alice = self.make_participant('alice', claimed_time='now')
        assert alice.credit_card_expiring() == None

    @mock.patch.object(CreditCard, "from_route")
    def test_credit_card_expiring_valid_card(self, cc):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        cc.return_value = CreditCard(
            expiration_year=2050,
            expiration_month=12
        )
        assert alice.credit_card_expiring() == False

    @mock.patch.object(CreditCard, "from_route")
    def test_credit_card_expiring_expired_card(self, cc):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        cc.return_value = CreditCard(
            expiration_year=2010,
            expiration_month=12
        )
        assert alice.credit_card_expiring() == True

    # dues

    def test_dues_are_cancelled_along_with_payment_instruction(self):
        alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
        team = self.make_team(is_approved=True)
        alice.set_payment_instruction(team, '5.00')
        # Fake dues
        self.db.run("""
            UPDATE payment_instructions ppi
               SET due = '5.00'
             WHERE ppi.participant_id = %s
               AND ppi.team_id = %s
        """, (alice.id, team.id, ))
        assert alice.get_due(team) == D('5.00')
        # Increase subscription amount: dues carry over.
        alice.set_payment_instruction(team, '10.00')
        assert alice.get_due(team) == D('5.00')
        # Cancel the subscription: dues are wiped.
        alice.set_payment_instruction(team, '0.00')
        assert alice.get_due(team) == D('0.00')
        # Revive the subscription: dues stay wiped.
        alice.set_payment_instruction(team, '5.00')
        assert alice.get_due(team) == D('0.00')

    # get_age_in_seconds - gais

    def test_gais_gets_age_in_seconds(self):
        alice = self.make_participant('alice', claimed_time='now')
        actual = alice.get_age_in_seconds()
        assert 0 < actual < 1

    def test_gais_returns_negative_one_if_None(self):
        alice = self.make_participant('alice', claimed_time=None)
        actual = alice.get_age_in_seconds()
        assert actual == -1

    # resolve_unclaimed - ru

    def test_ru_returns_None_for_orphaned_participant(self):
        resolved = self.make_participant('alice').resolve_unclaimed()
        assert resolved is None, resolved

    def test_ru_returns_bitbucket_url_for_stub_from_bitbucket(self):
        unclaimed = self.make_elsewhere('bitbucket', '1234', 'alice')
        stub = P(unclaimed.participant.username)
        actual = stub.resolve_unclaimed()
        assert actual == "/on/bitbucket/alice/"

    def test_ru_returns_github_url_for_stub_from_github(self):
        unclaimed = self.make_elsewhere('github', '1234', 'alice')
        stub = P(unclaimed.participant.username)
        actual = stub.resolve_unclaimed()
        assert actual == "/on/github/alice/"

    def test_ru_returns_twitter_url_for_stub_from_twitter(self):
        unclaimed = self.make_elsewhere('twitter', '1234', 'alice')
        stub = P(unclaimed.participant.username)
        actual = stub.resolve_unclaimed()
        assert actual == "/on/twitter/alice/"

    def test_ru_returns_openstreetmap_url_for_stub_from_openstreetmap(self):
        unclaimed = self.make_elsewhere('openstreetmap', '1', 'alice')
        stub = P(unclaimed.participant.username)
        actual = stub.resolve_unclaimed()
        assert actual == "/on/openstreetmap/alice/"

    # archive

    def test_archive_fails_for_team_owner(self):
        alice = self.make_participant('alice')
        self.make_team(owner=alice)
        with self.db.get_cursor() as cursor:
            pytest.raises(alice.StillOnATeam, alice.archive, cursor)

    def test_archive_fails_if_balance_is_positive(self):
        alice = self.make_participant('alice', balance=2)
        with self.db.get_cursor() as cursor:
            pytest.raises(alice.BalanceIsNotZero, alice.archive, cursor)

    def test_archive_fails_if_balance_is_negative(self):
        alice = self.make_participant('alice', balance=-2)
        with self.db.get_cursor() as cursor:
            pytest.raises(alice.BalanceIsNotZero, alice.archive, cursor)

    def test_archive_clears_claimed_time(self):
        alice = self.make_participant('alice')
        with self.db.get_cursor() as cursor:
            archived_as = alice.archive(cursor)
        assert P(archived_as).claimed_time is None

    def test_archive_records_an_event(self):
        alice = self.make_participant('alice')
        with self.db.get_cursor() as cursor:
            archived_as = alice.archive(cursor)
        payload = self.db.one("SELECT * FROM events WHERE payload->>'action' = 'archive'").payload
        assert payload['values']['old_username'] == 'alice'
        assert payload['values']['new_username'] == archived_as

    # suggested_payment

    def test_suggested_payment_is_zero_for_new_user(self):
        alice = self.make_participant('alice')
        assert alice.suggested_payment == 0

    # mo - member_of

    def test_mo_indicates_membership(self):
        enterprise = self.make_team(available=50)
        alice = self.make_participant( 'alice'
                                     , email_address='alice@example.com'
                                     , verified_in='TT'
                                     , claimed_time='now'
                                      )
        picard = Participant.from_username('picard')
        enterprise.add_member(alice, picard)
        assert alice.member_of(enterprise)

    def test_mo_indicates_non_membership(self):
        enterprise = self.make_team()
        assert not self.make_participant('alice').member_of(enterprise)

    def test_mo_is_false_for_owners(self):
        enterprise = self.make_team()
        assert not Participant.from_username('picard').member_of(enterprise)
| |
# -*- coding: utf-8 -*-
"""Inception-ResNet V2 model for Keras.
Model naming and structure follows TF-slim implementation (which has some additional
layers and different number of filters from the original arXiv paper):
https://github.com/tensorflow/models/blob/master/slim/nets/inception_resnet_v2.py
Pre-trained ImageNet weights are also converted from TF-slim, which can be found in:
https://github.com/tensorflow/models/tree/master/slim#pre-trained-models
# Reference
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import numpy as np
import warnings
import gc
from datetime import datetime
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Activation
from keras.layers import AveragePooling2D
from keras.layers import BatchNormalization
from keras.layers import Concatenate
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import MaxPooling2D
from keras import backend as K
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
# Directory where training checkpoints (model weight files) are written.
SCENE_MODEL_SAVE_PATH = r"C:\Users\Air\Desktop"
# Pre-trained Inception-ResNet-v2 weights (TF dim ordering, with top) used to
# initialise the network before fine-tuning.
MODEL_WEIGHT_FILE = r'C:\Users\Air\Desktop\inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
def preprocess_input(x):
    """Scale a batch of images from [0, 255] into [-1, 1], in place.

    This is the "Inception" preprocessing convention and differs from
    `imagenet_utils.preprocess_input()`. The array is modified in place
    via augmented assignment and also returned for convenience.

    # Arguments
        x: 4D float numpy array of RGB values in [0, 255].
    # Returns
        The same array, rescaled to [-1, 1].
    """
    # (x / 255 - 0.5) * 2, kept as three in-place steps.
    x /= 255.0
    x -= 0.5
    x *= 2.0
    return x
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
    """Apply a 2D convolution, then optional batch norm and activation.

    # Arguments
        x: input tensor.
        filters: number of filters for the `Conv2D`.
        kernel_size: kernel size for the `Conv2D`.
        strides: strides for the `Conv2D`.
        padding: padding mode for the `Conv2D`.
        activation: activation applied last, or None to skip it.
        use_bias: if True, the conv has a bias and batch norm is skipped.
        name: base name of the ops; the batch norm gets `name + '_bn'`
            and the activation gets `name + '_ac'`.

    # Returns
        Output tensor after `Conv2D` (+ `BatchNormalization` + activation).
    """
    x = Conv2D(filters, kernel_size, strides=strides, padding=padding,
               use_bias=use_bias, name=name)(x)
    if not use_bias:
        # Batch norm only when the conv itself carries no bias; the
        # normalization axis is the channel axis of the backend format.
        axis = 1 if K.image_data_format() == 'channels_first' else 3
        x = BatchNormalization(axis=axis, scale=False,
                               name=None if name is None else name + '_bn')(x)
    if activation is not None:
        x = Activation(activation,
                       name=None if name is None else name + '_ac')(x)
    return x
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
    """Adds a Inception-ResNet block.
    This function builds 3 types of Inception-ResNet blocks mentioned
    in the paper, controlled by the `block_type` argument (which is the
    block name used in the official TF-slim implementation):
        - Inception-ResNet-A: `block_type='block35'`
        - Inception-ResNet-B: `block_type='block17'`
        - Inception-ResNet-C: `block_type='block8'`
    # Arguments
        x: input tensor.
        scale: scaling factor to scale the residuals (i.e., the output of
            passing `x` through an inception module) before adding them
            to the shortcut branch. Let `r` be the output from the residual branch,
            the output of this block will be `x + scale * r`.
        block_type: `'block35'`, `'block17'` or `'block8'`, determines
            the network structure in the residual branch.
        block_idx: an `int` used for generating layer names. The Inception-ResNet blocks
            are repeated many times in this network. We use `block_idx` to identify
            each of the repetitions. For example, the first Inception-ResNet-A block
            will have `block_type='block35', block_idx=0`, and the layer names will have
            a common prefix `'block35_0'`.
        activation: activation function to use at the end of the block
            (see [activations](keras./activations.md)).
            When `activation=None`, no activation is applied
            (i.e., "linear" activation: `a(x) = x`).
    # Returns
        Output tensor for the block.
    # Raises
        ValueError: if `block_type` is not one of `'block35'`,
            `'block17'` or `'block8'`.
    """
    # Residual-branch layouts follow the TF-slim reference implementation.
    # Layer names are derived from block_type/block_idx below, so branch
    # construction order must stay stable for weights to load by name.
    if block_type == 'block35':
        branch_0 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(branch_1, 32, 3)
        branch_2 = conv2d_bn(x, 32, 1)
        branch_2 = conv2d_bn(branch_2, 48, 3)
        branch_2 = conv2d_bn(branch_2, 64, 3)
        branches = [branch_0, branch_1, branch_2]
    elif block_type == 'block17':
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 128, 1)
        branch_1 = conv2d_bn(branch_1, 160, [1, 7])
        branch_1 = conv2d_bn(branch_1, 192, [7, 1])
        branches = [branch_0, branch_1]
    elif block_type == 'block8':
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(branch_1, 224, [1, 3])
        branch_1 = conv2d_bn(branch_1, 256, [3, 1])
        branches = [branch_0, branch_1]
    else:
        raise ValueError('Unknown Inception-ResNet block type. '
                         'Expects "block35", "block17" or "block8", '
                         'but got: ' + str(block_type))
    block_name = block_type + '_' + str(block_idx)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    mixed = Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches)
    # 1x1 conv (no BN, linear) projects the mixed branches back to the
    # input's channel count so the residual addition is shape-compatible.
    up = conv2d_bn(mixed,
                   K.int_shape(x)[channel_axis],
                   1,
                   activation=None,
                   use_bias=True,
                   name=block_name + '_conv')
    # Residual addition: output = shortcut + scale * residual.
    x = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
               output_shape=K.int_shape(x)[1:],
               arguments={'scale': scale},
               name=block_name)([x, up])
    if activation is not None:
        x = Activation(activation, name=block_name + '_ac')(x)
    return x
def InceptionResNetV2(img_rows, img_cols, color_type=1, num_classes=None):
    """Build Inception-ResNet-v2, load pre-trained weights, and compile.

    # Arguments
        img_rows, img_cols: spatial size of the input (299x299 matches the
            pre-trained weights).
        color_type: number of input channels (3 for RGB).
        num_classes: size of the softmax head; defaults to 80 when None.
            NOTE(review): `model.load_weights` below performs a strict
            full-model load, so a head size other than the checkpoint's
            will fail -- confirm before passing other values.

    # Returns
        A compiled `keras.models.Model` ready for fine-tuning.
    """
    # Handle dimension ordering for different backends. Use
    # image_data_format() for consistency with conv2d_bn() and
    # inception_resnet_block() above (image_dim_ordering() is the
    # deprecated spelling of the same query).
    global bn_axis
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='data')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='data')

    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
    x = conv2d_bn(x, 32, 3, padding='valid')
    x = conv2d_bn(x, 64, 3)
    x = MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, padding='valid')
    x = MaxPooling2D(3, strides=2)(x)

    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)

    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)

    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)

    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)

    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)

    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)

    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name='conv_7b')

    # Classification head. Previously hard-coded to 80 units while
    # silently ignoring `num_classes`; honor the parameter instead,
    # defaulting to 80 for backward compatibility.
    classes = 80 if num_classes is None else num_classes
    x_fc = GlobalAveragePooling2D(name='avg_pool')(x)
    x_fc = Dense(classes, activation='softmax', name='predictions')(x_fc)

    # Create model and load the pre-trained weights (full-model match).
    model = Model(img_input, x_fc, name='inception_resnet_v2')
    model.load_weights(MODEL_WEIGHT_FILE)

    # Small learning rate for fine-tuning. (The previous comment claimed
    # 0.001 but the actual rate has always been 0.0002.)
    sgd = SGD(lr=0.0002, decay=0.7, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
if __name__ == '__main__':
    # --- Fine-tuning entry point ---------------------------------------
    img_rows, img_cols = 299, 299 # Resolution of inputs
    channel = 3
    num_classes = 80
    batch_size = 16
    nb_epoch = 3
    # Dataset sizes; used only to derive steps per epoch below.
    nb_train_samples = 53880
    nb_validation_samples = 7120
    # Load our model (builds the graph, loads pre-trained weights, compiles).
    model = InceptionResNetV2(img_rows, img_cols, channel, num_classes)
    # Class labels are the directory names "0".."79".
    our_class = []
    for i in range(num_classes):
        our_class.append(str(i))
    # Data augmentation for training; validation data is left untouched.
    # NOTE(review): `preprocess_input` is not applied by either generator,
    # so the network sees raw [0, 255] pixels -- confirm this is intended.
    train_datagen = ImageDataGenerator(
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')
    test_datagen = ImageDataGenerator()
    # NOTE(review): training/validation paths are Linux-style while the
    # checkpoint paths above are Windows-style -- verify the target machine.
    train_generator = train_datagen.flow_from_directory(
        '/home/yan/Desktop/QlabChallengerRepo/dataset_224/data/train/',
        target_size=(img_rows,img_cols),
        batch_size=batch_size,
        classes=our_class)
    validation_generator = test_datagen.flow_from_directory(
        '/home/yan/Desktop/QlabChallengerRepo/dataset_224/data/valid/',
        target_size=(img_rows,img_cols),
        batch_size=batch_size,
        classes=our_class)
    #print(train_generator.class_indices)
    #print(validation_generator.class_indices)
    # Checkpoint callback: save the weights after every epoch, tagged with
    # epoch number and validation accuracy.
    checkpointer = ModelCheckpoint(filepath=(SCENE_MODEL_SAVE_PATH+'/INCEPTION_RESNET_V2_MODEL_WEIGHTS.{epoch:02d}-{val_acc:.5f}.hdf5'),
                                   monitor='val_acc',
                                   verbose=1,
                                   save_weights_only= True,
                                   save_best_only=False)
    # Start Fine-tuning
    model.fit_generator(train_generator,
                        steps_per_epoch=nb_train_samples//batch_size,
                        epochs=nb_epoch,
                        shuffle=True,
                        verbose=1,
                        callbacks=[checkpointer],
                        validation_data=validation_generator,
                        validation_steps=nb_validation_samples//batch_size)
    gc.collect()
| |
from pyparsing import *
from itertools import *
import os
#~ Local libraries
from syntax.syntax import Syntax
from ekab.tbox import *
from queries.cq import *
from queries.ucq import UCQ
from queries.ecq import ECQ
from queries.rewrite import rewrite
class EKab:
#~ A EKab object represents an EKab planning problem and its constituents.
#~ An eKab is a tuple <C,C_0,T,A_0,act,rul>, where:
#~ - C is the (infinite) object domain
#~ - C_0 is a finite subset of C
#~ - T is a DL-Lite TBox
#~ - A_0 is a DL-Lite ABox
#~ - act is a set of parametric actions
    #~ - rul is a set of condition-action rules
#~ The planning problem is represented by a tuple <K,G>, where K in an eKab, and G a boolean ECQ.
#~
#~ A EKab object contains the following elements (and we show also their initial value):
#~ - self.__concepts = set()
#~ Represents the set of atomic concepts used in the TBox T.
#~ Each atomic concept is saved as a string.
#~ - self.__conceptsParse = NoMatch()
    #~ Used when parsing, it's the conjunction of all atomic concepts,
#~ i.e. Literal(concept name) + ... + Literal(concept name)
#~ - self.__roles = set()
    #~ Represents the set of atomic roles used in the TBox T.
#~ Each atomic role is saved as a string.
#~ - self.__rolesParse = NoMatch()
    #~ Used when parsing, it's the conjunction of all atomic roles,
#~ i.e. Literal(role name) + ... + Literal(role name)
#~ - self.__axiomsPos = set()
#~ The set of positive axioms of the TBox T.
#~ Positive axioms are the one like:
#~ basicConcept isA genericConcept, basicRole isA genericConcept
#~ Each axiom is saved as a tuple where the first element represents
#~ the left argument, and the second the right argument.
#~ - self.__axiomsNeg = set()
#~ The set of negative axioms of the TBox T.
#~ Negative axioms are the one like:
#~ basicConcept isA not genericConcept, basicRole isA not genericConcept
#~ Each axiom is saved as a tuple where the first element represents
#~ the left argument, and the second the right argument.
#~ We don't need to save the "not".
#~ - self.__axiomsFunct = set()
#~ The set of functional axioms of the TBox T.
#~ Functional axioms are the one like:
#~ (funct basicRole)
#~ For each axiom is saved only the basicRole, which is either a string (i.e., an atomic role)
#~ or a tuple (i.e. inverse of an atomic role).
#~ We don't need to save the "funct".
#~ - self.__rules = set()
#~ The set of condition-action rules.
#~ Each rule is a tuple "(ruleName, ruleCond, ruleAction)", where:
#~ - ruleName is a string of the name of the rule;
#~ - ruleCond is an ECQ() representing the condition;
#~ - ruleAction is a string of the called action.
#~ - self.__actions = set()
#~ The set of actions.
#~ Each action is a tuple "(actionName, actionParam, actionEffects)" where:
#~ - actionName is a string of the name of the action;
    #~ - actionParam is a tuple of strings of the parameters of the action;
#~ - actionEffects is a tuple of conditional effects.
#~ Each conditional effect is a tuple "(effectCond, effectAdd, effectDel)", where:
#~ - effectCond is an ECQ() representing the effect's condition;
#~ - effectAdd is a tuple of QueryAtom() that represents the atoms that will be added;
#~ - effectDel is a tuple of QueryAtom() that represents the atoms that will be deleted;
#~ - self.__queryUnsat = None
#~ Represents the query to check the satisfiability of the KB <T,A>
#~ It is an UCQ with inequalities, thus an object from UCQ(queryToParse, inequalitiesAllowed = True)
#~ - self.__rulesRewritten = set()
#~ It is the set of condition-action rules from self.__rules, where ruleCond has been rewritten
#~ in order to compile away the need of the TBox T
#~ - self.__actionsRewritten = set()
#~ It is the set of actions from self.__actions, where effectCond has been rewritten
#~ in order to compile away the need of the TBox T
#~ - self.__queryUnsatRewritten = None
#~ It is the rewritten version of __queryUnsat,
#~ in order to compile away the need of the TBox T
#~
#~ As we adopt the PDDL syntax to represent the problem, there are two files:
#~ the planning domain
#~ the planning problem
#~ The planning domain contains:
#~ - the planning domain name
#~ - the TBox T
#~ - the actions
#~ - the condition-action rules
#~ while the planning problem contains:
#~ - the initial state, represented by A_0
#~ - the goal formula G
def __init__(self, planningDomainFile, planningProblemFile = None):
self.__domainName = None
self.__problemName = None
self.__concepts = set()
self.__conceptsParse = NoMatch() #Used when parsing, it's the list of all atomic concepts
self.__roles = set()
self.__rolesParse = NoMatch() #Used when parsing, it's the list of all atomic roles
self.__axiomsPos = set()
self.__axiomsNeg = set()
self.__axiomsFunct = set()
self.__rules = set()
self.__actions = set()
self.__queryUnsat = None
self.__individuals = set()
self.__assertions = set()
self.__goalQuery = None
self.__rulesRewritten = set()
self.__actionsRewritten = set()
self.__queryUnsatRewritten = None
self.__goalQueryRewritten = None
#~ Check if planningDomainFile is a proper file path
if not isinstance(planningDomainFile, str) or not os.path.isfile(planningDomainFile):
raise Exception("The provided file path for the planning domain is not valid!\n" + str(planningDomainFile))
#~ Check if planningProblemFile is a proper file path
if not isinstance(planningProblemFile, str) or not os.path.isfile(planningProblemFile):
raise Exception("The provided file path for the planning problem is not valid!\n" + str(planningProblemFile))
#~ Parse the planning domain and planning problem
#~ The parsing is divided in parts:
#~ - parse the domain file, in order to retrieve the domain name and check that
#~ - the requirements and the predicates are ok. If not, raise an exception.
#~ From the predicates we create the list of concepts and roles;
#~ - parse the problem file, in order to retrieve the individuals, the ABox assertions, and the goal;
#~ - parse the domain file again to retrieve the axioms, the condition-action rules, and the actions.
self.__parseNameRequirementsPredicates(planningDomainFile)
self.__parseProblem(planningProblemFile)
self.__parseAxiomsRulesActions(planningDomainFile)
#~ Calculate queryUnsat
self.__calculateQueryUnsat()
#~ Compile the TBox away
self.__compileTBoxAway()
def __parseNameRequirementsPredicates(self, planningDomainFile):
    """Parse the domain name, the requirements and the (:predicates ...) section.

    Side effects: sets self.__domainName; splits the declared predicates by
    arity into atomic concepts (one variable) and atomic roles (two
    variables), filling self.__concepts / self.__roles and the matching
    pyparsing alternatives self.__conceptsParse / self.__rolesParse used by
    the later parsing passes. Raises an Exception if a predicate name
    clashes with a syntax keyword.
    """
    #~ Predicates are defined in the usual PDDL way:
    #~ (:predicates
    #~   (C ?x)
    #~   (P ?x ?y)
    #~   ...
    #~ )
    syntax = Syntax()
    #~ A concept is composed by a name and only one variable, e.g. C ?x
    #~ We remove the variable and replace it with the keyword "Concept"
    pddlConcept = Group(syntax.allowed_word.setResultsName("predicateName") + syntax.variable.setParseAction(replaceWith("Concept")).setResultsName("predicateType"))
    #~ A role is composed by a name and exactly two variables, e.g. P ?x ?y
    #~ We remove the first variable and replace it with the keyword "Role"
    #~ while we remove completely the second
    pddlRole = Group(syntax.allowed_word.setResultsName("predicateName") + \
        syntax.variable.setParseAction(replaceWith('Role')).setResultsName("predicateType") + \
        syntax.variable.setParseAction().suppress() )
    #~ We put syntax.variable.setParseAction() (no argument) in order to remove the
    #~ effect of the previous syntax.variable.setParseAction(replaceWith('Role')).
    #~ If we fail to do so, then all the times we use syntax.variable, it will be
    #~ substituted by the word "Role".
    #~ A PDDL predicate is either a concept or a role (arity decides which).
    pddlPredicate = pddlConcept ^ pddlRole
    pddlPredicates = syntax.leftPar + syntax.pddlPredicatesTag.suppress() + Group(OneOrMore(syntax.leftPar + pddlPredicate + syntax.rightPar)).setResultsName("predicates") + syntax.rightPar
    #~ Grammar for the whole domain file. Everything after the predicates
    #~ section is swallowed by the final Regex and ignored here: axioms,
    #~ rules and actions are parsed later by __parseAxiomsRulesActions.
    pddlDomain = StringStart() + \
        syntax.leftPar + syntax.pddlDefineTag + \
        syntax.leftPar + syntax.pddlDomainTag + \
        syntax.pddlDomainName.setResultsName("domainName") + \
        syntax.rightPar + \
        syntax.pddlRequirements.suppress() + \
        pddlPredicates + \
        Regex("(.*\r*\n*)*\)\r*\n*").suppress()
    #~ Parse the file
    result = pddlDomain.parseFile(planningDomainFile)
    self.__domainName = result["domainName"] #Save the domain name
    #~ Save the concepts and roles
    for predicate in result["predicates"]:
        #~ If a predicate has the same name as a keyword (e.g., "exists", "not", etc.), raise an Exception
        if str(predicate["predicateName"]) in syntax.keywords:
            raise Exception("It is not possible to use the word \"" + str(predicate["predicateName"]) + "\" as a name for a predicate, as it is a keyword.")
        if predicate["predicateType"][0] == "Concept":
            #~ Arity-1 predicate: record it as an atomic concept.
            self.__concepts.add(str(predicate["predicateName"]))
            self.__conceptsParse = self.__conceptsParse ^ Literal(str(predicate["predicateName"]))
        else:
            #~ Arity-2 predicate: record it as an atomic role.
            self.__roles.add(str(predicate["predicateName"]))
            self.__rolesParse = self.__rolesParse ^ Literal(str(predicate["predicateName"]))
def __parseAxiomsRulesActions(self, planningDomainFile):
    """Parse the axioms, the condition-action rules and the actions from the
    domain file, run the static well-formedness checks, and fill
    self.__axiomsPos / self.__axiomsNeg / self.__axiomsFunct,
    self.__rules and self.__actions.

    Raises an Exception on any malformed construct: duplicate rule or
    action names, vocabulary terms not declared in (:predicates ...),
    rules calling missing actions, actions called by a number of rules
    different from one, or effect variables that are neither free in the
    effect's condition nor parameters of the action.
    """
    #~ The possible axioms, as defined in DL-Lite, are (here written in PDDL-EKab syntax):
    #~ (isA pddlBasicRole pddlGeneralRole)
    #~ (isA pddlBasicConcept pddlGeneralConcept)
    #~ (funct pddlBasicRole)
    #~ The possible terms participating in axioms are, instead:
    #~ pddlAtomicRole := the name of a role defined in (:predicates ...)
    #~ pddlBasicRole := pddlAtomicRole | (inverse pddlAtomicRole)
    #~ pddlGeneralRole := pddlBasicRole | (not pddlBasicRole)
    #~ pddlAtomicConcept := the name of a concept defined in (:predicates ...)
    #~ pddlBasicConcept := pddlAtomicConcept | (exists pddlBasicRole)
    #~ pddlGeneralConcept := pddlBasicConcept | (not pddlBasicConcept) | (existsQualified pddlBasicRole pddlBasicConcept) | topC
    #~
    #~ The axioms section is parsed as generic s-expressions (nestedExpr);
    #~ each parsed axiom is then classified by the Axiom class, which splits
    #~ them into positive, negative, and functionality axioms.
    #~ (An earlier, unused pyparsing grammar spelling out the BNF above was
    #~ dead code and has been removed.)
    syntax = Syntax()
    pddlAxioms = syntax.leftPar + syntax.pddlAxiomsTag + \
        Group(OneOrMore(nestedExpr())).setResultsName("axioms") + syntax.rightPar
    #~ A condition-action rule has the form of:
    #~ ECQ -> action name
    #~ This, in the PDDL-EKab syntax, is expressed as:
    #~ (:rule rule-name
    #~   :condition ECQ
    #~   :action action-name
    #~ )
    pddlRuleName = syntax.allowed_word.setResultsName("ruleName") # Rule name
    pddlRuleCondition = syntax.pddlRuleConditionTag + nestedExpr().setResultsName("ruleCondition")
    pddlRuleAction = syntax.pddlRuleActionTag + syntax.allowed_word.setResultsName("ruleAction")
    pddlRule = Group(syntax.leftPar + syntax.pddlRuleTag + pddlRuleName + pddlRuleCondition + pddlRuleAction + syntax.rightPar)
    pddlRules = Group(ZeroOrMore(pddlRule)).setResultsName("rules")
    #~ An action has the form of:
    #~ name (input parameters): { list of effects }
    #~ Each effect has the form:
    #~ ECQ ~> add F+, del F-
    #~ where F+ and F- are sets of atomic effects.
    #~ This, in the PDDL-EKab syntax, is expressed as:
    #~ (:action action-name
    #~   :parameters ( list of variables )
    #~   :effects (
    #~     :condition ECQ
    #~     :add (atoms list)
    #~     :delete (atoms list)
    #~   )
    #~   ...
    #~ )
    pddlActionName = syntax.allowed_word.setResultsName("actionName")
    pddlActionParameters = Literal(":parameters").suppress() + syntax.leftPar + Group(ZeroOrMore(syntax.variable)).setResultsName("actionParameters") + syntax.rightPar
    pddlActionEffectCondition = syntax.pddlActionEffectConditionTag + nestedExpr().setResultsName("effectCondition")
    pddlActionEffectAdd = syntax.pddlActionEffectAddTag + nestedExpr().setResultsName("effectAdd")
    pddlActionEffectDel = syntax.pddlActionEffectDelTag + nestedExpr().setResultsName("effectDelete")
    #~ An effect always has a condition plus at least one of :add / :delete.
    pddlActionEffect = Group(syntax.leftPar + pddlActionEffectCondition + pddlActionEffectAdd + syntax.rightPar) ^ \
        Group(syntax.leftPar + pddlActionEffectCondition + pddlActionEffectDel + syntax.rightPar) ^ \
        Group(syntax.leftPar + pddlActionEffectCondition + pddlActionEffectAdd + pddlActionEffectDel + syntax.rightPar)
    pddlActionEffects = syntax.pddlActionEffectsTag + Group(OneOrMore(pddlActionEffect)).setResultsName("actionEffects")
    pddlAction = Group(syntax.leftPar + syntax.pddlActionTag + pddlActionName + pddlActionParameters + pddlActionEffects + syntax.rightPar)
    pddlActions = Group(ZeroOrMore(pddlAction)).setResultsName("actions")
    #~ The (:predicates ...) section was already analysed by
    #~ __parseNameRequirementsPredicates; here it is matched and skipped
    #~ by the Regex expression below.
    pddlDomain = StringStart() + \
        syntax.leftPar + syntax.pddlDefineTag + \
        syntax.leftPar + syntax.pddlDomainTag + \
        Literal(self.__domainName).suppress() + syntax.rightPar + \
        syntax.pddlRequirements.suppress() + \
        Regex(r"\(\s*\:predicates\s*\r*\n*(\s*\(.*\)\s*\r*\n*)*\s*\)").suppress() + \
        pddlAxioms + \
        pddlRules + \
        pddlActions + \
        syntax.rightPar + \
        StringEnd()
    #~ Parse the file, reporting the offending location on failure.
    try:
        result = pddlDomain.parseFile(planningDomainFile)
    except ParseException as x:
        print("Line {e.lineno}, column {e.col}:\n'{e.line}'".format(e=x))
        raise
    #~
    #~ Analyse the axioms
    #~
    for axiomParse in result["axioms"]:
        axiom = Axiom(axiomParse, self.__concepts, self.__roles)
        if axiom.disjoint():
            self.__axiomsNeg.add(axiom)
        elif axiom.functionality():
            self.__axiomsFunct.add(axiom)
        else:
            self.__axiomsPos.add(axiom)
    #~
    #~ Analyse the rules
    #~
    for rule in result["rules"]:
        #~ Check that no other rule has the same name
        for otherRule in result["rules"]:
            if rule != otherRule and str(rule["ruleName"]) == str(otherRule["ruleName"]):
                raise Exception("There are two rules with the same name: " + str(rule["ruleName"]))
        #~ Create the ECQ for the rule condition
        ruleCond = ECQ(rule["ruleCondition"][0], self.__concepts, self.__roles, self.__individuals)
        #~ The terms used in the rule must belong to the TBox vocabulary
        #~ (thus, to the predicate section)
        if not ruleCond.concepts().issubset(self.__concepts):
            raise Exception("The following rule is using terms that are not concepts in the predicates: (:" + str(rule["ruleName"]) + " ... )\n" \
                "Terms that are not concepts: " + str(ruleCond.concepts().difference(self.__concepts)))
        if not ruleCond.roles().issubset(self.__roles):
            raise Exception("The following rule is using terms that are not roles in the predicates: (:" + str(rule["ruleName"]) + " ... )\n" \
                "Terms that are not roles: " + str(ruleCond.roles().difference(self.__roles)))
        #~ Check that the action called exists
        found = False
        for action in result["actions"]:
            if str(rule["ruleAction"]) == str(action["actionName"]):
                found = True
        if not found:
            raise Exception("The rule (:" + str(rule["ruleName"]) + " ... ) is calling an action ("+ str(rule["ruleAction"]) + " ... ) that is not specified after.")
        #~ Save the rule as a (name, condition, action name) tuple
        self.__rules.add((str(rule["ruleName"]), ruleCond, str(rule["ruleAction"])))
    #~
    #~ Analyse the actions
    #~
    def analyzeAtomicEffects(atomList, effectConcepts, effectRoles, effectVars):
        #~ Turn a parsed list of atomic effects into QueryAtom objects,
        #~ recording into the three given sets the concepts, roles and
        #~ variables they mention (consumed by the checks below).
        atoms = []
        for atomicEffect in atomList:
            atom = QueryAtom(atomicEffect, self.__concepts, self.__roles, self.__individuals)
            if atom.atomType() == "role":
                effectRoles.add(atom.term())
                if isinstance(atom.var1(), Variable):
                    effectVars.add(atom.var1())
                if isinstance(atom.var2(), Variable):
                    effectVars.add(atom.var2())
            elif atom.atomType() == "concept":
                effectConcepts.add(atom.term())
                if isinstance(atom.var1(), Variable):
                    effectVars.add(atom.var1())
            else:
                raise Exception("An atomic effect can be only about a concept or a role. The following atom is not valid: " + str(atom))
            atoms.append(atom) # Add the effect to the list
        return atoms
    for action in result["actions"]:
        #~ Check that no other action has the same name
        for otherAction in result["actions"]:
            if action != otherAction and str(action["actionName"]) == str(otherAction["actionName"]):
                raise Exception("There are two actions with the same name: " + str(action["actionName"]))
        #~ Save the parameters in a tuple
        actionParameters = tuple([Variable(parameter) for parameter in action["actionParameters"]])
        #~ Every action can be called by exactly one rule, and the free
        #~ variables of the rule condition must be a subset of the
        #~ parameters of the action.
        counter = 0
        for rule in self.__rules:
            if str(action["actionName"]) == rule[2]:
                counter += 1
                #~ Check the free variables
                if not rule[1].freeVars().issubset(actionParameters):
                    raise Exception("The rule " + rule[0] + " calls the action " + str(action["actionName"]) + \
                        ", but the free variables of its condition (" + str(rule[1].freeVars()) + \
                        ") are not a subset of the parameters of the action (" + str(actionParameters) + ").")
        #~ Check that exactly one rule calls the action
        if counter != 1:
            raise Exception("There are " + str(counter) + " rules that call the action " + str(action["actionName"]) +". There must be 1!")
        #~ Analyze each effect
        effects = [] # Temporary list for the effects
        for effect in action["actionEffects"]:
            #~ Create the ECQ of the effect
            effectCond = ECQ(effect["effectCondition"][0], self.__concepts, self.__roles, self.__individuals)
            #~ The terms used in the effect condition must belong to the
            #~ TBox vocabulary (thus, to the predicate section)
            if not effectCond.concepts().issubset(self.__concepts):
                raise Exception("The following action is using terms that are not concepts in the predicates: (:" + str(action["actionName"]) + " ... )\n" \
                    "Terms that are not concepts: " + str(effectCond.concepts().difference(self.__concepts)))
            if not effectCond.roles().issubset(self.__roles):
                raise Exception("The following action is using terms that are not roles in the predicates: (:" + str(action["actionName"]) + " ... )\n" \
                    "Terms that are not roles: " + str(effectCond.roles().difference(self.__roles)))
            effectConcepts = set() # Concepts appearing in the atomic effects, for check purposes
            effectRoles = set() # Roles appearing in the atomic effects, for check purposes
            effectVars = set() # Variables appearing in the atomic effects, for check purposes
            effectAdd = [] # Temporary list for the addition effects
            if "effectAdd" in effect.keys():
                effectAdd = analyzeAtomicEffects(effect["effectAdd"][0], effectConcepts, effectRoles, effectVars)
            effectDel = [] # Temporary list for the deletion effects
            if "effectDelete" in effect.keys():
                effectDel = analyzeAtomicEffects(effect["effectDelete"][0], effectConcepts, effectRoles, effectVars)
            #~ The terms used in the atomic effects must belong to the
            #~ TBox vocabulary (thus, to the predicate section)
            if not effectConcepts.issubset(self.__concepts):
                raise Exception("The following action is using terms that are not concepts in the predicates: (:" + str(action["actionName"]) + " ... )\n" \
                    "Terms that are not concepts: " + str(effectConcepts.difference(self.__concepts)))
            if not effectRoles.issubset(self.__roles):
                raise Exception("The following action is using terms that are not roles in the predicates: (:" + str(action["actionName"]) + " ... )\n" \
                    "Terms that are not roles: " + str(effectRoles.difference(self.__roles)))
            #~ The variables used in the atomic effects must appear either
            #~ among the free variables of the effect's condition, or among
            #~ the parameters of the action.
            #~ BUG FIX: the error message now reports the difference against
            #~ the same set the check uses (condition free vars UNION action
            #~ parameters); it previously compared against the free vars only.
            allowedVars = effectCond.freeVars().union(actionParameters)
            if not effectVars.issubset(allowedVars):
                raise Exception("The following action is using variables in the atomic effects that do not appear among the free variables of the effect's condition or the action's parameters: (:" + str(action["actionName"]) + " ... )\n" \
                    "Variables that do not appear: " + str(effectVars.difference(allowedVars)))
            #~ Save the analyzed effect as a (condition, add, delete) tuple
            effects.append(tuple((effectCond,tuple(effectAdd),tuple(effectDel))))
        #~ Save the action as a (name, parameters, effects) tuple
        self.__actions.add( tuple((str(action["actionName"]),actionParameters,tuple(effects))) )
def __calculateQueryUnsat(self):
    """Build self.__queryUnsat: a UCQ (with inequalities) satisfied exactly
    when the current state violates a negative or a functionality axiom.

    Each negative axiom "B1 is not B2" contributes a conjunctive subquery
    asserting that some object is an instance of both B1 and B2; each
    functionality axiom contributes a subquery asserting that some object
    has two distinct fillers for the functional role. With more than one
    subquery, they are joined disjunctively (one violated axiom suffices).

    BUG FIXES with respect to the previous version:
    - the right-term test in the concept-left branch called
      "negAxiom.rightTermExists" without parentheses (a bound method is
      always truthy); it now calls the method, as the other branches do;
    - the branches for an inverse left-hand term (\\exists P^- and P^-)
      were unreachable, because the non-inverse branches did not exclude
      the inverse case; inverse-left axioms therefore produced subqueries
      with swapped argument order. The branch guards are now mutually
      exclusive;
    - the role/role branches now also exclude an "exists" right-hand term,
      mirroring the non-inverse branch.
    """
    queryUnsat = ""
    counter = 0 # counter used for the generation of unique variables
    for negAxiom in self.__axiomsNeg:
        #~ Each axiom is composed of two elements, the left one and the right one.
        #~ An element could be:
        #~ - A , saved as the string 'A' and has to be in self.__concepts
        #~ - \exists P , saved as the tuple ('exists','P')
        #~ - \exists P^- , saved as the tuple ('exists', ('inverse','P'))
        #~ - P , saved as the string 'P' and has to be in self.__roles
        #~ - P^- , saved as the tuple ('inverse','P')
        #~ The branches below cover every valid left/right combination;
        #~ anything else raises an Exception.
        if negAxiom.leftTerm() in self.__concepts:
            #~ Left element is an atomic concept A.
            if negAxiom.rightTerm() in self.__concepts:
                #~ - A is not A'
                #~ (exists (?x_c) (and (A ?x_c) (A' ?x_c)))
                queryUnsat += "(exists (?x_"+str(counter)+") (and ("+ negAxiom.leftTerm() +" ?x_"+str(counter)+ \
                    ") ("+negAxiom.rightTerm()+" ?x_"+str(counter)+") ))\n"
            elif negAxiom.rightTermExists() and negAxiom.rightTerm() in self.__roles:
                if not negAxiom.rightTermInverse():
                    #~ - A is not \exists P
                    #~ (exists (?x_c ?y_2) (and (A ?x_c) (P ?x_c ?y_2)))
                    queryUnsat += "(exists (?x_"+str(counter)+" ?y_2) (and ("+negAxiom.leftTerm()+" ?x_"+str(counter)+ \
                        ") ("+negAxiom.rightTerm()+" ?x_"+str(counter)+" ?y_2) ))\n"
                else:
                    #~ - A is not \exists P^-
                    #~ (exists (?x_c ?y_2) (and (A ?x_c) (P ?y_2 ?x_c)))
                    queryUnsat += "(exists (?x_"+str(counter)+" ?y_2) (and ("+negAxiom.leftTerm()+" ?x_"+str(counter)+ \
                        ") ("+negAxiom.rightTerm()+" ?y_2 ?x_"+str(counter)+") ))\n"
            else:
                raise Exception("Something went wrong during the building of query_unsat. The following element couldn't be recognized: " + \
                    str(negAxiom.rightTerm()) )
        elif negAxiom.leftTermExists() and not negAxiom.leftTermInverse() and negAxiom.leftTerm() in self.__roles:
            #~ Left element is \exists P.
            if negAxiom.rightTerm() in self.__concepts:
                #~ - \exists P is not A
                #~ (exists (?x_c ?y_1) (and (P ?x_c ?y_1) (A ?x_c)))
                queryUnsat += "(exists (?x_"+str(counter)+" ?y_1) (and ("+ negAxiom.leftTerm() +" ?x_"+str(counter)+ \
                    " ?y_1) ("+ negAxiom.rightTerm() +" ?x_"+str(counter)+") ))\n"
            elif negAxiom.rightTermExists() and negAxiom.rightTerm() in self.__roles:
                if not negAxiom.rightTermInverse():
                    #~ - \exists P1 is not \exists P2
                    #~ (exists (?x_c ?y_1 ?y_2) (and (P1 ?x_c ?y_1) (P2 ?x_c ?y_2)))
                    queryUnsat += "(exists (?x_"+str(counter)+" ?y_1 ?y_2) (and ("+ negAxiom.leftTerm() +" ?x_"+str(counter)+ \
                        " ?y_1) ("+ negAxiom.rightTerm() +" ?x_"+str(counter)+" ?y_2) ))\n"
                else:
                    #~ - \exists P1 is not \exists P2^-
                    #~ (exists (?x_c ?y_1 ?y_2) (and (P1 ?x_c ?y_1) (P2 ?y_2 ?x_c)))
                    queryUnsat += "(exists (?x_"+str(counter)+" ?y_1 ?y_2) (and ("+ negAxiom.leftTerm() + \
                        " ?x_"+str(counter)+" ?y_1) ("+ negAxiom.rightTerm() +" ?y_2 ?x_"+str(counter)+") ))\n"
            else:
                raise Exception("Something went wrong during the building of query_unsat. The following element couldn't be recognized: " + str(negAxiom.rightTerm()) )
        elif negAxiom.leftTermExists() and negAxiom.leftTermInverse() and negAxiom.leftTerm() in self.__roles:
            #~ Left element is \exists P^- (previously unreachable, see docstring).
            if negAxiom.rightTerm() in self.__concepts:
                #~ - \exists P^- is not A
                #~ (exists (?x_c ?y_1) (and (P ?y_1 ?x_c) (A ?x_c)))
                queryUnsat += "(exists (?x_"+str(counter)+" ?y_1) (and ("+ negAxiom.leftTerm() + \
                    " ?y_1 ?x_"+str(counter)+") ("+ negAxiom.rightTerm() +" ?x_"+str(counter)+") ))\n"
            elif negAxiom.rightTermExists() and negAxiom.rightTerm() in self.__roles:
                if not negAxiom.rightTermInverse():
                    #~ - \exists P1^- is not \exists P2
                    #~ (exists (?x_c ?y_1 ?y_2) (and (P1 ?y_1 ?x_c) (P2 ?x_c ?y_2)))
                    queryUnsat += "(exists (?x_"+str(counter)+" ?y_1 ?y_2) (and ("+ negAxiom.leftTerm() + \
                        " ?y_1 ?x_"+str(counter)+") ("+ negAxiom.rightTerm() +" ?x_"+str(counter)+" ?y_2) ))\n"
                else:
                    #~ - \exists P1^- is not \exists P2^-
                    #~ (exists (?x_c ?y_1 ?y_2) (and (P1 ?y_1 ?x_c) (P2 ?y_2 ?x_c)))
                    queryUnsat += "(exists (?x_"+str(counter)+" ?y_1 ?y_2) (and ("+ negAxiom.leftTerm() + \
                        " ?y_1 ?x_"+str(counter)+") ("+ negAxiom.rightTerm() +" ?y_2 ?x_"+str(counter)+") ))\n"
            else:
                raise Exception("Something went wrong during the building of query_unsat. The following element couldn't be recognized: " + str(negAxiom.rightTerm()) )
        elif negAxiom.leftTerm() in self.__roles and not negAxiom.leftTermExists() and not negAxiom.leftTermInverse():
            #~ Left element is a plain role P1; both variables are shared.
            if negAxiom.rightTerm() in self.__roles and not negAxiom.rightTermExists():
                if not negAxiom.rightTermInverse():
                    #~ - P1 is not P2
                    #~ (exists (?x_c ?y_c) (and (P1 ?x_c ?y_c) (P2 ?x_c ?y_c)))
                    queryUnsat += "(exists (?x_"+str(counter)+" ?y_"+str(counter)+")" +\
                        "(and ("+negAxiom.leftTerm()+" ?x_"+str(counter)+" ?y_"+str(counter)+")" + \
                        " ("+negAxiom.rightTerm()+" ?x_"+str(counter)+" ?y_"+str(counter)+") ))\n"
                else:
                    #~ - P1 is not P2^-
                    #~ (exists (?x_c ?y_c) (and (P1 ?x_c ?y_c) (P2 ?y_c ?x_c)))
                    queryUnsat += "(exists (?x_"+str(counter)+" ?y_"+str(counter)+")" +\
                        "(and ("+negAxiom.leftTerm()+" ?x_"+str(counter)+" ?y_"+str(counter)+")" + \
                        " ("+negAxiom.rightTerm()+" ?y_"+str(counter)+" ?x_"+str(counter)+") ))\n"
            else:
                #~ Something went wrong
                raise Exception("Something went wrong during the building of query_unsat. The following element couldn't be recognized: " + str(negAxiom) )
        elif negAxiom.leftTerm() in self.__roles and negAxiom.leftTermInverse():
            #~ Left element is P1^- (previously unreachable, see docstring).
            if negAxiom.rightTerm() in self.__roles and not negAxiom.rightTermExists() and not negAxiom.rightTermInverse():
                #~ - P1^- is not P2
                #~ (exists (?x_c ?y_c) (and (P1 ?y_c ?x_c) (P2 ?x_c ?y_c)))
                queryUnsat += "(exists (?x_"+str(counter)+" ?y_"+str(counter)+")" +\
                    "(and ("+negAxiom.leftTerm()+" ?y_"+str(counter)+" ?x_"+str(counter)+")" + \
                    " ("+negAxiom.rightTerm()+" ?x_"+str(counter)+" ?y_"+str(counter)+") ))\n"
            elif negAxiom.rightTerm() in self.__roles and not negAxiom.rightTermExists() and negAxiom.rightTermInverse():
                #~ - P1^- is not P2^-
                #~ (exists (?x_c ?y_c) (and (P1 ?y_c ?x_c) (P2 ?y_c ?x_c)))
                queryUnsat += "(exists (?x_"+str(counter)+" ?y_"+str(counter)+")" +\
                    "(and ("+negAxiom.leftTerm()+" ?y_"+str(counter)+" ?x_"+str(counter)+")" + \
                    " ("+negAxiom.rightTerm()+" ?y_"+str(counter)+" ?x_"+str(counter)+") ))\n"
            else:
                #~ Something went wrong
                raise Exception("Something went wrong during the building of query_unsat. The following element couldn't be recognized: " + str(negAxiom.rightTerm()) )
        else:
            #~ Something went wrong
            raise Exception("Something went wrong during the building of query_unsat. The following element couldn't be recognized: " + str(negAxiom.leftTerm()))
        #~ Increase the counter so the next subquery uses fresh variables
        counter += 1
    for functAxiom in self.__axiomsFunct:
        #~ Each functionality axiom involves a single (possibly inverse) role:
        #~ - funct P , saved as the string 'P'
        #~ - funct P^-, saved as the tuple ('inverse','P')
        if functAxiom.leftTerm() in self.__roles:
            if not functAxiom.leftTermInverse():
                #~ funct P is violated when some x has two distinct successors:
                #~ (exists (?x_c ?y_1 ?y_2) (and (P ?x_c ?y_1) (P ?x_c ?y_2) (neq ?y_1 ?y_2)))
                queryUnsat += "(exists (?x_"+str(counter)+" ?y_1 ?y_2) (and ("+functAxiom.leftTerm() + \
                    " ?x_"+str(counter)+" ?y_1) ("+functAxiom.leftTerm()+" ?x_"+str(counter)+" ?y_2) (neq ?y_1 ?y_2)))\n"
            else:
                #~ funct P^- is violated when some y has two distinct predecessors:
                #~ (exists (?y_c ?x_1 ?x_2) (and (P ?x_1 ?y_c) (P ?x_2 ?y_c) (neq ?x_1 ?x_2)))
                queryUnsat += "(exists (?y_"+str(counter)+" ?x_1 ?x_2) (and ("+functAxiom.leftTerm() + \
                    " ?x_1 ?y_"+str(counter)+") ("+functAxiom.leftTerm()+" ?x_2 ?y_"+str(counter)+") (neq ?x_1 ?x_2)))\n"
        else:
            #~ Something went wrong
            raise Exception("Something went wrong during the building of query_unsat. The following element couldn't be recognized: " + str(functAxiom))
        #~ Increase the counter
        counter += 1
    #~ With two or more subqueries, any single violated axiom must make the
    #~ whole query true, so the subqueries are joined disjunctively by
    #~ wrapping them in "(or ...)".
    if counter > 1:
        queryUnsat = "(or \n" + queryUnsat + ")"
    self.__queryUnsat = UCQ(queryUnsat, self.__concepts, self.__roles, self.__individuals, inequalitiesAllowed = True)
def __parseProblem(self, planningProblemFile):
    """Parse the PDDL problem file.

    Side effects: sets self.__problemName; fills self.__individuals with
    the PDDL objects and self.__assertions with the initial-state ABox
    assertions; sets self.__goalQuery to the goal ECQ. Raises an Exception
    if the goal query has free variables (it must be boolean).
    """
    #~ The problem file provides, in this order: the problem name, the domain
    #~ it refers to, the objects (individuals), the initial-state assertions,
    #~ and the goal.
    syntax = Syntax()
    #~ The domain specified in the problem must be the same name specified in the domain parsed before.
    #~ For this reason we put Literal(self.__domainName)
    pddlProblem = StringStart() + \
        syntax.leftPar + syntax.pddlDefineTag + \
        syntax.leftPar + syntax.pddlProblemTag + syntax.pddlProblemName.setResultsName("problemName") + syntax.rightPar + \
        syntax.leftPar + syntax.pddlProblemDomainTag + Literal(self.__domainName) + syntax.rightPar + \
        syntax.leftPar + syntax.pddlProblemObjectsTag + Group(OneOrMore(syntax.pddlProblemObject)).setResultsName("problemObjects") + syntax.rightPar + \
        syntax.leftPar + syntax.pddlProblemInitTag + Group(OneOrMore(nestedExpr())).setResultsName("problemAssertions") + syntax.rightPar + \
        syntax.leftPar + syntax.pddlProblemGoalTag + nestedExpr().setResultsName("problemGoal") + syntax.rightPar + \
        syntax.rightPar + \
        StringEnd()
    #~ Parse the file
    result = pddlProblem.parseFile(planningProblemFile)
    self.__problemName = result["problemName"] #Save the problem name
    #~ Save the individuals.
    #~ In PDDL they are referred to as objects.
    for obj in result["problemObjects"]:
        self.__individuals.add(obj)
    #~ Save the assertions (the initial ABox).
    for assertion in result["problemAssertions"]:
        self.__assertions.add(Assertion(assertion, self.__concepts, self.__roles, self.__individuals))
    #~ Save the goal query.
    self.__goalQuery = ECQ(result["problemGoal"][0], self.__concepts, self.__roles, self.__individuals)
    #~ Check that the goal query is boolean (i.e., it has no free variables)
    if len(self.__goalQuery.freeVars()) > 0:
        raise Exception("The goal query must be boolean.")
def __compileTBoxAway(self):
    """Rewrite every query of the EKab against the positive TBox axioms so
    that query answering no longer needs the TBox itself.

    Fills self.__queryUnsatRewritten, self.__rulesRewritten,
    self.__actionsRewritten and self.__goalQueryRewritten.
    """
    def rewriteQuery(query):
        # Every rewriting uses the same positive axioms and vocabulary.
        return rewrite(query, self.__axiomsPos, self.__concepts, self.__roles, self.__individuals)
    # Rewrite queryUnsat.
    self.__queryUnsatRewritten = rewriteQuery(self.__queryUnsat)
    # Rules keep their name and target action; only the condition is rewritten.
    for name, condition, actionName in self.__rules:
        self.__rulesRewritten.add((name, rewriteQuery(condition), actionName))
    # Actions keep their name and parameters; each effect keeps its add/delete
    # lists and gets a rewritten condition.
    for name, parameters, effects in self.__actions:
        newEffects = tuple((rewriteQuery(cond), addList, delList) for cond, addList, delList in effects)
        self.__actionsRewritten.add((name, parameters, newEffects))
    # Rewrite the goal query.
    self.__goalQueryRewritten = rewriteQuery(self.__goalQuery)
def domainName(self):
    """Return the name of the parsed planning domain."""
    return self.__domainName
def problemName(self):
    """Return the name of the parsed planning problem."""
    return self.__problemName
def individuals(self):
    """Return the set of individuals (PDDL objects) of the problem."""
    return self.__individuals
def concepts(self):
    """Return the set of atomic concept names."""
    return self.__concepts
def roles(self):
    """Return the set of atomic role names."""
    return self.__roles
def axiomPos(self):
    """Return the set of positive inclusion axioms."""
    return self.__axiomsPos
def axiomNeg(self):
    """Return the set of negative (disjointness) axioms."""
    return self.__axiomsNeg
def axiomFunct(self):
    """Return the set of functionality axioms."""
    return self.__axiomsFunct
def assertions(self):
    """Return the initial-state ABox assertions."""
    return self.__assertions
def rules(self):
    """Return the rules as (name, condition, action name) tuples."""
    return self.__rules
def rulesRewritten(self):
    """Return the rules with their conditions rewritten (TBox compiled away)."""
    return self.__rulesRewritten
def actions(self):
    """Return the actions as (name, parameters, effects) tuples."""
    return self.__actions
def actionsRewritten(self):
    """Return the actions with effect conditions rewritten (TBox compiled away)."""
    return self.__actionsRewritten
def goalQuery(self):
    """Return the goal query (an ECQ)."""
    return self.__goalQuery
def goalQueryRewritten(self):
    """Return the rewritten (TBox-free) goal query."""
    return self.__goalQueryRewritten
def queryUnsat(self):
    """Return the unsatisfiability query (a UCQ with inequalities)."""
    return self.__queryUnsat
def queryUnsatRewritten(self):
    """Return the rewritten (TBox-free) unsatisfiability query."""
    return self.__queryUnsatRewritten
# --------------------------
if __name__ == '__main__':
    # Small smoke-test: parse the sample domain/problem and show how the
    # rewritten rules/actions line up with the originals (matched by name).
    prova = EKab("planDomain.pddl", "planProblem.pddl")
    print(prova.individuals())
    print("\n-------------------------")
    print("Rewritten rules:")
    for original_rule in prova.rules():
        for rewritten_rule in prova.rulesRewritten():
            if original_rule[0] == rewritten_rule[0]:
                print(original_rule)
                print(rewritten_rule)
                print()
    print("\n-------------------------")
    print("Rewritten Actions:")
    for original_action in prova.actions():
        for rewritten_action in prova.actionsRewritten():
            if original_action[0] == rewritten_action[0]:
                print(original_action)
                print(rewritten_action)
                print()
| |
#pragma out
#pragma error OK
# The seattlegeni testlib must be imported first.
from seattlegeni.tests import testlib
from seattlegeni.common.api import maindb
from seattlegeni.common.exceptions import *
import unittest
# Monotonically increasing counter used to build a unique node id per test node.
next_nodeid_number = 0
def create_node_and_vessels_with_one_port_each(ip, portlist):
    """Create one node at *ip* plus one single-port vessel per entry in
    *portlist*, and return the created node."""
    global next_nodeid_number
    next_nodeid_number += 1
    nodeid = "node" + str(next_nodeid_number)
    port = 1234
    version = "10.0test"
    is_active = True
    owner_pubkey = "1 2"
    extra_vessel_name = "v1"
    node = maindb.create_node(nodeid, ip, port, version, is_active,
                              owner_pubkey, extra_vessel_name)
    # Per-port vessels are named "v2", "v3", ...; "v1" is the extra vessel.
    for offset, vesselport in enumerate(portlist):
        vessel = maindb.create_vessel(node, "v" + str(offset + 2))
        maindb.set_vessel_ports(vessel, [vesselport])
    return node
def create_nodes_on_same_subnet(count, portlist_for_vessels_on_each_node):
    # Create 'count' nodes on the same subnet and on each node create a vessel
    # with a single port for each port in 'portlist_for_vessels_on_each_node'.
    # BUG FIX: the prefix used to be the literal string "ip = 127.0.0.",
    # which produced malformed addresses such as "ip = 127.0.0.0" instead of
    # real dotted-quad IPs on the shared 127.0.0.x subnet (compare the
    # prefixes used by create_nodes_on_different_subnets / create_nat_nodes).
    ip_prefix = "127.0.0."
    for i in range(count):
        ip = ip_prefix + str(i)
        create_node_and_vessels_with_one_port_each(ip, portlist_for_vessels_on_each_node)
def create_nodes_on_different_subnets(count, portlist_for_vessels_on_each_node):
    """Create *count* nodes, each on its own 127.1.N.0 subnet, and give each
    node one single-port vessel per port in *portlist_for_vessels_on_each_node*."""
    for subnet_number in range(count):
        ip = "127.1." + str(subnet_number) + ".0"
        create_node_and_vessels_with_one_port_each(ip, portlist_for_vessels_on_each_node)
def create_nat_nodes(count, portlist_for_vessels_on_each_node):
    """Create *count* NAT nodes (ids built from maindb.NAT_STRING_PREFIX) and
    give each node one single-port vessel per port in
    *portlist_for_vessels_on_each_node*."""
    for node_number in range(count):
        nat_ip = maindb.NAT_STRING_PREFIX + str(node_number)
        create_node_and_vessels_with_one_port_each(nat_ip, portlist_for_vessels_on_each_node)
def _get_queryset_include_nat(port):
    """
    Give a nicer though less accurate name to the function as we're using it a
    lot here in the tests.

    Returns the queryset of all available vessels on *port*, NAT nodes included.
    """
    return maindb._get_queryset_of_all_available_vessels_for_a_port_include_nat_nodes(port)
def _get_queryset_exclude_nat(port):
    """
    Give a nicer though less accurate name to the function as we're using it a
    lot here in the tests.

    Returns the queryset of all available vessels on *port*, NAT nodes excluded.
    """
    return maindb._get_queryset_of_all_available_vessels_for_a_port_exclude_nat_nodes(port)
def _get_queryset_only_nat(port):
    """
    Give a nicer though less accurate name to the function as we're using it a
    lot here in the tests.

    Returns the queryset of available vessels on *port* that are on NAT nodes only.
    """
    return maindb._get_queryset_of_all_available_vessels_for_a_port_only_nat_nodes(port)
class SeattleGeniTestCase(unittest.TestCase):
    """Tests for maindb's available-vessel querysets and for the
    get_available_{rand,wan,nat,lan}_vessels helpers. Each test runs
    against a fresh test database."""

    def setUp(self):
        # Setup a fresh database for each test.
        testlib.setup_test_db()

    def tearDown(self):
        # Cleanup the test database.
        testlib.teardown_test_db()

    def test_get_queryset_1(self):
        """
        This is ultimately testing
        maindb._get_queryset_of_all_available_vessels_for_a_port_include_nat_nodes()
        """
        userport = 100
        # Make sure the queryset is initially empty.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(0, queryset.count())
        # Create a node that has no vessels.
        ip = "127.0.0.1"
        portlist = []
        create_node_and_vessels_with_one_port_each(ip, portlist)
        # Make sure the queryset is still empty.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(0, queryset.count())
        # Create a node that has one vessel, but not on this user's port.
        ip = "127.0.0.2"
        portlist = [userport + 1]
        create_node_and_vessels_with_one_port_each(ip, portlist)
        # Make sure the queryset is still empty.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(0, queryset.count())
        # Create a node that has one vessel that is on the user's port
        ip = "127.0.0.3"
        portlist = [userport]
        create_node_and_vessels_with_one_port_each(ip, portlist)
        # We expect one available vessel.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(1, queryset.count())
        # Make sure the vessel is on the last node we created.
        availablevessel = list(queryset)[0]
        self.assertEqual("127.0.0.3", availablevessel.node.last_known_ip)

    def test_get_queryset_node_is_active_changes(self):
        """
        This is ultimately testing
        maindb._get_queryset_of_all_available_vessels_for_a_port_include_nat_nodes()
        """
        userport = 100
        # Create a node that has three vessels but only one vessel on the user's port.
        ip = "127.0.0.1"
        portlist = [userport - 1, userport, userport + 1]
        node = create_node_and_vessels_with_one_port_each(ip, portlist)
        # We expect one available vessel.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(1, queryset.count())
        # Now update the node to mark it as inactive.
        node.is_active = False
        node.save()
        # We expect zero available vessels.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(0, queryset.count())
        # Now update the node to mark it as active again.
        node.is_active = True
        node.save()
        # We expect one available vessel.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(1, queryset.count())

    def test_get_queryset_node_is_broken_changes(self):
        """
        This is ultimately testing
        maindb._get_queryset_of_all_available_vessels_for_a_port_include_nat_nodes()
        """
        userport = 100
        # Create a node that has three vessels but only one vessel on the user's port.
        ip = "127.0.0.1"
        portlist = [userport - 1, userport, userport + 1]
        node = create_node_and_vessels_with_one_port_each(ip, portlist)
        # We expect one available vessel.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(1, queryset.count())
        # Now update the node to mark it as broken.
        node.is_broken = True
        node.save()
        # We expect zero available vessels.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(0, queryset.count())
        # Now update the node to mark it as not broken again.
        node.is_broken = False
        node.save()
        # We expect one available vessel.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(1, queryset.count())

    def test_get_queryset_vessel_is_dirty_changes(self):
        """
        This is ultimately testing
        maindb._get_queryset_of_all_available_vessels_for_a_port_include_nat_nodes()
        """
        userport = 100
        # Create two nodes that have three vessels but only one vessel on the user's port.
        portlist = [userport - 1, userport, userport + 1]
        ip = "127.0.0.1"
        create_node_and_vessels_with_one_port_each(ip, portlist)
        ip = "127.0.0.2"
        create_node_and_vessels_with_one_port_each(ip, portlist)
        # We expect two available vessels.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(2, queryset.count())
        # Now mark one of the two available vessels as dirty.
        vessel = queryset[0]
        vessel.is_dirty = True
        vessel.save()
        # We expect one available vessel, and it shouldn't be the dirty one.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(1, queryset.count())
        self.assertNotEqual(vessel, queryset[0])

    def test_get_queryset_vessel_acquired_by_user_changes(self):
        """
        This is ultimately testing
        maindb._get_queryset_of_all_available_vessels_for_a_port_include_nat_nodes()
        """
        # Create a user who will be doing the acquiring.
        user = maindb.create_user("testuser", "password", "example@example.com", "affiliation", "1 2", "2 2 2", "3 4")
        userport = user.usable_vessel_port
        # Create two nodes that have three vessels but only one vessel on the user's port.
        portlist = [userport - 1, userport, userport + 1]
        ip = "127.0.0.1"
        create_node_and_vessels_with_one_port_each(ip, portlist)
        ip = "127.0.0.2"
        create_node_and_vessels_with_one_port_each(ip, portlist)
        # We expect two available vessels.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(2, queryset.count())
        # Mark one of the vessels as acquired.
        vessel = queryset[0]
        maindb.record_acquired_vessel(user, vessel)
        # We expect one available vessel, and it shouldn't be the acquired one.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(1, queryset.count())
        self.assertNotEqual(vessel, queryset[0])
        # Release the vessel. It should still be dirty.
        maindb.record_released_vessel(vessel)
        # We expect one available vessel, and it shouldn't be the acquired one.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(1, queryset.count())
        self.assertNotEqual(vessel, queryset[0])
        # Mark the vessel as clean (as if the backend cleaned it up).
        maindb.mark_vessel_as_clean(vessel)
        # We expect two available vessels.
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(2, queryset.count())

    # NOTE(review): this TODO looks stale — an is_broken test already exists
    # above (test_get_queryset_node_is_broken_changes); confirm and remove.
    # TODO: add a test for broken nodes if/when an is_broken flag is added to nodes

    def test_get_queryset_different_node_types(self):
        userport = 12345
        # We choose the numbers of each type of node in a way that helps ensure
        # that we don't accidentally pass the test if something is going wrong.
        # We will get one vessel on each created node for each port in portlist
        # and there will be only that one port on the vessel.
        portlist = [userport - 1, userport, userport + 1]
        create_nodes_on_same_subnet(3, portlist)
        create_nodes_on_different_subnets(7, portlist)
        create_nat_nodes(13, portlist)
        # We expect 23 available total vessels (that is, one vessel on each node).
        queryset = _get_queryset_include_nat(userport)
        self.assertEqual(23, queryset.count())
        # We expect 10 available vessels not on nat nodes.
        queryset = _get_queryset_exclude_nat(userport)
        self.assertEqual(10, queryset.count())
        # We expect 13 available vessels on nat nodes.
        queryset = _get_queryset_only_nat(userport)
        self.assertEqual(13, queryset.count())

    def test_get_available_rand_vessels(self):
        # Create a user who will be doing the acquiring.
        user = maindb.create_user("testuser", "password", "example@example.com", "affiliation", "1 2", "2 2 2", "3 4")
        userport = user.usable_vessel_port
        # We choose the numbers of each type of node in a way that helps ensure
        # that we don't accidentally pass the test if something is going wrong.
        # We will get one vessel on each created node for each port in portlist
        # and there will be only that one port on the vessel.
        portlist = [userport - 1, userport, userport + 1]
        create_nodes_on_same_subnet(3, portlist)
        create_nodes_on_different_subnets(7, portlist)
        create_nat_nodes(13, portlist)
        # Request 0 vessels, make sure it raises a AssertionError.
        self.assertRaises(AssertionError, maindb.get_available_rand_vessels, user, 0)
        # Request a negative number of vessels, make sure it raises a AssertionError.
        self.assertRaises(AssertionError, maindb.get_available_rand_vessels, user, -1)
        # We expect there to be 23 available rand vessels (one vessel on each node
        # including the nat nodes).
        # Request 1 vessel, make sure we get back more than 1 potential vessels.
        vessel_list = maindb.get_available_rand_vessels(user, 1)
        self.assertTrue(len(vessel_list) > 1)
        # Request 5 vessels, make sure we get back more than 5 potential vessels.
        vessel_list = maindb.get_available_rand_vessels(user, 5)
        self.assertTrue(len(vessel_list) > 5)
        # Request 23 vessels, make sure we get back all 23 vessels we expect.
        vessel_list = maindb.get_available_rand_vessels(user, 23)
        self.assertEqual(23, len(vessel_list))
        # Request 24 vessels, make sure we get an exception.
        self.assertRaises(UnableToAcquireResourcesError, maindb.get_available_rand_vessels, user, 24)

    def test_get_available_wan_vessels(self):
        # Create a user who will be doing the acquiring.
        user = maindb.create_user("testuser", "password", "example@example.com", "affiliation", "1 2", "2 2 2", "3 4")
        userport = user.usable_vessel_port
        # We choose the numbers of each type of node in a way that helps ensure
        # that we don't accidentally pass the test if something is going wrong.
        # We will get one vessel on each created node for each port in portlist
        # and there will be only that one port on the vessel.
        portlist = [userport - 1, userport, userport + 1]
        create_nodes_on_same_subnet(3, portlist)
        create_nodes_on_different_subnets(7, portlist)
        create_nat_nodes(13, portlist)
        # Request 0 vessels, make sure it raises a AssertionError.
        self.assertRaises(AssertionError, maindb.get_available_wan_vessels, user, 0)
        # Request a negative number of vessels, make sure it raises a AssertionError.
        self.assertRaises(AssertionError, maindb.get_available_wan_vessels, user, -1)
        # We expect there to be 8 available wan vessels (one vessel from a
        # non-nat node on each subnet, and there are 8 subnets).
        # Request 1 vessel, make sure we get back more than 1 potential vessels.
        vessel_list = maindb.get_available_wan_vessels(user, 1)
        self.assertTrue(len(vessel_list) > 1)
        # Request 5 vessels, make sure we get back more than 5 potential vessels.
        vessel_list = maindb.get_available_wan_vessels(user, 5)
        self.assertTrue(len(vessel_list) > 5)
        # Request 8 vessels, make sure we get back all 8 vessels we expect.
        vessel_list = maindb.get_available_wan_vessels(user, 8)
        self.assertEqual(8, len(vessel_list))
        # Request 9 vessels, make sure we get an exception.
        self.assertRaises(UnableToAcquireResourcesError, maindb.get_available_wan_vessels, user, 9)

    def test_get_available_nat_vessels(self):
        # Create a user who will be doing the acquiring.
        user = maindb.create_user("testuser", "password", "example@example.com", "affiliation", "1 2", "2 2 2", "3 4")
        userport = user.usable_vessel_port
        # We choose the numbers of each type of node in a way that helps ensure
        # that we don't accidentally pass the test if something is going wrong.
        # We will get one vessel on each created node for each port in portlist
        # and there will be only that one port on the vessel.
        portlist = [userport - 1, userport, userport + 1]
        create_nodes_on_same_subnet(3, portlist)
        create_nodes_on_different_subnets(7, portlist)
        create_nat_nodes(13, portlist)
        # Request 0 vessels, make sure it raises a AssertionError.
        self.assertRaises(AssertionError, maindb.get_available_nat_vessels, user, 0)
        # Request a negative number of vessels, make sure it raises a AssertionError.
        self.assertRaises(AssertionError, maindb.get_available_nat_vessels, user, -1)
        # We expect there to be 13 available nat vessels (one vessel on each
        # nat node).
        # Request 1 vessel, make sure we get back more than 1 potential vessels.
        vessel_list = maindb.get_available_nat_vessels(user, 1)
        self.assertTrue(len(vessel_list) > 1)
        # Request 5 vessels, make sure we get back more than 5 potential vessels.
        vessel_list = maindb.get_available_nat_vessels(user, 5)
        self.assertTrue(len(vessel_list) > 5)
        # Request 13 vessels, make sure we get back all 13 vessels we expect.
        vessel_list = maindb.get_available_nat_vessels(user, 13)
        self.assertEqual(13, len(vessel_list))
        # Request 14 vessels, make sure we get an exception.
        self.assertRaises(UnableToAcquireResourcesError, maindb.get_available_nat_vessels, user, 14)

    def test_get_available_lan_vessels_by_subnet(self):
        # Create a user who will be doing the acquiring.
        user = maindb.create_user("testuser", "password", "example@example.com", "affiliation", "1 2", "2 2 2", "3 4")
        userport = user.usable_vessel_port
        # We choose the numbers of each type of node in a way that helps ensure
        # that we don't accidentally pass the test if something is going wrong.
        # We will get one vessel on each created node for each port in portlist
        # and there will be only that one port on the vessel.
        portlist = [userport - 1, userport, userport + 1]
        create_nodes_on_same_subnet(29, portlist)
        create_nodes_on_different_subnets(7, portlist)
        create_nat_nodes(13, portlist)
        # Request 0 vessels, make sure it raises a AssertionError.
        self.assertRaises(AssertionError, maindb.get_available_lan_vessels_by_subnet, user, 0)
        # Request a negative number of vessels, make sure it raises a AssertionError.
        self.assertRaises(AssertionError, maindb.get_available_lan_vessels_by_subnet, user, -1)
        # We expect there to be 7 subnets with a single available vessel for the
        # user (the 7 on different subnets created above) and 1 subnet with 29
        # available vessels for the user (the 29 nodes on the same subnet created
        # above).
        # Request 1 vessel, make sure we get back a list of 8 subnets where one
        # subnet has more than one available vessel and the other 7 have only
        # one available vessel.
        subnet_vessel_list = maindb.get_available_lan_vessels_by_subnet(user, 1)
        self.assertEqual(8, len(subnet_vessel_list))
        vessel_list_sizes = []
        for vessel_list in subnet_vessel_list:
            vessel_list_sizes.append(len(vessel_list))
        vessel_list_sizes.sort()
        self.assertEqual([1, 1, 1, 1, 1, 1, 1], vessel_list_sizes[:7])
        self.assertTrue(vessel_list_sizes[7] > 1)
        # Request 5 vessels, make sure we get back a list that has one subnet and
        # in that subnet is a list of more than 5 potential vessels.
        subnet_vessel_list = maindb.get_available_lan_vessels_by_subnet(user, 5)
        self.assertEqual(1, len(subnet_vessel_list))
        self.assertTrue(len(subnet_vessel_list[0]) > 5)
        # Request 29 vessels, make sure we get back a list that has one subnet and
        # in that subnet is a list of 29 potential vessels.
        subnet_vessel_list = maindb.get_available_lan_vessels_by_subnet(user, 29)
        self.assertEqual(1, len(subnet_vessel_list))
        self.assertEqual(29, len(subnet_vessel_list[0]))
        # Request 30 vessels, make sure we get an exception.
        self.assertRaises(UnableToAcquireResourcesError, maindb.get_available_lan_vessels_by_subnet, user, 30)
def run_test():
    # Entry point used by the test harness: run every TestCase in this module.
    unittest.main()

if __name__ == "__main__":
    run_test()
| |
# -*- coding: utf-8 -*-
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
from datetime import datetime
import json
from jsmin import jsmin
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# (On Read the Docs the package is installed, so the path hack is only
# applied for local builds.)
if "READTHEDOCS" not in os.environ:
    sys.path.insert(1, os.path.abspath("../../"))
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "1.6"

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinxcontrib.bibtex",
    "sphinx_gallery.gen_gallery",
]

# Bibliography file(s) used by sphinxcontrib.bibtex.
bibtex_bibfiles = ["references.bib"]

# numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "pysteps"
# The copyright year is kept current automatically at build time.
copyright = f"2018-{datetime.now():%Y}, pysteps developers"
author = "pysteps developers"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
def get_version():
    """Return the project version as a string from ``git describe``.

    Falls back to "X.Y" when git is unavailable, the build directory is not
    a git checkout, or the command produces no output. (The original
    implementation let CalledProcessError/OSError propagate, which broke
    builds from source tarballs.)
    """
    from subprocess import CalledProcessError, check_output
    try:
        _version = check_output(["git", "describe", "--tags", "--always"])
    except (OSError, CalledProcessError):
        return "X.Y"
    if _version:
        return _version.decode("utf-8")
    return "X.Y"
# The short X.Y version (strip the leading "v" tag prefix, trailing
# whitespace, and any "-N-gHASH" describe suffix).
version = get_version().lstrip("v").rstrip().split("-")[0]
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Read the Docs build --------------------------------------------------
def set_root():
    """Point every data-source root in the bundled pystepsrc at the local
    pysteps-data checkout, then write the adjusted configuration to
    ``../../pystepsrc.rtd``."""
    source_rc = os.path.abspath(os.path.join("..", "..", "pysteps", "pystepsrc"))
    with open(source_rc, "r") as handle:
        # pystepsrc may contain comments, so strip them with jsmin first.
        rcparams = json.loads(jsmin(handle.read()))
    for source in rcparams["data_sources"].values():
        source["root_path"] = os.path.abspath(
            os.path.join("..", "..", "pysteps-data", source["root_path"])
        )
    target_rc = os.path.abspath(os.path.join("..", "..", "pystepsrc.rtd"))
    with open(target_rc, "w") as handle:
        json.dump(rcparams, handle, indent=4)
# On Read the Docs: fetch the example data, rewrite pystepsrc to point at it,
# and expose the rewritten config through the PYSTEPSRC environment variable.
if "READTHEDOCS" in os.environ:
    repourl = "https://github.com/pySTEPS/pysteps-data.git"
    # Renamed from `dir`, which shadowed the `dir` builtin.
    data_dir = os.path.join(os.getcwd(), "..", "..", "pysteps-data")
    data_dir = os.path.abspath(data_dir)
    subprocess.check_call(["rm", "-rf", data_dir])
    subprocess.check_call(["git", "clone", repourl, data_dir])
    set_root()
    pystepsrc = os.path.abspath(os.path.join("..", "..", "pystepsrc.rtd"))
    os.environ["PYSTEPSRC"] = pystepsrc
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# html_theme = 'classic'
html_theme = "sphinx_book_theme"
html_title = ""
html_context = {
"github_user": "pySTEPS",
"github_repo": "pysteps",
"github_version": "master",
"doc_path": "doc",
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"repository_url": "https://github.com/pySTEPS/pysteps",
"repository_branch": "master",
"path_to_docs": "doc/source",
"use_edit_page_button": True,
"use_repository_button": True,
"use_issues_button": True,
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../_static/pysteps_logo.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["../_static"]
html_css_files = ["../_static/pysteps.css"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# "**": [
# "relations.html", # needs 'show_related': True theme option to display
# "searchbox.html",
# ]
# }
html_domain_indices = True
autosummary_generate = True
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pystepsdoc"
# -- Options for LaTeX output ---------------------------------------------

# This hack is taken from numpy (https://github.com/numpy/numpy/blob/master/doc/source/conf.py).
latex_preamble = r"""
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
"""

latex_elements = {
    "papersize": "a4paper",
    "pointsize": "10pt",
    "preamble": latex_preamble
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

latex_domain_indices = False

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "pysteps.tex", "pysteps reference", author, "manual"),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pysteps", "pysteps reference", [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "pysteps",
        "pysteps reference",
        author,
        "pysteps",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# -- Options for Sphinx-Gallery -------------------------------------------

# The configuration dictionary for Sphinx-Gallery
sphinx_gallery_conf = {
    "examples_dirs": "../../examples",  # path to your example scripts
    "gallery_dirs": "auto_examples",  # path where to save gallery generated examples
    # NOTE(review): r"/*\.py" acts as a catch-all for .py files — confirm intended.
    "filename_pattern": r"/*\.py",  # Include all the files in the examples dir
}
| |
""" Defines commands for the Chaco shell.
"""
try:
from wx import GetApp
except ImportError:
GetApp = lambda: None
from chaco.api import Plot, color_map_name_dict
from chaco.scales.api import ScaleSystem
from chaco.tools.api import PanTool, ZoomTool
# Note: these are imported to be exposed in the namespace.
from chaco.scales.api import (FixedScale, Pow10Scale, LogScale,
CalendarScaleSystem)
from chaco.default_colormaps import *
import plot_maker
from session import PlotSession
# The single shared plotting session that every shell command below operates on.
session = PlotSession()
#------------------------------------------------------------------------
# General help commands
#------------------------------------------------------------------------
def chaco_commands():
    """
    Prints the current list of all shell commands. Information
    on each command is available in that command's docstring (__doc__).

    Window/Plot Management
    ----------------------
    figure
        creates a new figure window
    activate
        activates an existing window or plot
    close
        closes a window
    curplot
        returns a reference to the active window's Plot object
    show
        starts the GUI and displays windows (should only be used in scripts)

    Plotting
    --------
    plot
        plots some data
    imread
        creates an array from an image file on disk
    imshow
        creates an image plot from a file on disk
    pcolor
        plots some scalar data as a pseudocolor image
    contour
        creates a contour line plot of some scalar data
    contourf
        creates a contour poly plot of some scalar data
    loglog
        plots an x-y line or scatter plot on log-log scale
    semilogx
        plots an x-y line or scatter plot with a log x-scale
    semilogy
        plots an x-y line or scatter plot with a log y-scale
    hold
        turns "hold" on or off
    show
        shows plot on screen; used when running from script

    Axes, Annotations, Legends
    --------------------------
    xaxis
        toggles the horizontal axis, sets the interval
    yaxis
        toggles the vertical axis, sets the interval
    xgrid
        toggles the grid running along the X axis
    ygrid
        toggles the grid running along the Y axis
    xtitle
        sets the title of a horizontal axis
    ytitle
        sets the title of a vertical axis
    xscale
        sets the tick scale system of the X axis
    yscale
        sets the tick scale system of the Y axis
    title
        sets the title of the plot

    Tools
    -----
    colormap
        sets the current colormap

    IO
    --
    save
        saves the current plot to a file (png, bmp, jpg, pdf)
    """
    # Parenthesized so this one-argument print behaves identically under
    # Python 2 and Python 3 (the bare `print x` statement is Py2-only).
    print(chaco_commands.__doc__)
# The following are not implemented yet.
# (This bare string is a no-op at runtime; it is kept purely as in-source
# documentation of planned commands.)
"""
tool -- toggles certain tools on or off
load -- loads a saved plot from file into the active plot area
scatter -- plots some data as a scatterplot (unordered X/Y data)
line -- plots some data as an ordered set of of X,Y points
label -- adds a label at a data point
legend -- creates a legend and adds it to the plot

Layout
------
names -- temporarily overlays plot areas with their names
hidenames -- force remove the name overlays from show_names
happend -- create a new plot area horizontally after the active plot
vappend -- create a new plot area vertically after the active plot
hsplit -- splits the current plot into two horizontal subplots
vsplit -- splits the current plot into two vertical subplots
save_layout -- saves the current layout of plots and plots areas
load_layout -- loads a saved layout of plot areas and applies it to the
               current set of plots

Sessions
--------
save_session -- saves the current "workspace", defined as the set of
                active windows and plots
load_session -- restores a previously-saved session
save_prefs -- saves the current session's preferences, either in a
              separate file or as the chaco.shell defaults
load_prefs -- loads a previously-saved set of preferences
"""
#------------------------------------------------------------------------
# Window management commands
#------------------------------------------------------------------------
def figure(name=None, title=None):
    """ Creates a new figure window, activates it, and returns its index.

    Parameters
    ----------
    name : string
        The name to use for this window. If this parameter is provided, then
        this name can be used instead of the window's integer index in other
        window-related functions.
    title : string
        The title of the plot window. If this is blank but *name* is provided,
        then that is used. If neither *name* nor *title* is provided, then the
        method uses the value of default_window_name in the Preferences.
    """
    new_window = session.new_window(name, title)
    activate(new_window)
    return new_window
def activate(ident=None):
    """ Activates and raises a figure window.

    Parameters
    ----------
    ident : integer or string
        Index or name of the window. If neither is specified,
        then the function raises the currently active window.
    """
    win = session.get_window(ident) if ident is not None else session.active_window
    if win is None:
        return
    session.active_window = win
    win.raise_window()
def show():
    """ Shows all the figure windows that have been created thus far, and
    creates a GUI main loop. This function is useful in scripts to show plots
    and keep their windows open, and has no effect when used from the
    interpreter prompt.
    """
    from traits.etsconfig.api import ETSConfig
    from pyface.util import guisupport

    toolkit = ETSConfig.toolkit
    loop_is_running = getattr(guisupport, 'is_event_loop_running_' + toolkit)
    run_event_loop = getattr(guisupport, 'start_event_loop_' + toolkit)
    if not loop_is_running():
        session.active_window.raise_window()
        run_event_loop()
def close(ident=None):
    """ Closes a figure window

    Parameters
    ----------
    ident : integer or string
        Index or name of the window to close, or "all". If nothing
        is specified, then the function closes the active window.
    """
    if ident == 'all':
        targets = session.windows
    elif ident is None:
        targets = [session.active_window]
    else:
        targets = [session.get_window(ident)]
    for window in targets:
        window.close()
def colormap(map):
    """Sets the active colormap.

    Parameters
    ----------
    map : a string, or a callable
        The color map to use; if it is a string, it is the name of a default
        colormap; if it is a callable, it must return an AbstractColorMap.
    """
    # A string names a stock colormap; anything else is taken as a factory.
    session.colormap = color_map_name_dict[map] if isinstance(map, basestring) else map
def hold(state=None):
    """ Turns "hold" on or off, or toggles the current state if none
    is given.

    Parameters
    ----------
    state : Boolean
        The desired hold state.
    """
    session.hold = (not session.hold) if state is None else state
def curplot():
    """ Returns the plot container of the active window, or None when no
    window is active.
    """
    win = session.active_window
    return win.get_container() if win else None
#------------------------------------------------------------------------
# Plotting functions
#------------------------------------------------------------------------
def _do_plot_boilerplate(kwargs, image=False):
    """ Used by various plotting functions.  Checks/handles hold state,
    returns a Plot object for the plotting function to use.

    Parameters
    ----------
    kwargs : dict
        Keyword arguments of the calling plot function; a "hold" entry,
        if present, is consumed here and applied via hold().
    image : bool
        If True and no window exists yet, create the new window in image
        mode.
    """
    # Consume the "hold" keyword so it is not forwarded to the renderers.
    # dict.pop replaces the deprecated has_key()/del pair and also works
    # on Python 3.
    if "hold" in kwargs:
        hold(kwargs.pop("hold"))

    # Check for an active window; if none, open one.
    if len(session.windows) == 0:
        if image:
            win = session.new_window(is_image=True)
            activate(win)
        else:
            figure()

    cont = session.active_window.get_container()
    if not cont:
        cont = Plot(session.data)
        session.active_window.set_container(cont)

    # Attach pan/zoom interaction once per container.
    existing_tools = [type(t) for t in (cont.tools + cont.overlays)]
    if PanTool not in existing_tools:
        cont.tools.append(PanTool(cont))
    if ZoomTool not in existing_tools:
        cont.overlays.append(ZoomTool(cont, tool_mode="box", always_on=True,
                                      drag_button="right"))

    if not session.hold:
        # Materialize the key list first: delplot() mutates cont.plots,
        # and deleting while iterating a live keys view fails on Python 3.
        cont.delplot(*list(cont.plots.keys()))

    return cont
def plot(*data, **kwargs):
    """ Plots data in a Matlab-compatible way. Data is assumed to be
    X vs Y. Any additional *kwargs* passed in are broadcast to all plots.
    Example::
        x = arange(-pi, pi, pi/100.)
        plot(x, sin(x), "b-")
    To use previous data, specify names instead of actual data arrays.
    """
    container = _do_plot_boilerplate(kwargs)
    plot_maker.do_plot(session.data, container, *data, **kwargs)
    container.request_redraw()
def semilogx(*data, **kwargs):
    """ Plots data on a semilog scale in a Matlab-compatible way. Data is
    assumed to be X vs Y. Any additional *kwargs* passed in are broadcast
    to all plots.
    Example::
        x = linspace(0.01, 10.0, 100)
        semilogx(x, sqrt(x), "b-")
    To use previous data, specify names instead of actual data arrays.
    Adding a semilog plot to an active plot with a currently different scale
    rescales the plot.
    """
    # Log-scale the index (X) axis, then let plot() do the rendering.
    kwargs["index_scale"] = "log"
    plot(*data, **kwargs)
def semilogy(*data, **kwargs):
    """ Plots data on a semilog scale in a Matlab-compatible way. Data is
    assumed to be X vs Y. Any additional *kwargs* passed in are broadcast
    to all plots.
    Example::
        x = linspace(0, 10.0, 100)
        semilogy(x, exp(x), "b-")
    To use previous data, specify names instead of actual data arrays.
    Adding a semilog plot to an active plot with a currently different scale
    rescales the plot.
    """
    # Log-scale the value (Y) axis; plot() handles everything else.
    kwargs.update(value_scale="log")
    plot(*data, **kwargs)
def loglog(*data, **kwargs):
    """ Plots data on a log-log scale in a Matlab-compatible way. Data is
    assumed to be X vs Y. Any additional *kwargs* passed in are broadcast
    to all plots.
    Example::
        x = linspace(0.001, 10.0, 100)
        loglog(x, x**2, "b-")
    To use previous data, specify names instead of actual data arrays.
    Adding a log-log plot to an active plot with a currently different scale
    rescales the plot.
    """
    # Log-scale both axes, then delegate to plot().
    kwargs.update(index_scale="log", value_scale="log")
    plot(*data, **kwargs)
def imread(*data, **kwargs):
    """ Returns image file as an array.

    All positional and keyword arguments are forwarded unchanged to
    ``plot_maker.do_imread``, which performs the actual file loading.
    """
    return plot_maker.do_imread(*data, **kwargs)
def imshow(*data, **kwargs):
    """ Creates an image plot from a file on disk. Takes either
    filename or image data. Any additional *kwargs* passed in are broadcast
    to all plots.
    Example 1::
        imshow("example.jpg")
    Example 2::
        image = ImageData.fromfile("example.jpg")
        imshow(image)
    To use previous data, specify names instead of filename or data arrays.
    """
    container = _do_plot_boilerplate(kwargs, image=True)
    # Fall back to the session-wide colormap unless the caller gave one.
    kwargs.setdefault("colormap", session.colormap)
    plot_maker.do_imshow(session.data, container, *data, **kwargs)
    container.request_redraw()
def pcolor(*data, **kwargs):
    """ Colormaps scalar data in a roughly Matlab-compatible way. Data are
    assumed to be a scalar image. Any additional *kwargs* passed in are
    broadcast to all plots.
    Example::
        xs = linspace(0,10,100)
        ys = linspace(0,20,200)
        x,y=meshgrid(xs,ys)
        z = sin(x)*y
        pcolor(x, y, z)
    To use previous data, specify names instead of actual data arrays.
    """
    container = _do_plot_boilerplate(kwargs)
    plot_maker.do_pcolor(session.data, session.colormap, container,
                         *data, **kwargs)
    container.request_redraw()
def contour(*data, **kwargs):
    """ Contour line plots of scalar data in a roughly Matlab-compatible way.
    Data are assumed to be a scalar image. Any additional *kwargs* passed in
    are broadcast to all plots.
    Example::
        xs = linspace(0,10,100)
        ys = linspace(0,20,200)
        x,y=meshgrid(xs,ys)
        z = sin(x)*y
        contour(z)
    To use previous data, specify names instead of actual data arrays.
    """
    container = _do_plot_boilerplate(kwargs)
    # "line" selects line contours (contourf() uses "poly" for filled ones).
    plot_maker.do_contour(session.data, session.colormap, container,
                          "line", *data, **kwargs)
    container.request_redraw()
def contourf(*data, **kwargs):
    """ Contour polygon plots of scalar data in a roughly Matlab-compatible way.
    Data are assumed to be a scalar image. Any additional *kwargs* passed in
    are broadcast to all plots.
    Example::
        xs = linspace(0,10,100)
        ys = linspace(0,20,200)
        x,y=meshgrid(xs,ys)
        z = sin(x)*y
        contourf(z)
    To use previous data, specify names instead of actual data arrays.
    """
    container = _do_plot_boilerplate(kwargs)
    # "poly" selects filled contour polygons (contour() uses "line").
    plot_maker.do_contour(session.data, session.colormap, container,
                          "poly", *data, **kwargs)
    container.request_redraw()
def plotv(*args, **kwargs):
    """ Creates a plot of a particular type, or using a "best guess"
    approach based on the data, using chaco semantics.
    The number and shape of data arrays determine how the data is
    interpreted, and how many plots are created.
    Single-dimensional arrays (shape = (N,))
    ----------------------------------------
    1. Single array: the data is treated as the value array, and an index
       array is generated automatically using arange(len(value))
    2. Multiple arrays: the first array is treated as the index array, and
       each subsequent array is used as the value for a new plot. All of
       the plots share a common index (first array).
    Multi-dimensional arrays (shape = (N,2) or (2,N))
    -------------------------------------------------
    1. Single array (NxM or MxN, N > M): interpreted as M-1 plots of
       N data points each, just like in the multiple 1D array case above.
    2. Multiple arrays: each array is treated as a separate set of inter-
       related plots, with its own index and value data sources
    Keyword Arguments
    -----------------
    type
        comma-separated combination of "line", "scatter", "polar"
    sort
        "ascending", "descending", or "none", indicating the sorting order
        of the array that will be used as the index
    color
        the color of the plot line and/or marker
    bgcolor
        the background color of the plot
    grid
        boolean specifying whether or not to draw a grid on the plot
    axis
        boolean specifying whether or not to draw an axis on the plot
    orientation
        "h" for index on the X axis, "v" for index on the Y axis
    Scatter plot keywords
    ---------------------
    marker
        the type of marker to use (square, diamond, circle, cross,
        crossed circle, triangle, inverted triangle, plus, dot, pixel
    marker_size
        the size (in pixels) of the marker
    outline_color
        the color of the marker outline
    Line plot keywords
    ------------------
    width
        the thickness of the line
    dash
        the dash style to use (solid, dot dash, dash, dot, long dash)
    """
    container = _do_plot_boilerplate(kwargs)
    # do_plotv builds the renderers; add them all to the container at once.
    new_plots = plot_maker.do_plotv(session, *args, **kwargs)
    container.add(*new_plots)
    container.request_redraw()
#-----------------------------------------------------------------------------
# Annotations
#-----------------------------------------------------------------------------
def xtitle(text):
    """ Sets the horizontal axis label to *text*. """
    current = curplot()
    if current:
        current.x_axis.title = text
        current.request_redraw()
def ytitle(text):
    """ Sets the vertical axis label to *text*. """
    current = curplot()
    if current:
        current.y_axis.title = text
        current.request_redraw()
def title(text):
    """ Sets the plot title to *text*. """
    current = curplot()
    if current:
        current.title = text
        current.request_redraw()
_axis_params = """Parameters
----------
title : str
The text of the title
title_font : KivaFont('modern 12')
The font in which to render the title
title_color : color ('color_name' or (red, green, blue, [alpha]) tuple)
The color in which to render the title
tick_weight : float
The thickness (in pixels) of each tick
tick_color : color ('color_name' or (red, green, blue, [alpha]) tuple)
The color of the ticks
tick_label_font : KivaFont('modern 10')
The font in which to render the tick labels
tick_label_color : color ('color_name' or (red, green, blue, [alpha]) tuple)
The color of the tick labels
tick_label_formatter : callable
A callable that is passed the numerical value of each tick label and
which should return a string.
tick_in : int
The number of pixels by which the ticks go "into" the plot area
tick_out : int
The number of pixels by which the ticks extend into the label area
tick_visible : bool
Are ticks visible at all?
tick_interval : 'auto' or float
What is the dataspace interval between ticks?
orientation : Enum("top", "bottom", "left", "right")
The location of the axis relative to the plot. This determines where
the axis title is located relative to the axis line.
axis_line_visible : bool
Is the axis line visible?
axis_line_color : color ('color_name' or (red, green, blue, [alpha]) tuple)
The color of the axis line
axis_line_weight : float
The line thickness (in pixels) of the axis line
axis_line_style : LineStyle('solid')
The dash style of the axis line"""
def xaxis(**kwds):
    # NOTE: the docstring is installed below via ``xaxis.__doc__`` so the
    # shared _axis_params text can be interpolated.  The previous in-body
    # ``""" ... """ % _axis_params`` statement was NOT a docstring (a string
    # literal followed by ``%`` is an expression), so it was uselessly
    # re-evaluated on every call; it has been removed.
    p = curplot()
    if p:
        if kwds:
            # Apply the given trait values to the axis.
            p.x_axis.set(**kwds)
        else:
            # No keywords: toggle axis visibility.
            p.x_axis.visible ^= True
        p.request_redraw()
xaxis.__doc__ = """ Configures the x-axis.
Usage
-----
* ``xaxis()``: toggles the horizontal axis on or off.
* ``xaxis(**kwds)``: set parameters of the horizontal axis.
%s
""" % _axis_params
def yaxis(**kwds):
    # NOTE: the docstring is installed below via ``yaxis.__doc__`` so the
    # shared _axis_params text can be interpolated.  The previous in-body
    # ``""" ... """ % _axis_params`` statement was NOT a docstring (a string
    # literal followed by ``%`` is an expression), so it was uselessly
    # re-evaluated on every call; it has been removed.
    p = curplot()
    if p:
        if kwds:
            # Apply the given trait values to the axis.
            p.y_axis.set(**kwds)
        else:
            # No keywords: toggle axis visibility.
            p.y_axis.visible ^= True
        p.request_redraw()
yaxis.__doc__ = """ Configures the y-axis.
Usage
-----
* ``yaxis()``: toggles the vertical axis on or off.
* ``yaxis(**kwds)``: set parameters of the vertical axis.
%s
""" % _axis_params
def xgrid():
    """ Toggles the grid perpendicular to the X axis. """
    current = curplot()
    if current:
        current.x_grid.visible ^= True
        current.request_redraw()
def ygrid():
    """ Toggles the grid perpendicular to the Y axis. """
    current = curplot()
    if current:
        current.y_grid.visible ^= True
        current.request_redraw()
def _set_scale(axis, system):
    """ Shared implementation behind xscale()/yscale(): applies *system*
    to the ticks of the requested axis of the current plot.
    """
    p = curplot()
    if not p:
        return
    # Pick the trait name and tick object for the requested axis.
    if axis == 'x':
        scale_trait, ticks = 'index_scale', p.x_ticks
    else:
        scale_trait, ticks = 'value_scale', p.y_ticks
    if system == 'time':
        system = CalendarScaleSystem()
    if isinstance(system, basestring):
        # 'log' / 'linear' are handled by the plot's scale trait directly.
        setattr(p, scale_trait, system)
    else:
        if system is None:
            # Revert to the default scale matching the current trait value.
            system = dict(linear=p.linear_scale, log=p.log_scale).get(
                p.get(scale_trait), p.linear_scale)
        ticks.scale = system
    p.request_redraw()
def xscale(system=None):
    """ Change the scale system for the X-axis ticks.
    Usage
    -----
    * ``xscale()``: revert the scale system to the default.
    * ``xscale('time')``: use the calendar scale system for time series.
    * ``xscale('log')``: use a generic log-scale.
    * ``xscale('linear')``: use a generic linear-scale.
    * ``xscale(some_scale_system)``: use an arbitrary ScaleSystem object.
    """
    # All the work happens in the shared helper.
    _set_scale('x', system)
def yscale(system=None):
    """ Change the scale system for the Y-axis ticks.
    Usage
    -----
    * ``yscale()``: revert the scale system to the default.
    * ``yscale('time')``: use the calendar scale system for time series.
    * ``yscale('log')``: use a generic log-scale.
    * ``yscale('linear')``: use a generic linear-scale.
    * ``yscale(some_scale_system)``: use an arbitrary ScaleSystem object.
    """
    # All the work happens in the shared helper.
    _set_scale('y', system)
def legend(setting=None):
    """ Sets or toggles the presence of the legend
    Usage
    -----
    * ``legend()``: toggles the legend; if it is currently visible, it is hidden, and if it is currently hidden, then it is displayed
    * ``legend(True)``: shows the legend
    * ``legend(False)``: hides the legend
    """
    p = curplot()
    if p:
        # None means "toggle"; otherwise use the explicit setting.
        p.legend.visible = (not p.legend.visible) if setting is None else setting
        p.request_redraw()
#-----------------------------------------------------------------------------
# Tools
#-----------------------------------------------------------------------------
def tool():
    """ Toggles tools on and off.

    NOTE(review): currently a stub -- it fetches the active plot but does
    not change any tools yet.
    """
    p = curplot()
    if p:
        pass
#-----------------------------------------------------------------------------
# Saving and IO
#-----------------------------------------------------------------------------
def save(filename="chacoplot.png", dpi=72, pagesize="letter", dest_box=None, units="inch"):
""" Saves the active plot to an file. Currently supported file types
are: bmp, png, jpg.
"""
p = curplot()
if not p:
print "Doing nothing because there is no active plot."
return
import os.path
ext = os.path.splitext(filename)[-1]
if ext == ".pdf":
print "Warning: the PDF backend is still a little buggy."
from chaco.pdf_graphics_context import PdfPlotGraphicsContext
# Set some default PDF options if none are provided
if dest_box is None:
dest_box = (0.5, 0.5, -0.5, -0.5)
gc = PdfPlotGraphicsContext(filename = filename,
pagesize = pagesize,
dest_box = dest_box,
dest_box_units = units)
# temporarily turn off the backbuffer for offscreen rendering
use_backbuffer = p.use_backbuffer
p.use_backbuffer = False
gc.render_component(p)
p.use_backbuffer = use_backbuffer
gc.save()
del gc
print "Saved to", filename
elif ext in [".bmp", ".png", ".jpg"]:
from chaco.api import PlotGraphicsContext
gc = PlotGraphicsContext(tuple(p.outer_bounds), dpi=dpi)
# temporarily turn off the backbuffer for offscreen rendering
use_backbuffer = p.use_backbuffer
p.use_backbuffer = False
gc.render_component(p)
p.use_backbuffer = use_backbuffer
gc.save(filename)
del gc
print "Saved to", filename
else:
print "Format not yet supported:", ext
print "Currently supported formats are: bmp, png, jpg."
return
# EOF
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Sword York
# GitHub: https://github.com/SwordYork/sequencing
# No rights reserved.
#
import json
import numpy
import sequencing as sq
import tensorflow as tf
from sequencing import TIME_MAJOR, MODE
from sequencing.utils.metrics import Delta_BLEU
def optimistic_restore(session, save_file):
    """
    Only load matched variables. For example, Adam may not be saved and not
    necessary to load.
    :param session: tf.Session to restore into.
    :param save_file: file path of the checkpoint.
    :return:
    """
    reader = tf.train.NewCheckpointReader(save_file)
    saved_shapes = reader.get_variable_to_shape_map()
    # (full name, name without the ":0" suffix) for every global variable
    # that also exists in the checkpoint; sorted for determinism.
    var_names = sorted(
        [(v.name, v.name.split(':')[0]) for v in tf.global_variables()
         if v.name.split(':')[0] in saved_shapes])
    name2var = {v.name.split(':')[0]: v for v in tf.global_variables()}
    restore_vars = []
    with tf.variable_scope('', reuse=True):
        for _, saved_name in var_names:
            candidate = name2var[saved_name]
            # Only restore when shapes agree with the checkpoint entry.
            if candidate.get_shape().as_list() == saved_shapes[saved_name]:
                restore_vars.append(candidate)
    saver = tf.train.Saver(restore_vars)
    saver.restore(session, save_file)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
    """Average per-token cross entropy over the valid (unpadded) steps.

    ``logits``/``targets`` are time-major (T x B ...); ``sequence_length``
    gives the valid length of each batch element.
    """
    with tf.name_scope('cross_entropy_sequence_loss'):
        num_tokens = tf.to_float(tf.reduce_sum(sequence_length))
        step_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=targets)
        # Build a T x B mask that zeroes out losses past each sequence end.
        mask = tf.sequence_mask(
            tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
        mask = tf.transpose(tf.to_float(mask), [1, 0])
        masked_losses = step_losses * mask
        # masked_losses.shape: T * B; normalize by the total token count.
        return tf.reduce_sum(masked_losses) / num_tokens
def rl_sequence_loss(logits, predict_ids, sequence_length,
                     baseline_states, reward, start_rl_step):
    """Mixed ML/RL sequence loss (MIXER-style).

    Steps before ``start_rl_step`` contribute ordinary cross-entropy (ML);
    steps after it contribute a REINFORCE-style loss weighted by
    (reward - baseline), where the baseline is regressed from
    ``baseline_states``.  Returns
    (total_loss_avg, ml_loss, reward_loss_rmse, predict_reward).
    """
    # reward: T * B
    with tf.name_scope('rl_sequence_loss'):
        # Largest / smallest per-batch boundary between the ML and RL
        # regions, clamped at 0.
        max_ml_step = tf.to_int32(tf.maximum(tf.reduce_max(start_rl_step), 0))
        min_ml_step = tf.to_int32(tf.maximum(tf.reduce_min(start_rl_step), 0))

        # entropy loss:
        # before start_rl_step is ml entropy
        # after start_rl_step should be rl entropy
        entropy_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=predict_ids)

        # ML loss: only the first max_ml_step time steps can contain ML
        # tokens for any batch element.
        ml_entropy_losses = tf.slice(entropy_losses, [0, 0], [max_ml_step, -1])
        # Mask out the losses we don't care about
        ml_loss_mask = tf.sequence_mask(
            tf.to_int32(start_rl_step), max_ml_step)
        ml_loss_mask = tf.transpose(tf.to_float(ml_loss_mask), [1, 0])
        ml_loss = tf.reduce_sum(ml_entropy_losses * ml_loss_mask) / \
                  tf.maximum(tf.reduce_sum(ml_loss_mask), 1)

        # RL: everything from the earliest RL boundary onwards.
        rl_entropy_losses = tf.slice(entropy_losses, [min_ml_step, 0], [-1, -1])
        # Mask out the losses we don't care about: the difference of the
        # two sequence masks selects exactly the [start_rl_step,
        # sequence_length) window for each batch element.
        rl_loss_mask = (
            tf.to_float(tf.sequence_mask(
                tf.to_int32(sequence_length - min_ml_step),
                tf.to_int32(tf.shape(predict_ids)[0] - min_ml_step)))
            - tf.to_float(tf.sequence_mask(
                tf.to_int32(start_rl_step - min_ml_step),
                tf.to_int32(tf.shape(predict_ids)[0] - min_ml_step))))
        rl_loss_mask = tf.transpose(tf.to_float(rl_loss_mask), [1, 0])
        # Align baseline states and rewards with the RL window.
        baseline_states = tf.slice(baseline_states, [min_ml_step, 0, 0],
                                   [-1, -1, -1])
        reward = tf.slice(reward, [min_ml_step, 0], [-1, -1])
        # prevent from dividing by zero
        rl_total = tf.maximum(tf.reduce_sum(rl_loss_mask), 1)

        with tf.variable_scope('baseline'):
            # Two-layer regressor predicting the reward from the decoder
            # states (the baseline for variance reduction).
            reward_predicted_m = tf.contrib.layers.fully_connected(
                baseline_states, baseline_states.get_shape().as_list()[-1],
                activation_fn=tf.nn.relu, scope='middle')
            # note, there is no negative reward, so we could use relu
            reward_predicted = tf.contrib.layers.fully_connected(
                reward_predicted_m, 1, activation_fn=None)
            reward_predicted = tf.squeeze(reward_predicted, axis=[2])

        # Baseline regression loss (RMSE over the RL window).
        reward_losses = tf.pow(reward_predicted - reward, 2)
        reward_loss_rmse = tf.sqrt(
            tf.reduce_sum(reward_losses * rl_loss_mask) / rl_total + 1e-12)

        # REINFORCE term: advantage (reward - baseline, baseline detached)
        # times the log-likelihood losses.
        reward_entropy_losses = (reward - tf.stop_gradient(reward_predicted)) \
                                * rl_entropy_losses * rl_loss_mask
        reward_entropy_loss = tf.reduce_sum(reward_entropy_losses) / rl_total

        # The baseline at the first RL step predicts the total reward;
        # guard against an empty RL window.
        predict_reward = tf.cond(tf.greater(tf.shape(reward_predicted)[0], 0),
                                 lambda: tf.reduce_mean(
                                     tf.slice(reward_predicted, [0, 0],
                                              [1, -1])),
                                 lambda: tf.to_float(0))
        # Calculate the average log perplexity in each batch
        total_loss_avg = ml_loss + reward_entropy_loss + reward_loss_rmse
        # the first reward predict is total reward
        return total_loss_avg, \
               ml_loss, \
               reward_loss_rmse, \
               predict_reward
def _py_func(predict_target_ids, ground_truth_ids, eos_id):
    """Compute BLEU-based rewards for each predicted sequence (numpy side
    of a tf.py_func).

    Both id arrays are time-major (T x B).  Returns a (T x B) float32
    reward array and a (B,) int32 array of predicted lengths (up to and
    including the first eos).
    """
    n = 4  # BLEU n-gram order
    delta = True  # use delta future reward
    batch_size = predict_target_ids.shape[1]
    length = numpy.zeros(batch_size, dtype=numpy.int32)
    reward = numpy.zeros_like(predict_target_ids, dtype=numpy.float32)
    for b in range(batch_size):
        predicted = predict_target_ids[:, b].tolist()
        if eos_id in predicted:
            p_len = predicted.index(eos_id) + 1
        else:
            p_len = len(predicted)
        length[b] = p_len
        predicted = predicted[:p_len]
        truth = ground_truth_ids[:, b].tolist()
        if eos_id in truth:
            truth = truth[:truth.index(eos_id) + 1]
        bleu_scores = Delta_BLEU(predicted, truth, n)
        step_reward = bleu_scores[:, n - 1].copy()
        if delta:
            # Per-step BLEU gain, accumulated as future (reward-to-go) sums.
            step_reward[1:] = step_reward[1:] - step_reward[:-1]
            reward[:p_len, b] = step_reward[::-1].cumsum()[::-1]
        else:
            # Constant reward: final BLEU at every step.
            reward[:p_len, b] = step_reward[-1]
    return reward, length
def build_attention_model(params, src_vocab, trg_vocab, source_ids,
                          source_seq_length, target_ids, target_seq_length,
                          beam_size=1, mode=MODE.TRAIN,
                          burn_in_step=100000, increment_step=10000,
                          teacher_rate=1.0, max_step=100):
    """
    Build a model.
    :param params: dict.
     {encoder: {rnn_cell: {},
              ...},
     decoder: {rnn_cell: {},
              ...}}
     for example:
     {'encoder': {'rnn_cell': {'state_size': 512,
                              'cell_name': 'BasicLSTMCell',
                              'num_layers': 2,
                              'input_keep_prob': 1.0,
                              'output_keep_prob': 1.0},
                 'attention_key_size': attention_size},
     'decoder': {'rnn_cell': {'cell_name': 'BasicLSTMCell',
                             'state_size': 512,
                             'num_layers': 1,
                             'input_keep_prob': 1.0,
                             'output_keep_prob': 1.0},
                'trg_vocab_size': trg_vocab_size}}
    :param src_vocab: Vocab of source symbols.
    :param trg_vocab: Vocab of target symbols.
    :param source_ids: placeholder
    :param source_seq_length: placeholder
    :param target_ids: placeholder
    :param target_seq_length: placeholder
    :param beam_size: used in beam inference
    :param mode: one of MODE.TRAIN / MODE.RL / MODE.EVAL / MODE.INFER
    :param burn_in_step: RL schedule - steps before RL training starts
    :param increment_step: RL schedule - steps per additional RL time step
    :param teacher_rate: teacher-forcing probability for MODE.TRAIN
    :param max_step: maximum decoding length
    :return: (decoder_output, decoder_final_state) when not training, else
             (decoder_output, total_loss_avg, entropy_loss_avg,
              reward_loss_rmse, reward_predicted)
    """
    if mode != MODE.TRAIN:
        params = sq.disable_dropout(params)

    tf.logging.info(json.dumps(params, indent=4))

    # parameters
    encoder_params = params['encoder']
    decoder_params = params['decoder']

    # Because source encoder is different to the target feedback,
    # we construct source_embedding_table manually
    source_embedding_table = sq.LookUpOp(src_vocab.vocab_size,
                                         src_vocab.embedding_dim,
                                         name='source')
    source_embedded = source_embedding_table(source_ids)

    encoder = sq.StackBidirectionalRNNEncoder(encoder_params, name='stack_rnn',
                                              mode=mode)
    encoded_representation = encoder.encode(source_embedded, source_seq_length)
    attention_keys = encoded_representation.attention_keys
    attention_values = encoded_representation.attention_values
    attention_length = encoded_representation.attention_length

    # feedback
    if mode == MODE.RL:
        tf.logging.info('BUILDING RL TRAIN FEEDBACK......')
        dynamical_batch_size = tf.shape(attention_keys)[1]
        feedback = sq.RLTrainingFeedBack(target_ids, target_seq_length,
                                         trg_vocab, dynamical_batch_size,
                                         burn_in_step=burn_in_step,
                                         increment_step=increment_step,
                                         max_step=max_step)
    elif mode == MODE.TRAIN:
        tf.logging.info('BUILDING TRAIN FEEDBACK WITH {} TEACHER_RATE'
                        '......'.format(teacher_rate))
        feedback = sq.TrainingFeedBack(target_ids, target_seq_length,
                                       trg_vocab, teacher_rate,
                                       max_step=max_step)
    elif mode == MODE.EVAL:
        tf.logging.info('BUILDING EVAL FEEDBACK ......')
        feedback = sq.TrainingFeedBack(target_ids, target_seq_length,
                                       trg_vocab, 0.,
                                       max_step=max_step)
    else:
        tf.logging.info('BUILDING INFER FEEDBACK WITH BEAM_SIZE {}'
                        '......'.format(beam_size))
        infer_key_size = attention_keys.get_shape().as_list()[-1]
        infer_value_size = attention_values.get_shape().as_list()[-1]

        # expand beam: tile keys/values/lengths beam_size times so each
        # beam hypothesis attends over its own copy of the source.
        if TIME_MAJOR:
            # batch size should be dynamical
            dynamical_batch_size = tf.shape(attention_keys)[1]
            final_key_shape = [-1, dynamical_batch_size * beam_size,
                               infer_key_size]
            final_value_shape = [-1, dynamical_batch_size * beam_size,
                                 infer_value_size]
            attention_keys = tf.reshape(
                (tf.tile(attention_keys, [1, 1, beam_size])), final_key_shape)
            attention_values = tf.reshape(
                (tf.tile(attention_values, [1, 1, beam_size])),
                final_value_shape)
        else:
            dynamical_batch_size = tf.shape(attention_keys)[0]
            final_key_shape = [dynamical_batch_size * beam_size, -1,
                               infer_key_size]
            final_value_shape = [dynamical_batch_size * beam_size, -1,
                                 infer_value_size]
            attention_keys = tf.reshape(
                (tf.tile(attention_keys, [1, beam_size, 1])), final_key_shape)
            attention_values = tf.reshape(
                (tf.tile(attention_values, [1, beam_size, 1])),
                final_value_shape)

        attention_length = tf.reshape(
            tf.transpose(tf.tile([attention_length], [beam_size, 1])), [-1])

        feedback = sq.BeamFeedBack(trg_vocab, beam_size, dynamical_batch_size,
                                   max_step=max_step)

    decoder_state_size = decoder_params['rnn_cell']['state_size']
    context_size = attention_values.get_shape().as_list()[-1]

    # attention
    attention = sq.Attention(decoder_state_size,
                             attention_keys, attention_values, attention_length)

    with tf.variable_scope('logits_func'):
        # BUG FIX: ``LinearOp`` was referenced unqualified, but this module
        # only imports ``sequencing as sq`` -- qualify both ops through the
        # package, consistent with sq.LookUpOp above.
        attention_mix = sq.LinearOp(
            context_size + feedback.embedding_dim + decoder_state_size,
            decoder_state_size, name='attention_mix')
        logits_trans = sq.LinearOp(decoder_state_size, feedback.vocab_size,
                                   name='logits_trans')
        logits_func = lambda _softmax: logits_trans(
            tf.nn.tanh(attention_mix(_softmax)))

    # decoder
    decoder = sq.AttentionRNNDecoder(decoder_params, attention,
                                     feedback, logits_func=logits_func, mode=mode)
    decoder_output, decoder_final_state = sq.dynamic_decode(decoder,
                                                            swap_memory=True,
                                                            scope='decoder')

    # not training
    if mode == MODE.EVAL or mode == MODE.INFER:
        return decoder_output, decoder_final_state

    # bos is added in feedback
    # so target_ids is predict_ids
    if not TIME_MAJOR:
        ground_truth_ids = tf.transpose(target_ids, [1, 0])
    else:
        ground_truth_ids = target_ids

    # construct the loss
    if mode == MODE.RL:
        # Creates a variable to hold the global_step.
        global_step_tensor = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope='global_step')[0]
        # How many trailing time steps are trained with RL right now.
        rl_time_steps = tf.floordiv(tf.maximum(global_step_tensor -
                                               burn_in_step, 0),
                                    increment_step)
        start_rl_step = target_seq_length - rl_time_steps

        baseline_states = tf.stop_gradient(decoder_output.baseline_states)
        predict_ids = tf.stop_gradient(decoder_output.predicted_ids)

        # TODO: bug in tensorflow
        ground_or_predict_ids = tf.cond(tf.greater(rl_time_steps, 0),
                                        lambda: predict_ids,
                                        lambda: ground_truth_ids)

        # BLEU rewards are computed on the numpy side via _py_func.
        reward, sequence_length = tf.py_func(
            func=_py_func,
            inp=[ground_or_predict_ids, ground_truth_ids, trg_vocab.eos_id],
            Tout=[tf.float32, tf.int32],
            name='reward')
        sequence_length.set_shape((None,))

        total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted \
            = rl_sequence_loss(
                logits=decoder_output.logits,
                predict_ids=predict_ids,
                sequence_length=sequence_length,
                baseline_states=baseline_states,
                start_rl_step=start_rl_step,
                reward=reward)
        return decoder_output, total_loss_avg, entropy_loss_avg, \
               reward_loss_rmse, reward_predicted
    else:
        total_loss_avg = cross_entropy_sequence_loss(
            logits=decoder_output.logits,
            targets=ground_truth_ids,
            sequence_length=target_seq_length)
        return decoder_output, total_loss_avg, total_loss_avg, \
               tf.to_float(0.), tf.to_float(0.)
| |
from __future__ import absolute_import
import sys
from contextlib import contextmanager
from collections import namedtuple
import logging
import kombu
import kombu.mixins
import kombu.pools
from lymph.events.base import BaseEventSystem
from lymph.core.events import Event
from lymph.utils.logging import setup_logger
logger = logging.getLogger(__name__)
DEFAULT_SERIALIZER = 'lymph-msgpack'
DEFAULT_EXCHANGE = 'lymph'
DEFAULT_MAX_RETRIES = 3
RETRY_HEADER = 'retry_count'
# Info of where the queue master server, so that we can redeclare the queue
# when we failover to another master.
QueueInfo = namedtuple('QueueInfo', 'master queue')
class EventConsumer(kombu.mixins.ConsumerMixin):
    """Consumes events from a kombu queue and dispatches them to a handler.

    Failed events are republished through the retry exchange with a
    decremented retry counter, until ``handler.retry`` attempts are used up.
    """

    def __init__(self, event_system, connection, queue, handler, exchange, max_retries=DEFAULT_MAX_RETRIES):
        self.connection = connection
        self.queue = queue
        self.handler = handler
        self.exchange = exchange
        self.greenlet = None
        self.event_system = event_system
        self.connect_max_retries = max_retries
        # Producer used to republish failed events for another attempt.
        self.retry_producer = EventProducer(
            exchange=self.event_system.retry_exchange,
            routing_key=handler.queue_name,
            event_system=event_system,
        )

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=[self.queue], callbacks=[self.on_kombu_message])]

    def create_connection(self):
        return kombu.pools.connections[self.connection].acquire()

    def on_connection_revived(self):
        # Re-create any queue and bind it to the exchange in case of a
        # failover, since the bind may be broken or the queue may have been
        # deleted if it was created with auto-delete.
        self._declare()

    def _declare(self):
        with self.establish_connection() as conn:
            self.event_system.safe_declare(conn, self.queue)
            for event_type in self.handler.event_types:
                self.queue(conn).bind_to(exchange=self.exchange, routing_key=event_type)
            # Also receive retried events routed directly at this queue.
            self.queue(conn).bind_to(exchange=self.event_system.retry_exchange, routing_key=self.handler.queue_name)

    def on_kombu_message(self, body, message):
        logger.debug("received kombu message %r", body)
        if self.handler.sequential:
            self._handle_message(body, message)
        else:
            self.event_system.container.spawn(self._handle_message, body, message)

    def _handle_message(self, body, message):
        # BUG FIX: ``event`` must be bound before the try block -- if
        # Event.deserialize() itself raised, the except branch hit a
        # NameError inside _handle_fail instead of reporting the real error.
        event = None
        try:
            event = Event.deserialize(body)
            self.handler(event)
            message.ack()
        except Exception:
            # Not a bare ``except:`` so KeyboardInterrupt/SystemExit (and
            # greenlet kills) are not swallowed.
            self._handle_fail(message, event)
        finally:
            if self.handler.once:
                self.event_system.unsubscribe(self.handler)

    def _handle_fail(self, message, event):
        if event is not None:
            retry = event.headers.get(RETRY_HEADER, self.handler.retry) - 1
            if retry >= 0:
                self._requeue(event, retry)
        else:
            # Deserialization failed: there is no event to requeue.
            retry = 0
        message.reject()
        logger.exception('failed to handle event from queue %r (tries_left=%s)', self.handler.queue_name, retry)
        self.event_system.container.error_hook(sys.exc_info())

    def _requeue(self, event, retry):
        event.headers[RETRY_HEADER] = retry
        self.retry_producer.emit(event)

    def start(self):
        if self.greenlet:
            return
        self.should_stop = False
        self._declare()
        self.greenlet = self.event_system.container.spawn(self.run)

    def stop(self, **kwargs):
        if not self.greenlet:
            return
        self.should_stop = True
        self.greenlet.join()
        self.greenlet = None
class EventProducer(object):
    """Publishes serialized events to a fixed exchange and routing key."""

    def __init__(self, exchange, routing_key, event_system, max_retries=DEFAULT_MAX_RETRIES):
        self.routing_key = routing_key
        self.exchange = exchange
        self.event_system = event_system
        self.max_retries = max_retries

    @contextmanager
    def _get_connection(self):
        # Delegate connection acquisition/pooling to the event system.
        with self.event_system.get_connection() as conn:
            yield conn

    def _get_producer(self, conn):
        return conn.Producer(
            serializer=self.event_system.serializer,
            routing_key=self.routing_key,
            exchange=self.exchange)

    def emit(self, event):
        """Serialize *event* and publish it, retrying up to max_retries."""
        with self._get_connection() as conn:
            producer = self._get_producer(conn)
            return producer.publish(
                event.serialize(),
                retry_policy={'max_retries': self.max_retries})
class EventProducerWithDelay(EventProducer):
    """Producer that allow sending messages after a given delay.
    It works by publishing messages to an intermediate RabbitMQ queue, this messages
    will have a ttl set to the delay given, this way RabbitMQ will forward the messages
    after the ttl expire to the dead-letter-exchange attached to the intermediate queue,
    which we set to our main exchange (i.e. default to 'lymph' exchange), et voila now
    RabbitMQ can send the message to the lymph events handler.
    """

    def __init__(self, delay, *args, **kwargs):
        super(EventProducerWithDelay, self).__init__(*args, **kwargs)
        self.delay = delay  # Delay in ms.
        self._intermediate_queue = None

    def _get_producer(self, conn):
        # (Re)declare the TTL queue whenever we first publish or the queue
        # master changed (failover); the cached QueueInfo records the master.
        cached = self._intermediate_queue
        if cached is None or cached.master != conn.as_uri():
            queue = self._prepare_intermediate_queue(conn)
            self._intermediate_queue = QueueInfo(master=conn.as_uri(), queue=queue)
        return super(EventProducerWithDelay, self)._get_producer(conn)

    def _prepare_intermediate_queue(self, conn):
        """Declare and bind the per-routing-key TTL queue."""
        queue_name = '%s-wait_%s' % (self.routing_key, self.delay)
        queue = self.event_system.get_queue(queue_name, durable=False, queue_arguments={
            # Expired messages dead-letter back into the main exchange.
            'x-dead-letter-exchange': self.event_system.exchange.name,
            'x-dead-letter-routing-key': self.routing_key,
            'x-message-ttl': self.delay,
        })
        self.event_system.safe_declare(conn, queue)
        queue(conn).bind_to(exchange=self.exchange, routing_key=self.routing_key)
        return queue
class KombuEventSystem(BaseEventSystem):
    def __init__(self, connection, exchange_name, serializer=DEFAULT_SERIALIZER, connect_max_retries=DEFAULT_MAX_RETRIES):
        """Event transport backed by kombu (RabbitMQ).

        :param connection: kombu.Connection used for all publishing/consuming.
        :param exchange_name: name of the main topic exchange.
        :param serializer: kombu serializer name for event payloads.
        :param connect_max_retries: bound on (re)connection attempts.
        """
        super(KombuEventSystem, self).__init__()
        self.connection = connection
        self.exchange_name = exchange_name
        # Main topic exchange events are published to.
        self.exchange = kombu.Exchange(exchange_name, 'topic', durable=True)
        # Intermediate exchange used for delayed (TTL-based) delivery.
        self.waiting_exchange = kombu.Exchange('%s_waiting' % exchange_name, 'direct', durable=True)
        # Exchange used to requeue failed events for retry.
        self.retry_exchange = kombu.Exchange('%s_retry' % exchange_name, 'direct', durable=True)
        self.serializer = serializer
        self.connect_max_retries = connect_max_retries
        # (event_type, delay) -> producer cache.
        self._producers = {}
        # queue name -> EventConsumer for active subscriptions.
        self.consumers_by_queue = {}
@classmethod
def from_config(cls, config, **kwargs):
exchange_name = config.get('exchange', DEFAULT_EXCHANGE)
serializer = config.get('serializer', DEFAULT_SERIALIZER)
connection = kombu.Connection(**config)
return cls(connection, exchange_name, serializer=serializer, **kwargs)
def on_start(self):
setup_logger('kombu')
with self.get_connection() as conn:
self.exchange(conn).declare()
self.waiting_exchange(conn).declare()
self.retry_exchange(conn).declare()
def on_stop(self, **kwargs):
for consumer in self.consumers_by_queue.values():
consumer.stop(**kwargs)
self.consumers_by_queue.clear()
def subscribe(self, handler, consume=True):
try:
consumer = self.consumers_by_queue[handler.queue_name]
except KeyError:
consumer = self.setup_consumer(handler)
else:
if consumer.handler != handler:
raise RuntimeError('cannot subscribe to queue %r more than once' % handler.queue_name)
if consume:
consumer.start()
return consumer
def unsubscribe(self, handler):
queue_name = handler.queue_name
try:
consumer = self.consumers_by_queue[queue_name]
except KeyError:
raise KeyError('there is no subscription for %r' % queue_name)
if consumer.handler != handler:
raise KeyError('%s is not subscribed to %r' % (handler, queue_name))
consumer.stop()
del self.consumers_by_queue[queue_name]
def setup_consumer(self, handler):
if handler.broadcast:
queue = self.get_queue(handler.queue_name, auto_delete=True, durable=False)
else:
queue = self.get_queue(handler.queue_name, auto_delete=handler.once, durable=False)
consumer = EventConsumer(self, self.connection, queue, handler, self.exchange, max_retries=self.connect_max_retries)
self.consumers_by_queue[handler.queue_name] = consumer
return consumer
@contextmanager
def get_connection(self):
with kombu.pools.connections[self.connection].acquire() as conn:
conn.ensure_connection(max_retries=self.connect_max_retries)
logger.debug('connecting to %s', conn.as_uri())
yield conn
@staticmethod
def get_queue(name, **kwargs):
queue_arguments = kwargs.pop('queue_arguments', {})
queue_arguments['x-ha-policy'] = 'all'
return kombu.Queue(name, queue_arguments=queue_arguments, **kwargs)
@staticmethod
def safe_declare(conn, queue):
try:
queue(conn).declare()
except conn.connection.channel_errors:
# XXX(Mouad): Redeclare queue since a race condition may happen
# when declaring queues in failover situation, more info check:
# https://bugs.launchpad.net/neutron/+bug/1318721.
queue(conn).declare()
def emit(self, event, delay=0):
producer = self._get_producer(event.evt_type, delay)
producer.emit(event)
def _get_producer(self, event_type, delay=0):
try:
return self._producers[event_type, delay]
except KeyError:
if delay:
producer = EventProducerWithDelay(
delay=int(1000 * delay),
exchange=self.waiting_exchange,
routing_key=event_type,
event_system=self,
)
else:
producer = EventProducer(
exchange=self.exchange,
routing_key=event_type,
event_system=self,
)
self._producers[event_type, delay] = producer
return producer
| |
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
from airflow.operators import PythonOperator
from airflow.hooks import RedisHook
from airflow.models import Variable
from airflow.hooks import MemcacheHook
from etl_tasks_functions import get_time
from etl_tasks_functions import subtract_time
from subdags.utilization_utility import calculate_wimax_utilization
from subdags.utilization_utility import calculate_cambium_ss_utilization
from subdags.utilization_utility import calculate_radwin5k_ss_utilization
from subdags.utilization_utility import calculate_radwin5k_bs_utilization
from subdags.utilization_utility import calculate_radwin5kjet_ss_utilization
from subdags.utilization_utility import calculate_radwin5kjet_bs_utilization
from subdags.utilization_utility import calculate_radwin5k_bs_and_ss_dyn_tl_kpi
from subdags.utilization_utility import calculate_backhaul_utilization
from subdags.utilization_utility import calculate_ptp_utilization
from subdags.utilization_utility import calculate_mrotek_utilization
from subdags.utilization_utility import backtrack_x_min
from subdags.utilization_utility import get_severity_values
from subdags.utilization_utility import calculate_age
from subdags.utilization_utility import calculate_severity
from airflow.operators import MySqlLoaderOperator
import logging
import itertools
import socket
import random
import traceback
import time
from pprint import pprint
# Default operator arguments shared by every task this module creates.
default_args = {
    'owner': 'wireless',
    'depends_on_past': False,
    'start_date': datetime.now() - timedelta(minutes=2),
    'email': ['vipulsharma144@gmail.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=1),
    'provide_context': True,
    'catchup': False,
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}
# Connection hooks: redis for intermediate KPI hand-off between tasks,
# memcache clusters for reading raw utilization counters per network segment.
redis_hook_util_10 = RedisHook(redis_conn_id="redis_hook_util_10")
memc_con_cluster = MemcacheHook(memc_cnx_id = 'memc_cnx')
vrfprv_memc_con = MemcacheHook(memc_cnx_id = 'vrfprv_memc_cnx')
pub_memc_con = MemcacheHook(memc_cnx_id = 'pub_memc_cnx')
redis_hook_static_5 = RedisHook(redis_conn_id="redis_hook_5")
# SQL templates: the %s header is filled with the per-machine database name
# ("nocout_<machine>") at DAG-build time before the parameterized tail is
# appended (the %(...)s placeholders are bound by the MySQL loader).
INSERT_HEADER = "INSERT INTO %s.performance_utilization"
INSERT_TAIL = """
(machine_name,current_value,service_name,avg_value,max_value,age,min_value,site_name,data_source,critical_threshold,device_name,severity,sys_timestamp,ip_address,warning_threshold,check_timestamp,refer )
values
(%(machine_name)s,%(current_value)s,%(service_name)s,%(avg_value)s,%(max_value)s,%(age)s,%(min_value)s,%(site_name)s,%(data_source)s,%(critical_threshold)s,%(device_name)s,%(severity)s,%(sys_timestamp)s,%(ip_address)s,%(warning_threshold)s,%(check_timestamp)s,%(refer)s)
"""
UPDATE_HEADER = "INSERT INTO %s.performance_utilizationstatus"
UPDATE_TAIL = """
(machine_name,current_value,service_name,avg_value,max_value,age,min_value,site_name,data_source,critical_threshold,device_name,severity,sys_timestamp,ip_address,warning_threshold,check_timestamp,refer )
values
(%(machine_name)s,%(current_value)s,%(service_name)s,%(avg_value)s,%(max_value)s,%(age)s,%(min_value)s,%(site_name)s,%(data_source)s,%(critical_threshold)s,%(device_name)s,%(severity)s,%(sys_timestamp)s,%(ip_address)s,%(warning_threshold)s,%(check_timestamp)s,%(refer)s)
ON DUPLICATE KEY UPDATE machine_name = VALUES(machine_name),current_value = VALUES(current_value),age=VALUES(age),site_name=VALUES(site_name),critical_threshold=VALUES(critical_threshold),severity=VALUES(severity),sys_timestamp=VALUES(sys_timestamp),ip_address=VALUES(ip_address),warning_threshold=VALUES(warning_threshold),check_timestamp=VALUES(check_timestamp),refer=VALUES(refer)
"""
ERROR_DICT ={404:'Device not found yet',405:'No SS Connected to BS-BS is not skipped'}
ERROR_FOR_DEVICE_OMITTED = [404]
# NOTE(review): eval() on data pulled from an Airflow Variable / redis executes
# arbitrary code if those stores are writable by others — ast.literal_eval
# would be safer for plain dict/list payloads.
kpi_rules = eval(Variable.get("kpi_rules"))
DEBUG = False
# service_name -> {"data_source": ..., "sector_type": ...}; populated inside
# process_utilization_kpi() at DAG-build time.
sv_to_ds_mapping = {}
#O7_CALC_Q = "calculation_q"
O7_CALC_Q = "poller_queue"
down_and_unresponsive_devices = eval(redis_hook_static_5.get("current_down_devices_all"))
def process_utilization_kpi(
    parent_dag_name,
    child_dag_name,
    start_date,
    schedule_interval,
    celery_queue,
    ss_tech_sites,
    hostnames_ss_per_site,
    ss_name,
    utilization_attributes,
    config_sites): #here config site is list of all sites in system_config var
    """Build and return the utilization-KPI SubDAG.

    Per machine (the site-name prefix before "_") it wires the chain:
    get raw data per site (memcache) -> calculate KPI formulas ->
    format into table rows -> aggregate -> MySQL insert/update loaders.
    """
    utilization_kpi_subdag_dag = DAG(
        dag_id="%s.%s"%(parent_dag_name, child_dag_name),
        schedule_interval=schedule_interval,
        start_date=start_date,
    )
    # Populate the module-level service -> datasource/sector-type lookup used
    # later by format_data(); mutated as a side effect of building the SubDAG.
    for service in utilization_attributes:
        sv_to_ds_mapping[service.get("service_name")] = {"data_source":service.get("data_source"),"sector_type":service.get("sector_type")}
def get_calculated_ss_data():
    """Merge every per-site SS utilization dict stored in redis into one dict."""
    merged = {}
    for raw_entry in redis_hook_util_10.rget("calculated_ss_utilization_kpi"):
        # entries are stored as stringified dicts; eval() restores them
        merged.update(eval(raw_entry))
    return merged
#To create SS dict
def format_data(**kwargs):
    """Flatten calculated utilization records of one machine into rows
    matching the performance_utilization table and push them to redis
    under "formatted_util_<technology>_<machine_name>".

    kwargs['params'] must carry: technology, attributes, machine_name.
    Records arrive as stringified dicts in the redis list
    "calculated_utilization_<technology>_<machine_name>".
    """
    device_type = kwargs.get("params").get("technology")
    utilization_attributes = kwargs.get("params").get("attributes")
    machine_name = kwargs.get("params").get("machine_name")
    # Template row; every field is overwritten per service before .copy().
    ss_kpi_dict = {
        'site_name': 'unknown',
        'device_name': 'unknown',
        'service_name': 'unknown',
        'ip_address': 'unknown',
        'severity': 'unknown',
        'age': 'unknown',
        'data_source': 'unknown',
        'current_value': 'unknown',
        'warning_threshold': 'unknown',
        'critical_threshold': 'unknown',
        'check_timestamp': 'unknown',
        'sys_timestamp': 'unknown',
        'refer': 'unknown',
        'min_value': 'unknown',
        'max_value': 'unknown',
        'avg_value': 'unknown',
        'machine_name': 'unknown'
    }
    ss_data = redis_hook_util_10.rget("calculated_utilization_%s_%s" % (device_type, machine_name))
    # rewind to the previous 5-minute boundary so KPIs align with poll slots
    cur_processing_time = backtrack_x_min(time.time(), 300) + 120
    ss_devices_list = []
    for ss_device in ss_data:
        ss_device = eval(ss_device)
        hostname = ss_device.get('hostname')
        for service in ss_device.get('services'):
            data_source = sv_to_ds_mapping.get(service).get("data_source")
            pmp_type = sv_to_ds_mapping.get(service).get("sector_type")
            thresholds = get_severity_values(service)
            ss_kpi_dict['critical_threshold'] = thresholds[0]
            ss_kpi_dict['data_source'] = data_source
            ss_kpi_dict['site_name'] = ss_device.get('site')
            #TODO: ok and unknown are only 2 sev for ss we can incluudethis in rules later
            ss_kpi_dict['service_name'] = service
            ss_kpi_dict['machine_name'] = machine_name
            ss_kpi_dict['check_timestamp'] = cur_processing_time
            ss_kpi_dict['device_name'] = ss_device.get('hostname')
            ss_kpi_dict['sys_timestamp'] = cur_processing_time
            ss_kpi_dict['refer'] = ss_device.get("%s_sector" % (pmp_type))
            ss_kpi_dict['ip_address'] = ss_device.get('ipaddress')
            ss_kpi_dict['warning_threshold'] = thresholds[1]
            if not isinstance(ss_device.get(service), dict):
                # scalar service value: clamp percentages above 100
                cur_value = ss_device.get(service)
                # BUG FIX: the original tested the undefined name `curr_value`
                # here, so this clamp always raised NameError (silently
                # swallowed by the except) and never ran.
                try:
                    if isinstance(cur_value, float) and cur_value > 100.00:
                        cur_value = 100
                except Exception:
                    logging.error("Exception while handling above 100 entries")
                ss_kpi_dict['severity'] = calculate_severity(service, ss_device.get(service))
                ss_kpi_dict['age'] = calculate_age(hostname, ss_kpi_dict['severity'], ss_device.get('device_type'), cur_processing_time, service)
                ss_kpi_dict['current_value'] = cur_value
                ss_kpi_dict['avg_value'] = cur_value
                ss_kpi_dict['min_value'] = cur_value
                ss_kpi_dict['max_value'] = cur_value
                if ss_kpi_dict['current_value'] != None:
                    ss_devices_list.append(ss_kpi_dict.copy())
            else:
                # dict-valued service: one output row per data source
                for data_source in ss_device.get(service):
                    ds_values = ss_device.get(service)
                    curr_value = ds_values.get(data_source)
                    if isinstance(curr_value, str):
                        try:
                            curr_value = float(curr_value)
                            if curr_value > 100.00:
                                curr_value = 100
                        except Exception:
                            logging.error("Unable to convert to float")
                    else:
                        if curr_value > 100.00:
                            curr_value = 100
                    ss_kpi_dict['data_source'] = data_source
                    ss_kpi_dict['severity'] = calculate_severity(service, ds_values.get(data_source))
                    ss_kpi_dict['age'] = calculate_age(hostname, ss_kpi_dict['severity'], ss_device.get('device_type'), cur_processing_time, service)
                    ss_kpi_dict['current_value'] = curr_value
                    ss_kpi_dict['avg_value'] = curr_value
                    ss_kpi_dict['min_value'] = curr_value
                    ss_kpi_dict['max_value'] = curr_value
                    if ss_kpi_dict['current_value'] != None:
                        ss_devices_list.append(ss_kpi_dict.copy())
    try:
        if len(ss_devices_list) > 0:
            redis_hook_util_10.rpush("formatted_util_%s_%s" % (device_type, machine_name), ss_devices_list)
        else:
            logging.info("No %s device found in %s after formatting " % (device_type, machine_name))
    except Exception:
        logging.error("Unable to push formatted SS data to redis")
def get_required_data_ss(**kwargs):
    """Fetch raw utilization counters for every SS device of one site from
    the matching memcache cluster and push them to the redis list
    "<technology>_<machine>" for the per-machine calculate task.

    kwargs['params'] must carry: site_name, technology, attributes.
    Returns 1 when the site has no SS devices, 0 when nothing was fetched.
    """
    site_name = kwargs.get("params").get("site_name")
    device_type = kwargs.get("params").get("technology")
    utilization_attributes = kwargs.get("params").get("attributes")
    # pick the memcache cluster matching the site's network segment
    if "vrfprv" in site_name:
        memc_con = vrfprv_memc_con
    elif "pub" in site_name:
        memc_con = pub_memc_con
    else:
        memc_con = memc_con_cluster
    ss_data_dict = {}
    all_ss_data = []
    if site_name not in hostnames_ss_per_site.keys():
        logging.warning("No SS devices found for %s"%(site_name))
        return 1
    for hostnames_dict in hostnames_ss_per_site.get(site_name):
        host_name = hostnames_dict.get("hostname")
        ip_address = hostnames_dict.get("ip_address")
        ss_data_dict['hostname'] = host_name
        ss_data_dict['ipaddress'] = ip_address
        ss_data_dict['site_name'] = site_name
        # skip devices currently known to be down/unresponsive
        if host_name not in down_and_unresponsive_devices:
            for service in utilization_attributes:
                # utilization_key is a format template keyed on hostname
                ss_data_dict[service.get('service_name')] = memc_con.get(service.get('utilization_key')%(host_name))
            all_ss_data.append(ss_data_dict.copy())
    if len(all_ss_data) == 0:
        logging.info("No data Fetched ! Aborting Successfully")
        return 0
    try:
        #redis_hook_util_10.rpush("%s_%s"%(device_type,site_name),all_ss_data)
        print "++++++++++++"
        print site_name.split("_")[0]
        # redis key is keyed on machine (site_name prefix before "_")
        redis_hook_util_10.rpush("%s_%s"%(device_type,site_name.split("_")[0]),all_ss_data)
    except Exception:
        logging.warning("Unable to insert ss data into redis")
    #pprint(all_ss_data)
def calculate_utilization_data_ss(**kwargs):
    """Apply the configured KPI formula to every raw SS record of one
    machine and store the enriched records in the redis list
    "calculated_utilization_<technology>_<machine_name>".

    kwargs['params'] must carry: machine_name, technology, attributes.
    Returns 1 when there is no raw data for the machine.
    """
    machine_name = kwargs.get("params").get("machine_name")
    device_type = kwargs.get("params").get("technology")
    utilization_attributes = kwargs.get("params").get("attributes")
    devices_data_dict = redis_hook_util_10.rget("%s_%s"%(device_type,machine_name))
    if len(devices_data_dict) == 0:
        logging.info("No Data found for ss %s "%(machine_name))
        return 1
    ss_data = []
    for devices in devices_data_dict:
        devices = eval(devices)
        site_name = devices.get("site_name")
        devices['site'] = site_name
        devices['device_type'] = device_type
        for service_attributes in utilization_attributes: #loop for the all the configured services
            service = service_attributes.get('service_name')
            # track which services are KPI-enabled on this device record
            if service_attributes.get('isKpi'):
                if 'services' in devices.keys() and devices.get('services') != None:
                    devices.get('services').append(service)
                elif service and devices.get('services') == None:
                    devices['services'] = [service]
                else:
                    devices['services'] = []
            if service_attributes.get('isKpi'):
                utilization_type = service_attributes.get("utilization_type")
                # `capacity` may be referenced by the eval'd formula below
                capacity = None
                if "capacity" in service_attributes.keys():
                    capacity = service_attributes.get("capacity")
                try:
                    # NOTE(review): formula text comes from the kpi_rules
                    # Airflow Variable and is eval'd with the locals above in
                    # scope — trusted input only.
                    formula = kpi_rules.get(service).get('formula')
                    devices[service] = eval(formula)
                except Exception:
                    print "Exception in calculating data"
                    pass
            else:
                continue
        #ip_ul_mapper[devices.get('ipaddress')] = devices
        ss_data.append(devices.copy())
        #ss_utilization_list.append(ip_ul_mapper.copy())
    key="calculated_utilization_%s_%s"%(device_type,machine_name)
    redis_hook_util_10.rpush(key,ss_data)
    print "Setting ....."
    print "calculated_utilization_%s_%s"%(device_type,machine_name)
    #redis_hook_util_10.rpush("calculated_ss_utilization_kpi",ss_utilization_list)
def aggregate_utilization_data(*args,**kwargs):
    """Collect all formatted rows for one machine from redis and store the
    combined list as a single stringified key for the MySQL loader tasks.
    """
    print "Aggregating Data"
    machine_name = kwargs.get("params").get("machine_name")
    device_type = kwargs.get("params").get("technology")
    #device_type = kwargs.get("params").get("device_type")
    formatted_data=redis_hook_util_10.rget("formatted_util_%s_%s"%(device_type,machine_name))
    machine_data = []
    for site_data in formatted_data:
        machine_data.append(eval(site_data))
    # NOTE: key order is <machine>_<technology> here, reversed vs. the
    # formatted_util key above; the MySQL loader tasks use the same order.
    redis_hook_util_10.set("aggregated_utilization_%s_%s"%(machine_name,device_type),str(machine_data))
# One calculate -> format -> aggregate -> load chain is built per machine;
# a machine is the site-name prefix before the first "_".
machine_names = set([site.split("_")[0] for site in ss_tech_sites])
config_machines = set([site.split("_")[0] for site in config_sites])
aggregate_dependency_ss = {}
aggregate_dependency_bs = {}
calculate_task_list={}
format_task_list={}
#TODo Remove this if ss >> bs task
# calculate_utilization_lost_ss_bs_task = PythonOperator(
# task_id = "calculate_bs_utilization_lost_ss",
# provide_context=True,
# python_callable=calculate_utilization_data_bs,
# params={"lost_n_found":True},
# dag=utilization_kpi_subdag_dag
# )
for each_machine_name in machine_names:
    # only build tasks for machines present in the system_config variable
    if each_machine_name in config_machines:
        aggregate_utilization_data_ss_task = PythonOperator(
            task_id = "aggregate_utilization_ss_%s"%each_machine_name,
            provide_context=True,
            python_callable=aggregate_utilization_data,
            params={"machine_name":each_machine_name,"technology":ss_name},
            dag=utilization_kpi_subdag_dag,
            queue = O7_CALC_Q,
            trigger_rule = 'all_done'
        )
        aggregate_dependency_ss[each_machine_name] = aggregate_utilization_data_ss_task
        calculate_utilization_data_ss_task = PythonOperator(
            task_id = "calculate_ss_utilization_kpi_of_%s"%each_machine_name,
            provide_context=True,
            trigger_rule = 'all_done',
            python_callable=calculate_utilization_data_ss,
            params={"machine_name":each_machine_name,"technology":ss_name,'attributes':utilization_attributes},
            dag=utilization_kpi_subdag_dag,
            queue = O7_CALC_Q,
        )
        format_data_ss_task = PythonOperator(
            task_id = "format_data_of_ss_%s"%each_machine_name,
            provide_context=True,
            python_callable=format_data,
            trigger_rule = 'all_done',
            params={"machine_name":each_machine_name,"technology":ss_name,'attributes':utilization_attributes},
            dag=utilization_kpi_subdag_dag,
            queue = celery_queue,
        )
        # remember the calculate task so per-site fetch tasks can feed it
        calculate_task_list[each_machine_name] = calculate_utilization_data_ss_task
        calculate_utilization_data_ss_task >> format_data_ss_task
        format_data_ss_task >> aggregate_utilization_data_ss_task
        #we gotta create teh crazy queries WTF this is so unsafe
        # NOTE(review): the database name is interpolated directly into the
        # SQL text; only the column values are parameterized.
        INSERT_QUERY = INSERT_HEADER%("nocout_"+each_machine_name) + INSERT_TAIL
        UPDATE_QUERY = UPDATE_HEADER%("nocout_"+each_machine_name) + UPDATE_TAIL
        INSERT_QUERY = INSERT_QUERY.replace('\n','')
        UPDATE_QUERY = UPDATE_QUERY.replace('\n','')
        #ss_name == Device_type
        if not DEBUG:
            insert_data_in_mysql = MySqlLoaderOperator(
                task_id ="upload_data_%s"%(each_machine_name),
                dag=utilization_kpi_subdag_dag,
                query=INSERT_QUERY,
                #data="",
                redis_key="aggregated_utilization_%s_%s"%(each_machine_name,ss_name),
                redis_conn_id = "redis_hook_util_10",
                mysql_conn_id='mysql_uat',
                queue = O7_CALC_Q,
                trigger_rule = 'all_done'
            )
            update_data_in_mysql = MySqlLoaderOperator(
                task_id ="update_data_%s"%(each_machine_name),
                query=UPDATE_QUERY ,
                #data="",
                redis_key="aggregated_utilization_%s_%s"%(each_machine_name,ss_name),
                redis_conn_id = "redis_hook_util_10",
                mysql_conn_id='mysql_uat',
                dag=utilization_kpi_subdag_dag,
                queue = O7_CALC_Q,
                trigger_rule = 'all_done'
            )
            # both loaders run after aggregation for the machine
            update_data_in_mysql << aggregate_utilization_data_ss_task
            insert_data_in_mysql << aggregate_utilization_data_ss_task
db_list=[]
# one fetch task per site, feeding its machine's calculate task
for each_site_name in ss_tech_sites:
    if each_site_name in config_sites:
        machine = each_site_name.split("_")[0]
        get_required_data_ss_task = PythonOperator(
            task_id = "get_utilization_data_of_ss_%s"%each_site_name,
            provide_context=True,
            trigger_rule = 'all_done',
            python_callable=get_required_data_ss,
            params={"site_name":each_site_name,"technology":ss_name,'attributes':utilization_attributes},
            dag=utilization_kpi_subdag_dag,
            queue = celery_queue
        )
        get_required_data_ss_task >> calculate_task_list.get(machine)
        #calculate_utilization_data_ss_task >> format_data_ss_task
        #calculate_utilization_data_ss_task >> calculate_utilization_data_bs_task
        # try:
        # aggregate_dependency_ss[machine_name] << format_data_ss_task
        # except:
        # logging.info("Site Not Found %s"%(machine_name))
        # pass
    else:
        logging.info("Skipping %s"%(each_site_name))
return utilization_kpi_subdag_dag
| |
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
- Use sentry for error logging
- Use opbeat for error reporting
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('djangosecure', )
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
SECURITY_MIDDLEWARE = (
    'djangosecure.middleware.SecurityMiddleware',
)
RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
                    'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',)
# security + Sentry middleware are prepended so they run before everything else
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + \
    RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
# opbeat integration
# See https://opbeat.com/languages/django/
INSTALLED_APPS += ('opbeat.contrib.django',)
OPBEAT = {
    'ORGANIZATION_ID': env('DJANGO_OPBEAT_ORGANIZATION_ID'),
    'APP_ID': env('DJANGO_OPBEAT_APP_ID'),
    'SECRET_TOKEN': env('DJANGO_OPBEAT_SECRET_TOKEN')
}
MIDDLEWARE_CLASSES = (
    'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',
) + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_FRAME_DENY = env.bool('DJANGO_SECURE_FRAME_DENY', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
# NOTE(review): False lets session cookies travel over plain HTTP even though
# SECURE_SSL_REDIRECT defaults to True below — confirm this is intentional.
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ('gunicorn', )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
    'storages',
)
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
# 7 days, in seconds
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
    'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
        AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
# See:http://stackoverflow.com/questions/10390244/
# Static and media files live under separate prefixes of the same bucket.
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = 'https://s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='tunza <noreply@example.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[tunza] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# New Relic APM credentials
NEW_RELIC_LICENSE_KEY = env('NEW_RELIC_LICENSE_KEY')
NEW_RELIC_APP_NAME = env('NEW_RELIC_APP_NAME')
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0),
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'IGNORE_EXCEPTIONS': True,  # mimics memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
        }
    }
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
# Root logs WARNING+ to Sentry; noisy django/raven internals go to console only.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'sentry': {
            'level': 'ERROR',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'sentry'],
            'propagate': False,
        },
    },
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
    'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
    'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
| |
from __future__ import absolute_import
from ..spec.v2_0.objects import Parameter, Operation, Schema
from ..utils import deref, final, from_iso8601
from decimal import Decimal
import random
import six
import sys
import string
import uuid
import base64
import datetime
import time
# TODO: patternProperties
# TODO: pattern
# TODO: binary
# TODO: enum of object, array
# min/max of integer
# min/max of integer
# BUG FIX: `-` binds tighter than `<<`, so the original `1 << 31 - 1`
# evaluated as `1 << 30` (1073741824) instead of the intended int32
# maximum 2147483647; likewise for the 64-bit bound.
maxInt32 = (1 << 31) - 1
minInt32 = -maxInt32
maxInt64 = (1 << 63) - 1
minInt64 = -maxInt64
def _int_(obj, _, val=None):
    """Return *val* when supplied, else a random integer within the
    schema's bounds (format int32/int64, minimum/maximum, exclusive
    flags), snapped down to a multiple of ``multipleOf`` when given.
    """
    # BUG FIX: `if val:` discarded a legitimate val == 0 and regenerated
    # a random number instead of returning it.
    if val is not None:
        return val
    max_ = maxInt32 if getattr(obj, 'format') in ['int32', None] else maxInt64
    # `is not None` so explicit bounds of 0 are honored
    max_ = obj.maximum if obj.maximum is not None else max_
    max_ = max_ - 1 if obj.exclusiveMaximum else max_
    min_ = minInt32 if getattr(obj, 'format') in ['int32', None] else minInt64
    min_ = obj.minimum if obj.minimum is not None else min_
    min_ = min_ + 1 if obj.exclusiveMinimum else min_
    out = random.randint(min_, max_)
    if isinstance(obj.multipleOf, six.integer_types) and obj.multipleOf != 0:
        out = out - (out % obj.multipleOf)
    return out
def _float_(obj, _, val=None):
if val:
return val
# TODO: exclusiveMaximum == False is not implemented.
max_ = obj.maximum if obj.maximum else sys.float_info.max
min_ = obj.minimum if obj.minimum else sys.float_info.min
out = None
while out == None:
out = min_ + (max_ - min_) * random.random()
if obj.multipleOf and obj.multipleOf != 0:
out = int(out / obj.multipleOf) * obj.multipleOf
if out <= min_ and obj.exclusiveMinimum:
out = None
return float(out)
def _str_(obj, opt, val=None):
if val:
return val
max_ = obj.maxLength if obj.maxLength else opt['max_str_length']
min_ = obj.minLength if obj.minLength else 0
# note: length is 0~100, characters are limited to ASCII
return ''.join([random.choice(string.ascii_letters) for _ in range(random.randint(min_, max_))])
def _bool_(obj, _, val=None):
return bool(val) if val else random.randint(0, 1) == 0
def _uuid_(obj, _, val=None):
return uuid.UUID(val) if val else uuid.uuid4()
names = list(string.ascii_letters) + ['_', '-'] + list(string.digits)
def _email_name_():
return random.choice(string.ascii_letters) \
+ ''.join([random.choice(names) for _ in six.moves.xrange(random.randint(1, 30))]) \
+ random.choice(string.ascii_letters)
def _email_(obj, _, val=None):
    """Return *val* when given, else a random e-mail-shaped string.

    Shape: 1-4 dot-joined local tokens, '@', a host label (3-101 chars)
    and a region label (3-31 chars) joined by '.'.
    """
    if val:
        return val
    host_length = random.randint(2, 100)
    region_length = random.randint(2, 30)
    local_part = '.'.join(_email_name_() for _ in six.moves.xrange(random.randint(1, 4)))
    host_part = (random.choice(string.ascii_letters)
                 + ''.join(random.choice(names) for _ in six.moves.xrange(host_length)))
    region_part = (random.choice(string.ascii_letters)
                   + ''.join(random.choice(names) for _ in six.moves.xrange(region_length)))
    return local_part + '@' + host_part + '.' + region_part
def _byte_(obj, opt, val=None):
return val if val else base64.b64encode(
six.b(''.join([random.choice(string.ascii_letters) for _ in range(random.randint(0, opt['max_byte_length']))]))
)
max_date = time.mktime(datetime.date(2038, 1, 19).timetuple())
min_date = time.mktime(datetime.date(1970, 1, 2).timetuple())
def _date_(obj, _, val=None):
return from_iso8601(val).date() if val else datetime.date.fromtimestamp(
random.uniform(min_date, max_date)
)
max_datetime = time.mktime(datetime.datetime(2038, 1, 19).utctimetuple())
min_datetime = time.mktime(datetime.datetime(1970, 1, 2).utctimetuple())
def _date_time_(obj, _, val=None):
return from_iso8601(val) if val else datetime.datetime.utcfromtimestamp(
random.uniform(min_datetime, max_datetime)
)
def _file_(obj, opt, _):
if len(opt['files'] or []) > 0:
return random.choice(opt['files'])
return dict(
header={
'Content-Type': 'text/plain',
'Content-Transfer-Encoding': 'binary'
},
filename='',
data=six.moves.cStringIO(
''.join([random.choice(string.ascii_letters) for _ in range(random.randint(0, opt['max_file_length']))])
)
)
class Renderer(object):
    """Generates random sample values for Swagger 2.0 Schema/Parameter objects."""
def __init__(self):
    """Seed the RNG and build the (type, format) -> generator lookup table."""
    random.seed()
    # init map of generators
    self._map = {
        'integer': {
            '': _int_,
            None: _int_,
            'int32': _int_,
            'int64': _int_,
        },
        'number': {
            '': _float_,
            None: _float_,
            'float': _float_,
            'double': _float_,
        },
        'string': {
            '': _str_,
            None: _str_,
            'password': _str_,
            'byte': _byte_,
            'date': _date_,
            'date-time': _date_time_,
            'uuid': _uuid_,
            'email': _email_,
        },
        'boolean': {
            '': _bool_,
            None: _bool_,
        },
        'file': {
            '': _file_,
            None: _file_,
        }
    }
def _get(self, _type, _format=None):
    """Look up the generator for (type, format); None when unsupported."""
    r = self._map.get(_type, None)
    return None if r == None else r.get(_format, None)
def _generate(self, obj, opt):
    """Recursively build a random value for a (dereferenced) schema object.

    Handles 'object' and 'array' containers directly and dispatches
    primitives through the (type, format) generator map; enum values are
    picked at random and passed to the generator as `val`.
    Raises when no generator matches or the schema carries no type.
    """
    obj = final(deref(obj))
    type_ = getattr(obj, 'type', None)
    template = opt['object_template']
    out = None
    if type_ == 'object':
        max_p = opt['max_property']
        out = {}
        max_ = obj.maxProperties if obj.maxProperties else opt['max_prop_count']
        min_ = obj.minProperties if obj.minProperties else None
        count = 0
        for name, prop in six.iteritems(obj.properties or {}):
            # template values win over generated ones
            if name in template:
                out[name] = template[name]
                continue
            # optional properties may be skipped unless max_property forces them
            if not max_p and not name in obj.required:
                if random.randint(0, 1) == 0 or opt['minimal_property']:
                    continue
            out[name] = self._generate(prop, opt)
            count = count + 1
        if isinstance(obj.additionalProperties, Schema):
            # TODO: additionalProperties == True is not handled
            # generate random properties
            # NOTE(review): min_ is None when minProperties is unset, and
            # random.randint(None, max_) would raise TypeError — confirm
            # callers always set minProperties with additionalProperties.
            more = random.randint(min_, max_) - count
            if more > 0:
                # generate a random string as property-name
                for _ in six.moves.xrange(more):
                    while True:
                        length = random.randint(0, opt['max_name_length'])
                        name = ''.join([random.choice(string.ascii_letters) for _ in six.moves.xrange(length)])
                        if name not in out:
                            out[name] = self._generate(obj.additionalProperties, opt)
                            break
    elif type_ == 'array':
        min_ = obj.minItems if obj.minItems else 0
        max_ = obj.maxItems if obj.maxItems else opt['max_array_length']
        out = []
        for _ in six.moves.xrange(random.randint(min_, max_)):
            out.append(self._generate(obj.items, opt))
    elif type_ != None:
        out = None
        # enum choice is handed to the generator as a preset value
        if len(obj.enum or []) > 0:
            out = random.choice(obj.enum)
        g = self._get(getattr(obj, 'type', None), getattr(obj, 'format', None))
        if not g:
            raise Exception('Unable to locate generator: {0}'.format(obj))
        out = g(obj, opt, out)
    else:
        raise Exception('No type info available:{0}, {1}'.format(obj.type, obj.format))
    return out
@staticmethod
def default():
    """ return default options, available options:
    - max_name_length: maximum length of name for additionalProperties
    - max_prop_count: maximum count of properties (count of fixed properties + additional properties)
    - max_str_length: maximum length of string type
    - max_byte_length: maximum length of byte type
    - max_array_length: maximum length of array
    - max_file_length: maximum length of file, in byte
    - minimal_property: only generate 'required' properties
    - minimal_parameter: only generate 'required' parameter
    - files: registered file object: refer to pyswagger.primitives.File for details
    - object_template: dict of default values assigned for properties when 'name' matched
    - parameter_template: dict of default values assigned for parameters when 'name' matched
    - max_property: all properties are generated, ignore 'required'
    - max_parameter: all parameters are generated, ignore 'required'
    :return: options
    :rtype: dict
    """
    return dict(
        max_name_length=64,
        max_prop_count=32,
        max_str_length=100,
        max_byte_length=100,
        max_array_length=100,
        max_file_length=200,
        minimal_property=False,
        minimal_parameter=False,
        files=[],
        object_template={},
        parameter_template={},
        max_property=False,
        max_parameter=False,
    )
def render(self, obj, opt=None):
    """ render a Schema/Parameter

    :param obj Schema/Parameter: the swagger spec object
    :param opt dict: render option
    :return: values that can be passed to Operation.__call__
    :rtype: depends on type of 'obj'
    :raises ValueError: when opt is not a dict, or obj is neither a
        Schema nor a Parameter
    """
    # BUG FIX: identity comparison against None (was 'opt == None')
    opt = self.default() if opt is None else opt
    if not isinstance(opt, dict):
        raise ValueError('Not a dict: {0}'.format(opt))
    if isinstance(obj, Parameter):
        # body parameters carry their payload schema in obj.schema;
        # all other parameter kinds are rendered directly
        if getattr(obj, 'in', None) == 'body':
            return self._generate(obj.schema, opt)
        return self._generate(obj, opt=opt)
    elif isinstance(obj, Schema):
        return self._generate(obj, opt)
    else:
        raise ValueError('Not a Schema/Parameter: {0}'.format(obj))
def render_all(self, op, exclude=None, opt=None):
    """ render a set of parameter for an Operation

    :param op Operation: the swagger spec object
    :param exclude list: parameter names to skip (default: none)
    :param opt dict: render option
    :return: a set of parameters that can be passed to Operation.__call__
    :rtype: dict
    :raises ValueError: when op is not an Operation or opt is not a dict
    """
    # BUG FIX: 'exclude=[]' was a mutable default argument (shared across
    # calls); use a None sentinel and build a fresh list per call.
    exclude = [] if exclude is None else exclude
    # identity comparison against None (was 'opt == None')
    opt = self.default() if opt is None else opt
    if not isinstance(op, Operation):
        raise ValueError('Not a Operation: {0}'.format(op))
    if not isinstance(opt, dict):
        raise ValueError('Not a dict: {0}'.format(opt))
    template = opt['parameter_template']
    max_p = opt['max_parameter']
    out = {}
    for p in op.parameters:
        if p.name in exclude:
            continue
        # fixed template values win over random generation
        if p.name in template:
            out.update({p.name: template[p.name]})
            continue
        # randomly drop optional parameters; always drop them in
        # minimal_parameter mode
        if not max_p and not p.required:
            if random.randint(0, 1) == 0 or opt['minimal_parameter']:
                continue
        out.update({p.name: self.render(p, opt=opt)})
    return out
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to add support for magnitude-based model pruning.
# Adds variables and ops to the graph to enable
# elementwise masking of weights
apply_mask(weights)
# Returns a list containing the sparsity of each of the weight tensors
get_weight_sparsity()
# Returns a list of all the masked weight tensorflow variables
get_masked_weights()
# Returns a list of all the mask tensorflow variables
get_masks()
# Returns a list of all the thresholds
get_thresholds()
# Returns a list of all the weight tensors that have been masked
get_weights()
The Pruning class uses a tf.hparams object to set up the
parameters for a model pruning. Here's a typical usage:
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Add the summaries
p.add_pruning_summaries()
# Run the op
session.run(mask_update_op)
# An object of the pruning also accepts externally defined sparsity:
sparsity = tf.Variable(0.5, name = "ConstantSparsity")
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.model_pruning.python import pruning_utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
# Graph-collection names re-exported from core_layers so that layers built
# there and the accessor helpers in this module stay in sync.
_MASK_COLLECTION = core.MASK_COLLECTION
_THRESHOLD_COLLECTION = core.THRESHOLD_COLLECTION
_MASKED_WEIGHT_COLLECTION = core.MASKED_WEIGHT_COLLECTION
_WEIGHT_COLLECTION = core.WEIGHT_COLLECTION
# Name given to the mask*weight product op created by apply_mask().
_MASKED_WEIGHT_NAME = core.MASKED_WEIGHT_NAME
def apply_mask(x, scope=''):
    """Apply mask to a given weight tensor.

    Args:
      x: Input weight tensor
      scope: The current variable scope. Defaults to "".
    Returns:
      Tensor representing masked_weights
    """
    mask_var = pruning_utils.weight_mask_variable(x, scope)
    threshold_var = pruning_utils.weight_threshold_variable(x, scope)
    # The multiply is created inside the weights name scope so the
    # quantization library can easily attach quant ops to it.
    masked = math_ops.multiply(mask_var, x, _MASKED_WEIGHT_NAME)
    # Register the bookkeeping collections at most once per mask; shared
    # variables (e.g. RNN weights) can reach this code path repeatedly.
    already_tracked = mask_var in ops.get_collection_ref(_MASK_COLLECTION)
    if not already_tracked:
        ops.add_to_collection(_THRESHOLD_COLLECTION, threshold_var)
        ops.add_to_collection(_MASK_COLLECTION, mask_var)
        ops.add_to_collection(_MASKED_WEIGHT_COLLECTION, masked)
        ops.add_to_collection(_WEIGHT_COLLECTION, x)
    return masked
def get_masked_weights():
    """Return all mask*weight product tensors registered via apply_mask()."""
    return ops.get_collection(_MASKED_WEIGHT_COLLECTION)
def get_masks():
    """Return all mask variables registered via apply_mask()."""
    return ops.get_collection(_MASK_COLLECTION)
def get_thresholds():
    """Return all threshold variables registered via apply_mask()."""
    return ops.get_collection(_THRESHOLD_COLLECTION)
def get_weights():
    """Return all raw (unmasked) weight tensors registered via apply_mask()."""
    return ops.get_collection(_WEIGHT_COLLECTION)
def get_weight_sparsity():
    """Get sparsity of the weights.

    Args:
      None
    Returns:
      A list containing the sparsity of each of the weight tensors
    """
    # zero_fraction of a mask == fraction of pruned-away weights.
    return [nn_impl.zero_fraction(m) for m in get_masks()]
def get_pruning_hparams():
    """Get a tf.HParams object with the default values for the hyperparameters.

    name: string
      name of the pruning specification. Used for adding summaries and ops
      under a common tensorflow name_scope
    begin_pruning_step: integer
      the global step at which to begin pruning
    end_pruning_step: integer
      the global step at which to terminate pruning. Defaults to -1 implying
      that pruning continues till the training stops
    do_not_prune: list of strings
      list of layers that are not pruned
    threshold_decay: float
      the decay factor to use for exponential decay of the thresholds
    pruning_frequency: integer
      How often should the masks be updated? (in # of global_steps)
    nbins: integer
      number of bins to use for histogram computation
    block_height: integer
      number of rows in a block (defaults to 1)
    block_width: integer
      number of cols in a block (defaults to 1)
    block_pooling_function: string
      Whether to perform average (AVG) or max (MAX) pooling in the block
      (default: AVG)
    initial_sparsity: float
      initial sparsity value
    target_sparsity: float
      target sparsity value
    sparsity_function_begin_step: integer
      the global step at this which the gradual sparsity function begins to
      take effect
    sparsity_function_end_step: integer
      the global step used as the end point for the gradual sparsity function
    sparsity_function_exponent: float
      exponent = 1 is linearly varying sparsity between initial and final.
      exponent > 1 varies more slowly towards the end than the beginning
    use_tpu: False
      Indicates whether to use TPU

    We use the following sparsity function:
    num_steps = (sparsity_function_end_step -
                 sparsity_function_begin_step)/pruning_frequency
    sparsity(step) = (initial_sparsity - target_sparsity)*
                     [1-step/(num_steps -1)]**exponent + target_sparsity

    Args:
      None
    Returns:
      tf.HParams object initialized to default values
    """
    defaults = {
        'name': 'model_pruning',
        'begin_pruning_step': 0,
        'end_pruning_step': -1,
        'do_not_prune': [''],
        'threshold_decay': 0.9,
        'pruning_frequency': 10,
        'nbins': 256,
        'block_height': 1,
        'block_width': 1,
        'block_pooling_function': 'AVG',
        'initial_sparsity': 0,
        'target_sparsity': 0.5,
        'sparsity_function_begin_step': 0,
        'sparsity_function_end_step': 100,
        'sparsity_function_exponent': 3,
        'use_tpu': False,
    }
    return hparam.HParams(**defaults)
class Pruning(object):
    """Builds the ops that implement magnitude-based weight pruning.

    Reads masks/thresholds/weights from the module-level collections that
    apply_mask() populates, and produces assignment ops that recompute each
    mask so that the fraction of zeroed weights tracks the sparsity schedule.
    """
    def __init__(self, spec=None, global_step=None, sparsity=None):
        """Set up the specification for model pruning.
        If a spec is provided, the sparsity is set up based on the sparsity_function
        in the spec. The effect of sparsity_function is overridden if the sparsity
        variable is passed to the constructor. This enables setting up arbitrary
        sparsity profiles externally and passing it to this pruning functions.
        Args:
          spec: Pruning spec as defined in pruning.proto
          global_step: A tensorflow variable that is used while setting up the
            sparsity function
          sparsity: A tensorflow scalar variable storing the sparsity
        """
        # Pruning specification
        self._spec = spec if spec else get_pruning_hparams()
        # A tensorflow variable that tracks the sparsity function.
        # If not provided as input, the graph must already contain the global_step
        # variable before calling this constructor.
        self._global_step = self._setup_global_step(global_step)
        # Stores the tensorflow sparsity variable.
        # Built using self._setup_sparsity() or provided externally
        self._sparsity = sparsity if sparsity else self._setup_sparsity()
        # List of tensorflow assignments ops for new masks and thresholds
        self._assign_ops = []
        # Tensorflow variable keeping track of the last global step when the masks
        # were updated
        self._last_update_step = self._setup_last_update_step()
        # Block dimensions
        self._block_dim = [self._spec.block_height, self._spec.block_width]
        # Block pooling function
        self._block_pooling_function = self._spec.block_pooling_function
    def _setup_global_step(self, global_step):
        """Return the provided (or graph-default) global step cast to int32."""
        graph_global_step = global_step
        if graph_global_step is None:
            # No step supplied: the graph must already define one.
            graph_global_step = training_util.get_global_step()
        # int32 so comparisons with the integer spec fields are well typed.
        return math_ops.cast(graph_global_step, dtypes.int32)
    def _setup_sparsity(self):
        """Build the gradual-sparsity schedule tensor.

        sparsity(step) = (initial - target) * (1 - p)**exponent + target,
        where p is the fraction of the [begin_step, end_step] window elapsed,
        clipped to [0, 1].

        Returns:
          A scalar float32 tensor named '<spec.name>/sparsity'.
        Raises:
          ValueError: if begin_step >= end_step.
        """
        begin_step = self._spec.sparsity_function_begin_step
        end_step = self._spec.sparsity_function_end_step
        initial_sparsity = self._spec.initial_sparsity
        target_sparsity = self._spec.target_sparsity
        exponent = self._spec.sparsity_function_exponent
        if begin_step >= end_step:
            raise ValueError(
                'Pruning must begin before it can end. begin_step=%d, end_step=%d' %
                (begin_step, end_step))
        with ops.name_scope(self._spec.name):
            # p: elapsed fraction of the sparsity ramp, clipped to [0, 1].
            p = math_ops.minimum(
                1.0,
                math_ops.maximum(
                    0.0,
                    math_ops.div(
                        math_ops.cast(self._global_step - begin_step, dtypes.float32),
                        end_step - begin_step)))
            sparsity = math_ops.add(
                math_ops.multiply(initial_sparsity - target_sparsity,
                                  math_ops.pow(1 - p, exponent)),
                target_sparsity,
                name='sparsity')
        return sparsity
    def _setup_last_update_step(self):
        """Create (or reuse) the int32 variable recording the last mask update."""
        with variable_scope.variable_scope(
                self._spec.name, use_resource=self._spec.use_tpu) as scope:
            try:
                last_update_step = variable_scope.get_variable(
                    'last_mask_update_step', [],
                    initializer=init_ops.zeros_initializer(),
                    trainable=False,
                    dtype=dtypes.int32)
            except ValueError:
                # Variable already exists (e.g. shared/multi-tower graphs):
                # switch the scope to reuse mode and fetch it.
                scope.reuse_variables()
                last_update_step = variable_scope.get_variable(
                    'last_mask_update_step', dtype=dtypes.int32)
        return last_update_step
    def _exists_in_do_not_prune_list(self, tensor_name):
        """True if tensor_name contains any entry of spec.do_not_prune."""
        do_not_prune_list = self._spec.do_not_prune
        # The default spec value is [''] — an empty first entry means
        # "no exclusions", i.e. prune everything.
        if not do_not_prune_list[0]:
            return False
        for layer_name in do_not_prune_list:
            # Substring match, not exact match.
            if tensor_name.find(layer_name) != -1:
                return True
        return False
    def _update_mask(self, weights, threshold):
        """Updates the mask for a given weight tensor.
        This functions first computes the cdf of the weight tensor, and estimates
        the threshold value such that 'desired_sparsity' fraction of weights
        have magnitude less than the threshold.
        Args:
          weights: The weight tensor that needs to be masked.
          threshold: The current threshold value. The function will compute a new
            threshold and return the exponential moving average using the current
            value of threshold
        Returns:
          new_threshold: The new value of the threshold based on weights, and
            sparsity at the current global_step
          new_mask: A numpy array of the same size and shape as weights containing
            0 or 1 to indicate which of the values in weights falls below
            the threshold
        Raises:
          ValueError: if sparsity is not defined
        """
        if self._sparsity is None:
            raise ValueError('Sparsity variable undefined')
        with ops.name_scope(weights.op.name + '_pruning_ops'):
            abs_weights = math_ops.abs(weights)
            max_value = math_ops.reduce_max(abs_weights)
            # Histogram-based cdf on CPU/GPU; the TPU path uses a
            # histogram-free implementation.
            cdf_fn = pruning_utils.compute_cdf_from_histogram
            if self._spec.use_tpu:
                cdf_fn = pruning_utils.compute_cdf
            norm_cdf = cdf_fn(abs_weights, [0.0, max_value], nbins=self._spec.nbins)
            # Threshold = (fraction of cdf bins below the target sparsity)
            # scaled by max |w|.
            current_threshold = math_ops.multiply(
                math_ops.div(
                    math_ops.reduce_sum(
                        math_ops.cast(
                            math_ops.less(norm_cdf, self._sparsity), dtypes.float32)),
                    float(self._spec.nbins)), max_value)
            # Exponential moving average of the threshold, weighted by
            # spec.threshold_decay, to damp update-to-update jitter.
            smoothed_threshold = math_ops.add_n([
                math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
                math_ops.multiply(threshold, self._spec.threshold_decay)
            ])
            # 1.0 where |w| exceeds the smoothed threshold, else 0.0.
            new_mask = math_ops.cast(
                math_ops.greater(abs_weights, smoothed_threshold), dtypes.float32)
            return smoothed_threshold, new_mask
    def _maybe_update_block_mask(self, weights, threshold):
        """Performs block-granular masking of the weights.
        Block pruning occurs only if the block_height or block_width is > 1 and
        if the weight tensor, when squeezed, has ndims = 2. Otherwise, elementwise
        pruning occurs.
        Args:
          weights: The weight tensor that needs to be masked.
          threshold: The current threshold value. The function will compute a new
            threshold and return the exponential moving average using the current
            value of threshold
        Returns:
          new_threshold: The new value of the threshold based on weights, and
            sparsity at the current global_step
          new_mask: A numpy array of the same size and shape as weights containing
            0 or 1 to indicate which of the values in weights falls below
            the threshold
        Raises:
          ValueError: if block pooling function is not AVG or MAX
        """
        squeezed_weights = array_ops.squeeze(weights)
        # Fall back to elementwise pruning for non-matrix weights or 1x1 blocks.
        if squeezed_weights.get_shape().ndims != 2 or self._block_dim == [1, 1]:
            return self._update_mask(weights, threshold)
        if self._block_pooling_function not in ['AVG', 'MAX']:
            raise ValueError('Unknown pooling function for block sparsity: %s' %
                             self._block_pooling_function)
        with ops.name_scope(weights.op.name + '_pruning_ops'):
            abs_weights = math_ops.abs(squeezed_weights)
            pool_window = [self._block_dim[0], self._block_dim[1]]
            pool_fn = pruning_utils.factorized_pool
            if not self._spec.use_tpu:
                pool_fn = nn_ops.pool
                # nn_ops.pool expects a 4-D NHWC tensor; add batch and
                # channel dims of size 1.
                abs_weights = array_ops.reshape(
                    abs_weights,
                    [1, abs_weights.get_shape()[0],
                     abs_weights.get_shape()[1], 1])
            # Reduce each block to a single representative magnitude.
            pooled_weights = pool_fn(
                abs_weights,
                window_shape=pool_window,
                pooling_type=self._block_pooling_function,
                strides=pool_window,
                padding='SAME',
                name=weights.op.name + '_pooled')
            if pooled_weights.get_shape().ndims != 2:
                pooled_weights = array_ops.squeeze(pooled_weights)
            # Prune at block granularity, then expand the block mask back to
            # elementwise shape via a Kronecker product with an all-ones block.
            smoothed_threshold, new_mask = self._update_mask(pooled_weights,
                                                             threshold)
            updated_mask = pruning_utils.kronecker_product(
                new_mask, array_ops.ones(self._block_dim))
            # 'SAME' padding may overshoot the weight shape; trim the excess.
            sliced_mask = array_ops.slice(
                updated_mask, [0, 0],
                [squeezed_weights.get_shape()[0],
                 squeezed_weights.get_shape()[1]])
        return smoothed_threshold, array_ops.reshape(sliced_mask,
                                                     array_ops.shape(weights))
    def _get_mask_assign_ops(self):
        """Populate self._assign_ops with threshold/mask assignment ops."""
        # Make sure the assignment ops have not already been added to the list
        if self._assign_ops:
            raise ValueError(
                'Assign op list not empty. _get_mask_assign_ops() called twice?')
        masks = get_masks()
        weights = get_weights()
        thresholds = get_thresholds()
        if len(masks) != len(thresholds):
            raise ValueError(
                'Number of masks %s and number of thresholds %s mismatch' %
                (len(masks), len(thresholds)))
        for index, mask in enumerate(masks):
            threshold = thresholds[index]
            weight = weights[index]
            is_partitioned = isinstance(weight, variables.PartitionedVariable)
            if is_partitioned:
                # Work on the reassembled tensor; the assignment below scatters
                # the new mask back across the partitions.
                weight = weight.as_tensor()
            if self._spec.do_not_prune:
                if self._exists_in_do_not_prune_list(mask.name):
                    continue
            new_threshold, new_mask = self._maybe_update_block_mask(weight, threshold)
            self._assign_ops.append(
                pruning_utils.variable_assign(threshold, new_threshold))
            self._assign_ops.append(
                pruning_utils.partitioned_variable_assign(mask, new_mask)
                if is_partitioned else pruning_utils.variable_assign(mask, new_mask))
    def mask_update_op(self):
        """Return an op that recomputes every mask and records the step."""
        with ops.name_scope(self._spec.name):
            if not self._assign_ops:
                self._get_mask_assign_ops()
            # Order: record the update step, then run all mask/threshold
            # assignments, then emit the grouping no_op callers run.
            with ops.control_dependencies([
                state_ops.assign(
                    self._last_update_step,
                    self._global_step,
                    name='last_mask_update_step_assign')
            ]):
                with ops.control_dependencies(self._assign_ops):
                    logging.info('Updating masks.')
                    return control_flow_ops.no_op('mask_update')
    def conditional_mask_update_op(self):
        """Return mask_update_op() gated on the pruning schedule.

        Masks are only recomputed while the global step lies inside
        [begin_pruning_step, end_pruning_step] and at most once per
        pruning_frequency steps.
        """
        def maybe_update_masks():
            # Predicate tensor: are we inside the pruning window AND due
            # for an update?
            with ops.name_scope(self._spec.name):
                is_step_within_pruning_range = math_ops.logical_and(
                    math_ops.greater_equal(self._global_step,
                                           self._spec.begin_pruning_step),
                    # If end_pruning_step is negative, keep pruning forever!
                    math_ops.logical_or(
                        math_ops.less_equal(self._global_step,
                                            self._spec.end_pruning_step),
                        math_ops.less(self._spec.end_pruning_step, 0)))
                is_pruning_step = math_ops.less_equal(
                    math_ops.add(self._last_update_step, self._spec.pruning_frequency),
                    self._global_step)
                return math_ops.logical_and(is_step_within_pruning_range,
                                            is_pruning_step)
        def mask_update_op():
            return self.mask_update_op()
        def no_update_op():
            return control_flow_ops.no_op()
        return control_flow_ops.cond(maybe_update_masks(), mask_update_op,
                                     no_update_op)
    def add_pruning_summaries(self):
        """Adds summaries for this pruning spec.
        Args: none
        Returns: none
        """
        with ops.name_scope(self._spec.name + '_summaries'):
            summary.scalar('sparsity', self._sparsity)
            summary.scalar('last_mask_update_step', self._last_update_step)
            masks = get_masks()
            thresholds = get_thresholds()
            for mask, threshold in zip(masks, thresholds):
                # Excluded layers get no per-layer summaries either.
                if not self._exists_in_do_not_prune_list(mask.name):
                    summary.scalar(mask.op.name + '/sparsity',
                                   nn_impl.zero_fraction(mask))
                    summary.scalar(threshold.op.name + '/threshold', threshold)
    def print_hparams(self):
        """Log the pruning spec as JSON (via tf logging)."""
        logging.info(self._spec.to_json())
| |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import mock
import netaddr
from neutron.common import exceptions
from quark.db import models
from quark import exceptions as quark_exceptions
from quark.plugin_modules import floating_ips
from quark.tests import test_quark_plugin
class TestRemoveFloatingIPs(test_quark_plugin.TestQuarkPlugin):
    """delete_floatingip behaviour with the db/ipam/driver layers stubbed."""

    @contextlib.contextmanager
    def _stubs(self, flip=None):
        # floating_ip_find returns a model built from `flip`, or None when
        # no fixture was supplied (simulating a missing floating ip).
        ip_model = None
        if flip:
            ip_model = models.IPAddress()
            ip_model.update(flip)
        with contextlib.nested(
            mock.patch("quark.db.api.floating_ip_find"),
            mock.patch("quark.ipam.QuarkIpam.deallocate_ip_address"),
            mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
                       ".remove_floating_ip")
        ) as (flip_find, mock_dealloc, mock_remove_flip):
            flip_find.return_value = ip_model
            yield

    def test_delete_floating_by_ip_address_id(self):
        fixture = dict(id=1, address=3232235876,
                       address_readable="192.168.1.100",
                       subnet_id=1, network_id=2, version=4,
                       used_by_tenant_id=1,
                       network=dict(ipam_strategy="ANY"))
        with self._stubs(flip=fixture):
            self.plugin.delete_floatingip(self.context, 1)

    def test_delete_floating_by_when_ip_address_does_not_exists_fails(self):
        # No fixture -> floating_ip_find returns None -> FloatingIpNotFound.
        with self._stubs():
            with self.assertRaises(quark_exceptions.FloatingIpNotFound):
                self.plugin.delete_floatingip(self.context, 1)
class TestFloatingIPUtilityMethods(test_quark_plugin.TestQuarkPlugin):
    """Tests for floating_ips._get_next_available_fixed_ip."""

    def _fixed_ip(self, addr):
        # Build a 'fixed' IPAddress model for a dotted-quad string.
        ip = netaddr.IPAddress(addr)
        model = models.IPAddress()
        model.update(dict(address_type="fixed", address=int(ip),
                          version=4, address_readable=str(ip),
                          allocated_at=datetime.datetime.now()))
        return model

    def _floating_ip(self, addr):
        # Build a 'floating' IPAddress model for a dotted-quad string.
        ip = netaddr.IPAddress(addr)
        model = models.IPAddress()
        model.update(dict(address_type="floating", address=int(ip),
                          version=4, address_readable=str(ip),
                          allocated_at=datetime.datetime.now()))
        return model

    def test_get_next_available_fixed_ip_with_single_fixed_ip(self):
        port = models.Port()
        port.update(dict(id=1))
        port.ip_addresses.append(self._fixed_ip('192.168.0.1'))
        next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
        self.assertEqual(next_fixed_ip["address_readable"], '192.168.0.1')

    def test_get_next_available_fixed_ip_with_mult_fixed_ips(self):
        port = models.Port()
        port.update(dict(id=1))
        for ip_addr in ["192.168.0.1", "192.168.0.2", "192.168.0.3"]:
            port.ip_addresses.append(self._fixed_ip(ip_addr))
        next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
        self.assertEqual(next_fixed_ip["address_readable"], '192.168.0.1')

    def test_get_next_available_fixed_ip_with_no_avail_fixed_ips(self):
        # Every fixed ip already has a floating ip attached -> None.
        port = models.Port()
        port.update(dict(id=1))
        for fixed_addr, float_addr in [("192.168.0.1", "10.0.0.1"),
                                       ("192.168.0.2", "10.0.0.2")]:
            fixed = self._fixed_ip(fixed_addr)
            flip = self._floating_ip(float_addr)
            flip.fixed_ip = fixed
            port.ip_addresses.append(fixed)
            port.ip_addresses.append(flip)
        next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
        self.assertEqual(next_fixed_ip, None)

    def test_get_next_available_fixed_ip_with_avail_fixed_ips(self):
        port = models.Port()
        port.update(dict(id=1))
        taken_fixed = self._fixed_ip("192.168.0.1")
        flip = self._floating_ip("10.0.0.1")
        flip.fixed_ip = taken_fixed
        port.ip_addresses.append(taken_fixed)
        port.ip_addresses.append(flip)
        free_fixed = self._fixed_ip("192.168.0.2")
        port.ip_addresses.append(free_fixed)
        # NOTE: the original test appended the same floating model a second
        # time here (not a new one); preserved to keep behavior identical.
        port.ip_addresses.append(flip)
        next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
        self.assertEqual(next_fixed_ip["address_readable"], "192.168.0.2")
class TestCreateFloatingIPs(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, flip=None, port=None, ips=None, network=None):
    """Patch db/ipam/driver collaborators for create_floatingip tests.

    flip/port/network are plain dicts used to build the models returned by
    the patched floating_ip_find/port_find/network_find. ips is a list of
    ip dicts attached to the port model; a 'floating' entry carrying a
    'fixed_ip_addr' key is linked to the matching fixed ip entry.
    """
    port_model = None
    if port:
        port_model = models.Port()
        port_model.update(dict(port=port))
        if ips:
            for ip in ips:
                ip_model = models.IPAddress()
                ip_model.update(ip)
                if (ip["address_type"] == "floating"
                        and "fixed_ip_addr" in ip):
                    # Link the floating entry to the fixed ip whose readable
                    # address matches its 'fixed_ip_addr'.
                    fixed_ip = models.IPAddress()
                    fixed_ip.update(next(ip_addr for ip_addr in ips
                                         if (ip_addr["address_readable"] ==
                                             ip["fixed_ip_addr"])))
                    ip_model.fixed_ip = fixed_ip
                port_model.ip_addresses.append(ip_model)
    flip_model = None
    if flip:
        flip_model = models.IPAddress()
        flip_model.update(flip)
    net_model = None
    if network:
        net_model = models.Network()
        net_model.update(network)
    def _alloc_ip(context, new_addr, net_id, port_m, *args, **kwargs):
        # Stubbed QuarkIpam.allocate_ip_address: "allocates" by appending
        # the prebuilt flip model to the caller's address list.
        new_addr.append(flip_model)
    def _flip_fixed_ip_assoc(context, addr, fixed_ip):
        # Stubbed db association: attach the fixed ip to the flip model.
        addr.fixed_ip = fixed_ip
        return addr
    # NOTE: the order of patches here is bound to the 'as' tuple below.
    with contextlib.nested(
        mock.patch("quark.db.api.floating_ip_find"),
        mock.patch("quark.db.api.network_find"),
        mock.patch("quark.db.api.port_find"),
        mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
        mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
                   ".register_floating_ip"),
        mock.patch("quark.db.api.floating_ip_associate_fixed_ip")
    ) as (flip_find, net_find, port_find, alloc_ip, mock_reg_flip, assoc):
        flip_find.return_value = flip_model
        net_find.return_value = net_model
        port_find.return_value = port_model
        alloc_ip.side_effect = _alloc_ip
        assoc.side_effect = _flip_fixed_ip_assoc
        yield
def test_create_with_a_port(self):
    """Creating a flip against a port associates the port's fixed ip."""
    flip_addr = netaddr.IPAddress("10.0.0.1")
    flip_fixture = dict(id=1, address=int(flip_addr), version=4,
                        address_readable=str(flip_addr), subnet_id=1,
                        network_id=2, used_by_tenant_id=1)
    net_fixture = dict(id="00000000-0000-0000-0000-000000000000",
                       ipam_strategy="ANY")
    fixed_addr = netaddr.IPAddress("192.168.0.1")
    ips = [dict(address_type="fixed", address=int(fixed_addr),
                version=4, address_readable=str(fixed_addr),
                allocated_at=datetime.datetime.now())]
    port_fixture = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
    with self._stubs(flip=flip_fixture, port=port_fixture,
                     ips=ips, network=net_fixture):
        request = dict(floating_network_id=net_fixture["id"],
                       port_id=port_fixture["id"])
        result = self.plugin.create_floatingip(self.context,
                                               dict(floatingip=request))
        self.assertEqual(result["floating_ip_address"], "10.0.0.1")
        self.assertEqual(result["fixed_ip_address"], "192.168.0.1")
def test_create_without_a_port(self):
    """Without a port, the flip is created with no fixed ip attached."""
    flip_addr = netaddr.IPAddress("10.0.0.1")
    flip_fixture = dict(id=1, address=int(flip_addr), version=4,
                        address_readable=str(flip_addr), subnet_id=1,
                        network_id=2, used_by_tenant_id=1)
    net_fixture = dict(id="00000000-0000-0000-0000-000000000000",
                       ipam_strategy="ANY")
    fixed_addr = netaddr.IPAddress("192.168.0.1")
    ips = [dict(address_type="fixed", address=int(fixed_addr),
                version=4, address_readable=str(fixed_addr),
                allocated_at=datetime.datetime.now())]
    with self._stubs(flip=flip_fixture, port=None,
                     ips=ips, network=net_fixture):
        request = dict(floating_network_id=net_fixture["id"], port_id=None)
        result = self.plugin.create_floatingip(self.context,
                                               dict(floatingip=request))
        self.assertEqual(result["floating_ip_address"], "10.0.0.1")
        self.assertEqual(result.get("fixed_ip_address"), None)
def test_create_with_fixed_ip_specified(self):
    """An explicitly requested fixed ip wins over the first available one."""
    flip_addr = netaddr.IPAddress("10.0.0.1")
    flip_fixture = dict(id=1, address=int(flip_addr), version=4,
                        address_readable=str(flip_addr), subnet_id=1,
                        network_id=2, used_by_tenant_id=1)
    net_fixture = dict(id="00000000-0000-0000-0000-000000000000",
                       ipam_strategy="ANY")
    ips = []
    for ip_addr in ["192.168.0.1", "192.168.0.2"]:
        fixed_addr = netaddr.IPAddress(ip_addr)
        ips.append(dict(address_type="fixed", version=4,
                        address=int(fixed_addr),
                        address_readable=str(fixed_addr),
                        allocated_at=datetime.datetime.now()))
    port_fixture = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
    with self._stubs(flip=flip_fixture, port=port_fixture,
                     ips=ips, network=net_fixture):
        request = dict(floating_network_id=net_fixture["id"],
                       port_id=port_fixture["id"],
                       fixed_ip_address="192.168.0.2")
        result = self.plugin.create_floatingip(self.context,
                                               dict(floatingip=request))
        self.assertEqual(result["floating_ip_address"], "10.0.0.1")
        self.assertEqual(result["fixed_ip_address"], "192.168.0.2")
def test_create_with_floating_ip_specified(self):
    """Requesting a specific floating address yields that address."""
    flip_addr = netaddr.IPAddress("10.0.0.1")
    flip_fixture = dict(id=1, address=int(flip_addr), version=4,
                        address_readable=str(flip_addr), subnet_id=1,
                        network_id=2, used_by_tenant_id=1)
    net_fixture = dict(id="00000000-0000-0000-0000-000000000000",
                       ipam_strategy="ANY")
    fixed_addr = netaddr.IPAddress("192.168.0.1")
    ips = [dict(address_type="fixed", address=int(fixed_addr),
                version=4, address_readable=str(fixed_addr),
                allocated_at=datetime.datetime.now())]
    port_fixture = dict(id=2)
    with self._stubs(flip=flip_fixture, port=port_fixture,
                     ips=ips, network=net_fixture):
        request = dict(floating_network_id=net_fixture["id"],
                       port_id=port_fixture["id"],
                       floating_ip_address="10.0.0.1")
        result = self.plugin.create_floatingip(self.context,
                                               dict(floatingip=request))
        self.assertEqual(result["floating_ip_address"], "10.0.0.1")
        self.assertEqual(result["fixed_ip_address"], "192.168.0.1")
def test_create_without_network_id_fails(self):
    """floating_network_id is mandatory."""
    with self._stubs():
        request = dict(port_id=2, floating_ip_address="10.0.0.1")
        with self.assertRaises(exceptions.BadRequest):
            self.plugin.create_floatingip(self.context,
                                          dict(floatingip=request))
def test_create_with_invalid_network_fails(self):
    """An unknown network id raises NetworkNotFound."""
    with self._stubs():
        request = dict(floating_network_id=123,
                       port_id=2, floating_ip_address="10.0.0.1")
        with self.assertRaises(exceptions.NetworkNotFound):
            self.plugin.create_floatingip(self.context,
                                          dict(floatingip=request))
def test_create_with_invalid_port_fails(self):
    """A valid network but unknown port id raises PortNotFound."""
    net_fixture = dict(id="00000000-0000-0000-0000-000000000000",
                       ipam_strategy="ANY")
    with self._stubs(network=net_fixture):
        request = dict(floating_network_id=net_fixture["id"],
                       port_id=2, floating_ip_address="10.0.0.1")
        with self.assertRaises(exceptions.PortNotFound):
            self.plugin.create_floatingip(self.context,
                                          dict(floatingip=request))
def test_create_with_invalid_fixed_ip_for_port_fails(self):
    """Requesting a fixed ip the port does not own raises.

    The port only owns 192.168.0.1, so asking to associate 192.168.0.2
    must raise FixedIpDoesNotExistsForPort.
    """
    network = dict(id="00000000-0000-0000-0000-000000000000",
                   ipam_strategy="ANY")
    fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
    fixed_ips = [dict(address_type="fixed", version=4,
                      address=int(fixed_ip_addr),
                      address_readable=str(fixed_ip_addr),
                      allocated_at=datetime.datetime.now())]
    port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
    with self._stubs(port=port, ips=fixed_ips, network=network):
        with self.assertRaises(
                quark_exceptions.FixedIpDoesNotExistsForPort):
            request = dict(floating_network_id=network["id"],
                           port_id=port["id"],
                           fixed_ip_address="192.168.0.2")
            self.plugin.create_floatingip(self.context,
                                          dict(floatingip=request))
        # BUG FIX: the original test asserted on create_floatingip's return
        # value *after* the raising call inside the assertRaises block; those
        # assertions were unreachable dead code and have been removed.
def test_create_with_port_and_fixed_ip_with_existing_flip_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(address_type="floating", version=4,
address=int(floating_ip_addr),
address_readable=str(floating_ip_addr),
allocated_at=datetime.datetime.now(),
fixed_ip_addr="192.168.0.1")
ips = [fixed_ip, floating_ip]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, ips=ips, network=network):
with self.assertRaises(
quark_exceptions.PortAlreadyContainsFloatingIp):
request = dict(floating_network_id=network["id"],
port_id=port["id"],
fixed_ip_address="192.168.0.1")
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_when_port_has_no_fixed_ips_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, network=network):
with self.assertRaises(
quark_exceptions.NoAvailableFixedIPsForPort):
request = dict(floating_network_id=network["id"],
port_id=port["id"])
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_when_port_has_no_available_fixed_ips_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(address_type="floating", version=4,
address=int(floating_ip_addr),
address_readable=str(floating_ip_addr),
allocated_at=datetime.datetime.now(),
fixed_ip_addr="192.168.0.1")
ips = [fixed_ip, floating_ip]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, ips=ips, network=network):
with self.assertRaises(
quark_exceptions.NoAvailableFixedIPsForPort):
request = dict(floating_network_id=network["id"],
port_id=port["id"])
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
| |
import inspect
from datetime import datetime, timedelta
from poloniex import constants
from poloniex.api.base import command_operator, BasePublicApi, BaseTradingApi
from poloniex.error import PoloniexError
from poloniex.logger import getLogger
from poloniex.wamp.client import WAMPClient
__author__ = "andrew.shvv@gmail.com"
logger = getLogger(__name__)
def ticker_wrapper(handler):
    """Adapt a raw Poloniex ticker push message into keyword arguments.

    Returns an async callback that unpacks the positional ticker fields
    and forwards them to *handler* as keyword arguments. Plain callables,
    generator-based coroutines and native ``async def`` handlers are all
    supported.
    """
    async def decorator(data):
        # Positional layout of a ticker message as sent by the push API.
        keys = ("currency_pair", "last", "lowest_ask", "highest_bid",
                "percent_change", "base_volume", "quote_volume",
                "is_frozen", "day_high", "day_low")
        kwargs = {key: data[i] for i, key in enumerate(keys)}

        # BUG FIX: only inspect.isgeneratorfunction() was checked before,
        # which is False for native 'async def' handlers — calling such a
        # handler without awaiting it silently never ran its body.
        if (inspect.iscoroutinefunction(handler) or
                inspect.isgeneratorfunction(handler)):
            await handler(**kwargs)
        else:
            handler(**kwargs)

    return decorator
def trades_wrapper(topic, handler):
    """Adapt raw trade push events for *topic* into keyword arguments.

    Each event dict is tagged with its market ("currency_pair") before
    being forwarded to *handler*. Plain callables, generator-based
    coroutines and native ``async def`` handlers are all supported.
    """
    async def decorator(data):
        for event in data:
            # Tag each trade event with the market it came from.
            event["currency_pair"] = topic
            # BUG FIX: inspect.isgeneratorfunction() alone misses native
            # 'async def' handlers, whose coroutine was never awaited.
            if (inspect.iscoroutinefunction(handler) or
                    inspect.isgeneratorfunction(handler)):
                await handler(**event)
            else:
                handler(**event)

    return decorator
def trollbox_wrapper(handler):
    """Adapt a raw trollbox push message into keyword arguments.

    Messages that do not carry exactly five fields are silently ignored.
    Plain callables, generator-based coroutines and native ``async def``
    handlers are all supported.
    """
    async def decorator(data):
        # Trollbox messages always carry exactly five fields; anything
        # else (e.g. truncated service notices) is dropped.
        if len(data) != 5:
            return
        type_, message_id, username, text, reputation = data
        kwargs = {
            "id": message_id,
            "username": username,
            "type": type_,
            "text": text,
            "reputation": reputation
        }
        # BUG FIX: inspect.isgeneratorfunction() alone misses native
        # 'async def' handlers, whose coroutine was never awaited.
        if (inspect.iscoroutinefunction(handler) or
                inspect.isgeneratorfunction(handler)):
            await handler(**kwargs)
        else:
            handler(**kwargs)

    return decorator
class PushApi:
    """Poloniex push (WAMP) API.

    Wraps a WAMPClient and routes topic subscriptions through the
    appropriate message-unpacking wrapper (ticker/trades/trollbox).
    """
    url = "wss://api.poloniex.com"

    def __init__(self, session):
        self.wamp = WAMPClient(url=self.url, session=session)

    async def start(self):
        """Open the underlying WAMP connection."""
        await self.wamp.start()

    async def stop(self, force=True):
        """Close the WAMP connection.

        With force=False the connection stays open while subscriptions
        are still queued or active.
        """
        if force or not self.is_subscribed:
            await self.wamp.stop()

    @property
    def is_subscribed(self):
        # NOTE(review): 'subsciptions' (sic) is the attribute name exposed
        # by WAMPClient; the typo lives in that class, not here.
        [queue, subscriptions] = self.wamp.subsciptions
        return len(queue) + len(subscriptions) != 0

    def subscribe(self, topic, handler):
        """Subscribe *handler* to *topic*, wrapping the handler so raw
        push payloads arrive as keyword arguments.

        Raises NotImplementedError for unknown topics.
        """
        if topic in constants.CURRENCY_PAIRS:
            handler = trades_wrapper(topic, handler)
            self.wamp.subscribe(topic=topic, handler=handler)
        # BUG FIX: the original used `topic is "trollbox"` / `is "ticker"`,
        # which compares object identity rather than string equality and
        # only worked by CPython string-interning accident.
        elif topic == "trollbox":
            handler = trollbox_wrapper(handler)
            self.wamp.subscribe(topic=topic, handler=handler)
        elif topic == "ticker":
            handler = ticker_wrapper(handler)
            self.wamp.subscribe(topic=topic, handler=handler)
        elif topic in constants.AVAILABLE_SUBSCRIPTIONS:
            self.wamp.subscribe(topic=topic, handler=handler)
        else:
            raise NotImplementedError("Topic not available")
class PublicApi(BasePublicApi):
    """Poloniex public (unauthenticated) REST API.

    The command methods below have empty bodies on purpose: the
    @command_operator decorator derives the actual HTTP request from the
    method name and arguments and dispatches it through api_call().
    """
    def __init__(self, session):
        # HTTP client session (aiohttp-style) used for every request.
        self.session = session
    async def api_call(self, *args, **kwargs):
        """Perform a GET request against self.url and return decoded JSON.

        Raises:
            PoloniexError: if the JSON payload has a non-null "error" key.
        """
        async with self.session.get(self.url, *args, **kwargs) as response:
            logger.debug(response)
            response = await response.json()
            if ("error" in response) and (response["error"] is not None):
                raise PoloniexError(response["error"])
            return response
    @command_operator
    async def returnTicker(self):
        """
        Returns the ticker for all markets
        """
        pass
    @command_operator
    async def return24hVolume(self):
        """
        Returns the 24-hour volume for all markets, plus totals for primary currencies.
        """
        pass
    @command_operator
    async def returnOrderBook(self, currency_pair="all", depth=50):
        """
        Returns the order book for a given market, as well as a sequence number for use with the Push API and an indicator
        specifying whether the market is frozen. You may set currencyPair to "all" to get the order books of all markets
        """
        pass
    # NOTE(review): these 'start'/'end' defaults are evaluated ONCE at
    # import time, so a long-running process keeps using a stale window
    # instead of "the last day". Confirm intent before relying on them.
    @command_operator
    async def returnChartData(self,
                              currency_pair,
                              start=datetime.now() - timedelta(days=1),
                              end=datetime.now(),
                              period=300):
        """
        Returns candlestick chart data. Required GET parameters are "currencyPair", "period" (candlestick period in seconds;
        valid values are 300, 900, 1800, 7200, 14400, and 86400), "start", and "end". "Start" and "end" are given in UNIX
        timestamp format and used to specify the date range for the data returned.
        """
        pass
    @command_operator
    async def returnCurrencies(self):
        """
        Returns information about currencies
        """
        pass
    # NOTE(review): same import-time default-evaluation caveat as
    # returnChartData above.
    @command_operator
    async def returnTradeHistory(self,
                                 currency_pair="all",
                                 start=datetime.now() - timedelta(days=1),
                                 end=datetime.now()):
        """
        Returns the past 200 trades for a given market, or up to 50,000 trades between a range specified in UNIX timestamps
        by the "start" and "end" GET parameters.
        """
        pass
class TradingApi(BaseTradingApi):
    """Poloniex trading (authenticated) REST API.

    Command methods have empty bodies on purpose: the @command_operator
    decorator derives the actual HTTP request from the method name and
    arguments and dispatches it through api_call(), which signs requests
    via secure_request() from the base class.
    """
    def __init__(self, session, *args, **kwargs):
        # HTTP client session (aiohttp-style) used for every request.
        self.session = session
        super(TradingApi, self).__init__(*args, **kwargs)
    async def api_call(self, *args, **kwargs):
        """Sign the request payload/headers and POST it, returning JSON."""
        data, headers = self.secure_request(kwargs.get('data', {}), kwargs.get('headers', {}))
        kwargs['data'] = data
        kwargs['headers'] = headers
        async with self.session.post(self.url, *args, **kwargs) as response:
            return await response.json()
    @command_operator
    async def returnBalances(self):
        """
        Returns all of your available balances
        """
        pass
    @command_operator
    async def returnCompleteBalances(self):
        """
        Returns all of your balances, including available balance, balance on orders, and the estimated BTC value of your balance.
        """
        pass
    @command_operator
    async def returnDepositAddresses(self):
        """
        Returns all of your deposit addresses.
        """
        pass
    @command_operator
    async def generateNewAddress(self, currency):
        """
        Generates a new deposit address for the specified currency.
        """
        pass
    # NOTE(review): these 'start'/'end' defaults are evaluated ONCE at
    # import time, so a long-running process keeps using a stale window
    # instead of "the last day". Confirm intent before relying on them.
    @command_operator
    async def returnDepositsWithdrawals(self,
                                        start=datetime.now() - timedelta(days=1),
                                        end=datetime.now()):
        """
        Returns your deposit and withdrawal history within a range, specified by the "start" and "end" POST parameters,
        both of which should be given as UNIX timestamps
        """
        pass
    @command_operator
    async def returnOpenOrders(self, currency_pair="all"):
        """
        Returns your open orders for a given market, specified by the "currencyPair" POST parameter, e.g. "BTC_XCP".
        Set "currencyPair" to "all" to return open orders for all markets.
        """
        pass
    # NOTE(review): same import-time default-evaluation caveat as
    # returnDepositsWithdrawals above.
    @command_operator
    async def returnTradeHistory(self,
                                 currency_pair="all",
                                 start=datetime.now() - timedelta(days=1),
                                 end=datetime.now()):
        """
        Returns your trade history for a given market, specified by the "currencyPair" POST parameter.
        You may specify "all" as the currencyPair to receive your trade history for all markets. You may optionally
        specify a range via "start" and/or "end" POST parameters, given in UNIX timestamp format; if you do not specify
        a range, it will be limited to one day.
        """
        pass
    # NOTE(review): unlike its siblings this method has no trailing 'pass';
    # the docstring alone is the body, which is equivalent and harmless.
    @command_operator
    async def returnOrderTrades(self, order_number):
        """
        Returns all trades involving a given order, specified by the "orderNumber" POST parameter. If no trades for the order
        have occurred or you specify an order that does not belong to you, you will receive an error.
        """
    @command_operator
    async def buy(self,
                  currency_pair,
                  rate,
                  amount):
        """
        Places a limit buy order in a given market. Required POST parameters are "currencyPair", "rate", and "amount".
        If successful, the method will return the order number
        """
        pass
    @command_operator
    async def sell(self,
                   currency_pair,
                   rate,
                   amount):
        """
        Places a sell order in a given market
        """
        pass
    @command_operator
    async def withdraw(self, currency, amount, address):
        """
        Immediately places a withdrawal for a given currency, with no email confirmation. In order to use this method,
        the withdrawal privilege must be enabled for your API key. Required POST parameters are "currency", "amount",
        and "address". For XMR withdrawals, you may optionally specify "paymentId".
        """
        pass
    @command_operator
    async def cancelOrder(self, order_number):
        """
        Cancels an order you have placed in a given market. Required POST parameter is "orderNumber"
        """
        pass
| |
import numpy as np
from numba import njit
from numpy import linalg
from testbeam_analysis.tools import geometry_utils
@njit
def _filter_predict(transition_matrix, transition_covariance,
                    transition_offset, current_filtered_state,
                    current_filtered_state_covariance):
    """Kalman prediction step for a whole track chunk.

    Implements
        x_pred = F x + b
        P_pred = F P F^T + Q

    Parameters
    ----------
    transition_matrix : [chunk_size, n_dim_state, n_dim_state] array
        state transition matrix F from time t to t+1.
    transition_covariance : [chunk_size, n_dim_state, n_dim_state] array
        process noise covariance Q for the transition from t to t+1.
    transition_offset : [chunk_size, n_dim_state] array
        offset b for the state transition from t to t+1.
    current_filtered_state : [chunk_size, n_dim_state] array
        filtered state x at time t.
    current_filtered_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        covariance P of the filtered state at time t.

    Returns
    -------
    (predicted_state, predicted_state_covariance) for time t+1 with shapes
    [chunk_size, n_dim_state] and [chunk_size, n_dim_state, n_dim_state].
    """
    # x_pred = F x + b
    propagated_state = _vec_mul(transition_matrix, current_filtered_state)
    predicted_state = propagated_state + transition_offset
    # P_pred = F P F^T + Q
    covariance_in_new_basis = _mat_mul(current_filtered_state_covariance,
                                       _mat_trans(transition_matrix))
    predicted_state_covariance = _mat_mul(transition_matrix,
                                          covariance_in_new_basis) + transition_covariance
    return predicted_state, predicted_state_covariance
def _filter_correct(observation_matrix, observation_covariance,
                    observation_offset, predicted_state,
                    predicted_state_covariance, observation, mask):
    r"""Filters a predicted state with the Kalman Filter. Filtering
    is done on whole track chunk with size chunk_size.
    Parameters
    ----------
    observation_matrix : [chunk_size, n_dim_obs, n_dim_obs] array
        observation matrix for time t.
    observation_covariance : [chunk_size, n_dim_obs, n_dim_obs] array
        covariance matrix for observation at time t.
    observation_offset : [chunk_size, n_dim_obs] array
        offset for observation at time t.
    predicted_state : [chunk_size, n_dim_state] array
        predicted state at time t.
    predicted_state_covariance : [n_dim_state, n_dim_state] array
        covariance matrix of predicted state at time t.
    observation : [chunk_size, n_dim_obs] array
        observation at time t. If observation is a masked array and any of
        its values are masked, the observation will be not included in filtering.
    mask : [chunk_size, n_dim_obs] bool
        Mask which determines if measurement will be included in filtering step (False, not masked)
        or will be treated as missing measurement (True, masked).
    Returns
    -------
    kalman_gain : [chunk_size, n_dim_state, n_dim_obs] array
        Kalman gain matrix for time t.
    filtered_state : [chunk_size, n_dim_state] array
        filtered state at time t.
    filtered_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        covariance matrix of filtered state at time t.
    """
    # Measurement update runs only when no element of the chunk is masked;
    # a single masked value skips the update for the whole chunk.
    if not np.any(mask):
        # Predicted measurement y = H x_pred + d and its covariance
        # S = H P H^T + R.
        predicted_observation = _vec_mul(observation_matrix, predicted_state) + observation_offset
        predicted_observation_covariance = _mat_mul(observation_matrix,
                                                    _mat_mul(predicted_state_covariance, _mat_trans(observation_matrix))) + observation_covariance
        # Kalman gain K = P H^T S^{-1}.
        kalman_gain = _mat_mul(predicted_state_covariance,
                               _mat_mul(_mat_trans(observation_matrix),
                                        _mat_inverse(predicted_observation_covariance)))
        # State update x = x_pred + K (z - y) using the innovation (z - y).
        filtered_state = predicted_state + _vec_mul(kalman_gain,
                                                    observation - predicted_observation)
        # Covariance update P = P_pred - K H P_pred.
        filtered_state_covariance = predicted_state_covariance - _mat_mul(kalman_gain,
                                                                          _mat_mul(observation_matrix,
                                                                                   predicted_state_covariance))
    else:
        # Missing (masked) measurement: pass the prediction through
        # unchanged with a zero Kalman gain.
        n_dim_state = predicted_state_covariance.shape[1]
        n_dim_obs = observation_matrix.shape[1]
        chunk_size = observation_matrix.shape[0]
        kalman_gain = np.zeros((chunk_size, n_dim_state, n_dim_obs))
        filtered_state = predicted_state
        filtered_state_covariance = predicted_state_covariance
    return kalman_gain, filtered_state, filtered_state_covariance
def _filter(alignment, transition_matrices, observation_matrices, transition_covariances,
            observation_covariances, transition_offsets, observation_offsets,
            initial_state, initial_state_covariance, observations, mask):
    """Apply the Kalman Filter. First a prediction of the state is done, then a filtering is
    done which includes the observations.
    Parameters
    ----------
    alignment : array_like or None
        Aligment data, which contains rotations and translations for each DUT. Needed to take rotations of DUTs into account,
        in order to get correct transition matrices. If pre-alignment data is used, this is set to None since no rotations have to be
        taken into account.
    transition_matrices : [chunk_size, n_timesteps-1, n_dim_state, n_dim_state] array-like
        matrices to transport states from t to t+1.
    observation_matrices : [chunk_size, n_timesteps, n_dim_obs, n_dim_state] array-like
        observation matrices.
    transition_covariances : [chunk_size, n_timesteps-1, n_dim_state,n_dim_state] array-like
        covariance matrices of transition matrices.
    observation_covariances : [chunk_size, n_timesteps, n_dim_obs, n_dim_obs] array-like
        covariance matrices of observation matrices.
    transition_offsets : [chunk_size, n_timesteps-1, n_dim_state] array-like
        offsets of transition matrices.
    observation_offsets : [chunk_size, n_timesteps, n_dim_obs] array-like
        offsets of observations.
    initial_state : [chunk_size, n_dim_state] array-like
        initial value of state.
    initial_state_covariance : [chunk_size, n_dim_state, n_dim_state] array-like
        initial value for observation covariance matrices.
    observations : [chunk_size, n_timesteps, n_dim_obs] array
        observations (measurements) from times [0...n_timesteps-1]. If any of observations is masked,
        then observations[:, t] will be treated as a missing observation
        and will not be included in the filtering step.
    Returns
    -------
    predicted_states : [chunk_size, n_timesteps, n_dim_state] array
        predicted states of times [0...t].
    predicted_state_covariances : [chunk_size, n_timesteps, n_dim_state, n_dim_state] array
        covariance matrices of predicted states of times [0...t].
    kalman_gains : [chunk_size, n_timesteps, n_dim_state] array
        Kalman gain matrices of times [0...t].
    filtered_states : [chunk_size, n_timesteps, n_dim_state] array
        filtered states of times [0...t].
    filtered_state_covariances : [chunk_size, n_timesteps, n_dim_state] array
        covariance matrices of filtered states of times [0...t].
    transition_matrices_update : [chunk_size, n_timesteps-1, n_dim_state, n_dim_state] array-like
        updated transition matrices in case of rotated planes.
    """
    chunk_size, n_timesteps, n_dim_obs = observations.shape
    n_dim_state = transition_covariances.shape[2]
    # Per-timestep result buffers for the forward pass.
    predicted_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
    predicted_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
    kalman_gains = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_obs))
    filtered_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
    filtered_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
    # array where new transition matrices are stored, needed to pass it to kalman smoother
    transition_matrices_update = np.zeros_like(transition_covariances)
    for t in range(n_timesteps):
        if t == 0:
            # First time step: the prediction is just the prior.
            predicted_states[:, t] = initial_state
            predicted_state_covariances[:, t] = initial_state_covariance
        else:
            if alignment is not None:
                # DUT planes may be rotated: recompute the effective z
                # distance between consecutive planes along each track.
                # get position of plane t - 1 and t
                dut_position = [np.array([alignment[t - 1]['translation_x'], alignment[t - 1]['translation_y'], alignment[t - 1]['translation_z']]),
                                np.array([alignment[t]['translation_x'], alignment[t]['translation_y'], alignment[t]['translation_z']])]
                rotation_matrix = [geometry_utils.rotation_matrix(alpha=alignment[t - 1]['alpha'],
                                                                  beta=alignment[t - 1]['beta'],
                                                                  gamma=alignment[t - 1]['gamma']),
                                   geometry_utils.rotation_matrix(alpha=alignment[t]['alpha'],
                                                                  beta=alignment[t]['beta'],
                                                                  gamma=alignment[t]['gamma'])]
                basis_global = [rotation_matrix[0].T.dot(np.eye(3)), rotation_matrix[1].T.dot(np.eye(3))]
                # Third basis vector is the plane normal in global frame.
                dut_plane_normal = [basis_global[0][2], basis_global[1][2]]
                # slopes (directional vectors) of the filtered estimates
                slopes = np.column_stack((filtered_states[:, t - 1, 2], filtered_states[:, t - 1, 3], np.ones((filtered_states.shape[0], 1))))
                # z position of the filtered states
                z_position = geometry_utils.get_line_intersections_with_plane(line_origins=np.column_stack((filtered_states[:, t - 1, 0],
                                                                                                           filtered_states[:, t - 1, 1],
                                                                                                           np.ones(filtered_states[:, t - 1, 1].shape))),
                                                                              line_directions=np.column_stack((np.zeros((filtered_states[:, t - 1, 1].shape)),
                                                                                                               np.zeros(filtered_states[:, t - 1, 1].shape),
                                                                                                               np.ones(filtered_states[:, t - 1, 1].shape))),
                                                                              position_plane=dut_position[0],
                                                                              normal_plane=dut_plane_normal[0])[:, -1]
                # offsets (support vectors) of the filtered states
                offsets = np.column_stack((filtered_states[:, t - 1, 0], filtered_states[:, t - 1, 1], z_position))
                # calculate intersection of state which should be predicted (filtered state of plane before) with plane t - 1 and t
                offsets_rotated = [geometry_utils.get_line_intersections_with_plane(line_origins=offsets,
                                                                                    line_directions=slopes,
                                                                                    position_plane=dut_position[0],
                                                                                    normal_plane=dut_plane_normal[0]),
                                   geometry_utils.get_line_intersections_with_plane(line_origins=offsets,
                                                                                    line_directions=slopes,
                                                                                    position_plane=dut_position[1],
                                                                                    normal_plane=dut_plane_normal[1])]
                # Per-track z distance between the two (possibly tilted) planes.
                z_diff = offsets_rotated[1][:, 2] - offsets_rotated[0][:, 2]
                # update transition matrix, only need to change these value in case for rotated planes
                transition_matrices[:, t - 1, 0, 2] = z_diff
                transition_matrices[:, t - 1, 1, 3] = z_diff
                # store updated transition matrix for smoothing function
                transition_matrices_update[:, t - 1] = transition_matrices[:, t - 1]
            transition_matrix = transition_matrices[:, t - 1]
            transition_covariance = transition_covariances[:, t - 1]
            transition_offset = transition_offsets[:, t - 1]
            predicted_states[:, t], predicted_state_covariances[:, t] = _filter_predict(
                transition_matrix,
                transition_covariance,
                transition_offset,
                filtered_states[:, t - 1],
                filtered_state_covariances[:, t - 1])
        observation_matrix = observation_matrices[:, t]
        observation_covariance = observation_covariances[:, t]
        observation_offset = observation_offsets[:, t]
        # Measurement update; masked observations are skipped inside.
        kalman_gains[:, t], filtered_states[:, t], filtered_state_covariances[:, t] = _filter_correct(
            observation_matrix,
            observation_covariance,
            observation_offset,
            predicted_states[:, t],
            predicted_state_covariances[:, t],
            observations[:, t],
            np.ma.getmask(observations[:, t]))
    return predicted_states, predicted_state_covariances, kalman_gains, filtered_states, filtered_state_covariances, transition_matrices_update
@njit
def _smooth_update(transition_matrix, filtered_state,
                   filtered_state_covariance, predicted_state,
                   predicted_state_covariance, next_smoothed_state,
                   next_smoothed_state_covariance):
    """Rauch-Tung-Striebel smoother update for one time step, applied to a
    whole track chunk.

    Implements
        A   = P_f F^T P_p^{-1}
        x_s = x_f + A (x_s_next - x_p)
        P_s = P_f + A (P_s_next - P_p) A^T

    Parameters
    ----------
    transition_matrix : [chunk_size, n_dim_state, n_dim_state] array
        transition matrix F from time t to t+1.
    filtered_state : [chunk_size, n_dim_state] array
        filtered state x_f at time t.
    filtered_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        covariance P_f of the filtered state at time t.
    predicted_state : [chunk_size, n_dim_state] array
        predicted state x_p at time t+1.
    predicted_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        covariance P_p of the predicted state at time t+1.
    next_smoothed_state : [chunk_size, n_dim_state] array
        smoothed state at time t+1.
    next_smoothed_state_covariance : [chunk_size, n_dim_state, n_dim_state] array
        covariance of the smoothed state at time t+1.

    Returns
    -------
    (smoothed_state, smoothed_state_covariance, kalman_smoothing_gain)
    for time t.
    """
    # A = P_f F^T P_p^{-1}
    gain = _mat_mul(filtered_state_covariance,
                    _mat_mul(_mat_trans(transition_matrix),
                             _mat_inverse(predicted_state_covariance)))
    # x_s = x_f + A (x_s_next - x_p)
    state_residual = next_smoothed_state - predicted_state
    smoothed_state = filtered_state + _vec_mul(gain, state_residual)
    # P_s = P_f + A (P_s_next - P_p) A^T
    covariance_residual = next_smoothed_state_covariance - predicted_state_covariance
    smoothed_state_covariance = filtered_state_covariance + _mat_mul(
        gain, _mat_mul(covariance_residual, _mat_trans(gain)))
    return smoothed_state, smoothed_state_covariance, gain
@njit
def _smooth(transition_matrices, filtered_states,
            filtered_state_covariances, predicted_states,
            predicted_state_covariances):
    """Run the backward (RTS) smoothing pass over filtered estimates for a
    whole track chunk.

    Parameters
    ----------
    transition_matrices : [chunk_size, n_timesteps-1, n_dim_state, n_dim_state] array-like
        matrices to transport states from t to t+1 of times [0...t-1].
    filtered_states : [chunk_size, n_timesteps, n_dim_state] array
        filtered states of times [0...t].
    filtered_state_covariances : [chunk_size, n_timesteps, n_dim_state] array
        covariance matrices of filtered states of times [0...t].
    predicted_states : [chunk_size, n_timesteps, n_dim_state] array
        predicted states of times [0...t].
    predicted_state_covariances : [chunk_size, n_timesteps, n_dim_state, n_dim_state] array
        covariance matrices of predicted states of times [0...t].

    Returns
    -------
    (smoothed_states, smoothed_state_covariances, kalman_smoothing_gains)
    where the gains cover times [0...n_timesteps-2].
    """
    chunk_size, n_timesteps, n_dim_state = filtered_states.shape
    smoothed_states = np.zeros((chunk_size, n_timesteps, n_dim_state))
    smoothed_state_covariances = np.zeros((chunk_size, n_timesteps, n_dim_state, n_dim_state))
    kalman_smoothing_gains = np.zeros((chunk_size, n_timesteps - 1, n_dim_state, n_dim_state))
    # The backward recursion starts from the last filtered estimate.
    smoothed_states[:, -1] = filtered_states[:, -1]
    smoothed_state_covariances[:, -1] = filtered_state_covariances[:, -1]
    # Walk backwards from the second-to-last time step down to the first.
    for t in range(n_timesteps - 2, -1, -1):
        smoothed_states[:, t], smoothed_state_covariances[:, t], kalman_smoothing_gains[:, t] = _smooth_update(
            transition_matrices[:, t],
            filtered_states[:, t],
            filtered_state_covariances[:, t],
            predicted_states[:, t + 1],
            predicted_state_covariances[:, t + 1],
            smoothed_states[:, t + 1],
            smoothed_state_covariances[:, t + 1])
    return smoothed_states, smoothed_state_covariances, kalman_smoothing_gains
@njit
def _mat_mul(X, Y):
    '''Batched matrix product on the last two axes of two 3D arrays.

    X : [chunk_size, n, k] array, Y : [chunk_size, k, m] array.
    Returns a [chunk_size, n, m] array.
    '''
    # BUG FIX: the result was allocated with X.shape, which is only correct
    # for square matrices. The product of [n, k] and [k, m] matrices must
    # be [n, m], so non-square operands (e.g. when n_dim_obs != n_dim_state)
    # previously produced wrongly shaped output / out-of-bounds writes.
    result = np.zeros((X.shape[0], X.shape[1], Y.shape[2]))
    for l in range(X.shape[0]):
        # iterate through rows of X
        for i in range(X.shape[1]):
            # iterate through columns of Y
            for j in range(Y.shape[2]):
                # iterate through rows of Y
                for k in range(Y.shape[1]):
                    result[l][i][j] += X[l][i][k] * Y[l][k][j]
    return result
@njit
def _vec_mul(X, Y):
    '''Batched matrix-vector product on the last two axes.

    X : [chunk_size, n, k] array, Y : [chunk_size, k] array.
    Returns a [chunk_size, n] array.
    '''
    # BUG FIX: the result was allocated with Y.shape, which is only correct
    # when the matrices are square (n == k). An [n, k] matrix times a
    # length-k vector yields a length-n vector.
    result = np.zeros((X.shape[0], X.shape[1]))
    for l in range(X.shape[0]):
        # iterate through rows of X
        for i in range(X.shape[1]):
            # accumulate the dot product of row i with the vector
            for k in range(Y.shape[1]):
                result[l][i] += X[l][i][k] * Y[l][k]
    return result
@njit
def _mat_trans(X):
    '''Batched transpose on the last two axes of a 3D array.

    X : [chunk_size, n, m] array. Returns a [chunk_size, m, n] array.
    '''
    # BUG FIX: the result was allocated as X.shape and indexed as if X were
    # square; the transpose of an [n, m] matrix is [m, n], and the old loop
    # bounds read X[l][j][i] out of range for non-square input.
    result = np.zeros((X.shape[0], X.shape[2], X.shape[1]))
    for l in range(X.shape[0]):
        for i in range(X.shape[2]):
            for j in range(X.shape[1]):
                result[l][i][j] = X[l][j][i]
    return result
@njit
def _mat_inverse(X):
    '''Batched inversion on the last two axes of a 3D array.

    The Moore-Penrose pseudo-inverse is used, which also handles
    (near-)singular matrices.
    '''
    result = np.zeros(X.shape)
    for index in range(X.shape[0]):
        result[index] = linalg.pinv(X[index])
    return result
class KalmanFilter():
    """Chunked Kalman filter + Rauch-Tung-Striebel smoother.

    Thin orchestration layer: runs the forward pass (_filter) and then the
    backward smoothing pass (_smooth) over a whole chunk of tracks.
    """
    def smooth(self, alignment, transition_matrices, transition_offsets, transition_covariance,
               observation_matrices, observation_offsets, observation_covariances,
               initial_state, initial_state_covariance, observations):
        """Apply the Kalman Smoother to the observations. In the first step a filtering is done,
        afterwards a smoothing is done. Calculation is done on whole track chunk with size chunk_size.

        NOTE(review): the parameter is named 'transition_covariance'
        (singular) but, per the description below, holds the per-timestep
        covariances — confirm naming against callers.
        Parameters
        ----------
        alignment : array_like or None
            Aligment data, which contains rotations and translations for each DUT. Needed to take rotations of DUTs into account,
            in order to get correct transition matrices. If pre-alignment data is used, this is set to None since no rotations have to be
            taken into account.
        transition_matrices : [chunk_size, n_timesteps-1, n_dim_state, n_dim_state] array-like
            matrices to transport states from t to t+1.
        transition_offsets : [chunk_size, n_timesteps-1, n_dim_state] array-like
            offsets of transition matrices.
        transition_covariances : [chunk_size, n_timesteps-1, n_dim_state,n_dim_state] array-like
            covariance matrices of transition matrices.
        observation_matrices : [chunk_size, n_timesteps, n_dim_obs, n_dim_state] array-like
            observation matrices.
        observation_offsets : [chunk_size, n_timesteps, n_dim_obs] array-like
            offsets of observations.
        observation_covariances : [chunk_size, n_timesteps, n_dim_obs, n_dim_obs] array-like
            covariance matrices of observation matrices.
        initial_state : [chunk_size, n_dim_state] array-like
            initial value of state.
        initial_state_covariance : [chunk_size, n_dim_state, n_dim_state] array-like
            initial value for observation covariance matrices.
        observations : [chunk_size, n_timesteps, n_dim_obs] array
            observations (measurements) from times [0...n_timesteps-1]. If any of observations is masked,
            then observations[:, t] will be treated as a missing observation
            and will not be included in the filtering step.
        Returns
        -------
        smoothed_states : [chunk_size, n_timesteps, n_dim_state]
            smoothed states for times [0...n_timesteps-1].
        smoothed_state_covariances : [chunk_size, n_timesteps, n_dim_state, n_dim_state] array
            covariance matrices of smoothed states for times [0...n_timesteps-1].
        """
        # Forward pass; _filter may update the transition matrices when
        # alignment (rotated planes) is provided, so its returned matrices
        # are reused for the backward pass.
        predicted_states, predicted_state_covariances, _, filtered_states, filtered_state_covariances, transition_matrices = _filter(
            alignment, transition_matrices, observation_matrices,
            transition_covariance, observation_covariances,
            transition_offsets, observation_offsets,
            initial_state, initial_state_covariance, observations,
            observations.mask)
        # Backward (RTS) pass over the filtered estimates.
        smoothed_states, smoothed_state_covariances, _ = _smooth(
            transition_matrices, filtered_states,
            filtered_state_covariances, predicted_states,
            predicted_state_covariances)
        return smoothed_states, smoothed_state_covariances
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the exploration editor page."""
import datetime
import logging
import os
import StringIO
import zipfile
from core import jobs_registry
from core.controllers import creator_dashboard
from core.controllers import editor
from core.domain import config_services
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_jobs_continuous_test
from core.domain import rights_manager
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
(user_models,) = models.Registry.import_models([models.NAMES.user])
class BaseEditorControllerTest(test_utils.GenericTestBase):
    """Common fixtures for editor controller tests: signs up the standard
    set of users and provides editability assertions."""

    CAN_EDIT_STR = 'GLOBALS.can_edit = JSON.parse(\'true\');'
    CANNOT_EDIT_STR = 'GLOBALS.can_edit = JSON.parse(\'false\');'

    def setUp(self):
        """Completes the sign-up process for self.EDITOR_EMAIL."""
        super(BaseEditorControllerTest, self).setUp()
        for email, username in [
                (self.EDITOR_EMAIL, self.EDITOR_USERNAME),
                (self.ADMIN_EMAIL, self.ADMIN_USERNAME),
                (self.OWNER_EMAIL, self.OWNER_USERNAME),
                (self.VIEWER_EMAIL, self.VIEWER_USERNAME),
                (self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)]:
            self.signup(email, username)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])
        self.set_moderators([self.MODERATOR_USERNAME])

    def assert_can_edit(self, response_body):
        """Asserts that the response body marks the exploration editable."""
        self.assertIn(self.CAN_EDIT_STR, response_body)
        self.assertNotIn(self.CANNOT_EDIT_STR, response_body)

    def assert_cannot_edit(self, response_body):
        """Asserts that the response body marks the exploration as not
        editable."""
        self.assertIn(self.CANNOT_EDIT_STR, response_body)
        self.assertNotIn(self.CAN_EDIT_STR, response_body)
class EditorTest(BaseEditorControllerTest):
ALL_CC_MANAGERS_FOR_TESTS = [
stats_jobs_continuous_test.ModifiedInteractionAnswerSummariesAggregator
]
    def setUp(self):
        """Loads demo exploration '0' and releases its ownership so the
        tests below run against a community-editable exploration."""
        super(EditorTest, self).setUp()
        exp_services.load_demo('0')
        rights_manager.release_ownership_of_exploration(
            feconf.SYSTEM_COMMITTER_ID, '0')
def test_editor_page(self):
"""Test access to editor pages for the sample exploration."""
# Check that non-editors can access, but not edit, the editor page.
response = self.testapp.get('/create/0')
self.assertEqual(response.status_int, 200)
self.assertIn('Help others learn new things.', response.body)
self.assert_cannot_edit(response.body)
# Log in as an editor.
self.login(self.EDITOR_EMAIL)
# Check that it is now possible to access and edit the editor page.
response = self.testapp.get('/create/0')
self.assertIn('Help others learn new things.', response.body)
self.assertEqual(response.status_int, 200)
self.assert_can_edit(response.body)
self.assertIn('Stats', response.body)
self.assertIn('History', response.body)
self.logout()
def test_new_state_template(self):
"""Test the validity of the NEW_STATE_TEMPLATE."""
exploration = exp_services.get_exploration_by_id('0')
exploration.add_states([feconf.DEFAULT_INIT_STATE_NAME])
new_state_dict = exploration.states[
feconf.DEFAULT_INIT_STATE_NAME].to_dict()
self.assertEqual(new_state_dict, editor.NEW_STATE_TEMPLATE)
def test_that_default_exploration_cannot_be_published(self):
"""Test that publishing a default exploration raises an error
due to failing strict validation.
"""
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.CREATOR_DASHBOARD_URL)
self.assertEqual(response.status_int, 200)
csrf_token = self.get_csrf_token_from_response(response)
exp_id = self.post_json(
feconf.NEW_EXPLORATION_URL, {}, csrf_token
)[creator_dashboard.EXPLORATION_ID_KEY]
response = self.testapp.get('/create/%s' % exp_id)
csrf_token = self.get_csrf_token_from_response(response)
publish_url = '%s/%s' % (feconf.EXPLORATION_STATUS_PREFIX, exp_id)
self.put_json(publish_url, {
'make_public': True,
}, csrf_token, expect_errors=True, expected_status_int=400)
self.logout()
def test_add_new_state_error_cases(self):
"""Test the error cases for adding a new state to an exploration."""
current_version = 1
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/0')
csrf_token = self.get_csrf_token_from_response(response)
def _get_payload(new_state_name, version=None):
result = {
'change_list': [{
'cmd': 'add_state',
'state_name': new_state_name
}],
'commit_message': 'Add new state',
}
if version is not None:
result['version'] = version
return result
def _put_and_expect_400_error(payload):
return self.put_json(
'/createhandler/data/0', payload, csrf_token,
expect_errors=True, expected_status_int=400)
# A request with no version number is invalid.
response_dict = _put_and_expect_400_error(_get_payload('New state'))
self.assertIn('a version must be specified', response_dict['error'])
# A request with the wrong version number is invalid.
response_dict = _put_and_expect_400_error(
_get_payload('New state', 123))
self.assertIn('which is too old', response_dict['error'])
# A request with an empty state name is invalid.
response_dict = _put_and_expect_400_error(
_get_payload('', current_version))
self.assertIn('should be between 1 and 50', response_dict['error'])
# A request with a really long state name is invalid.
response_dict = _put_and_expect_400_error(
_get_payload('a' * 100, current_version))
self.assertIn('should be between 1 and 50', response_dict['error'])
# A request with a state name containing invalid characters is
# invalid.
response_dict = _put_and_expect_400_error(
_get_payload('[Bad State Name]', current_version))
self.assertIn('Invalid character [', response_dict['error'])
# A name cannot have spaces at the front or back.
response_dict = _put_and_expect_400_error(
_get_payload(' aa', current_version))
self.assertIn('start or end with whitespace', response_dict['error'])
response_dict = _put_and_expect_400_error(
_get_payload('aa\t', current_version))
self.assertIn('end with whitespace', response_dict['error'])
response_dict = _put_and_expect_400_error(
_get_payload('\n', current_version))
self.assertIn('end with whitespace', response_dict['error'])
# A name cannot have consecutive whitespace.
response_dict = _put_and_expect_400_error(
_get_payload('The B', current_version))
self.assertIn('Adjacent whitespace', response_dict['error'])
response_dict = _put_and_expect_400_error(
_get_payload('The\t\tB', current_version))
self.assertIn('Adjacent whitespace', response_dict['error'])
self.logout()
def test_untrained_answers_handler(self):
with self.swap(feconf, 'SHOW_TRAINABLE_UNRESOLVED_ANSWERS', True):
def _create_answer(value, count=1):
return {'answer': value, 'frequency': count}
def _create_training_data(*arg):
return [_create_answer(value) for value in arg]
def _submit_answer(
exp_id, state_name, interaction_id, answer_group_index,
rule_spec_index, classification_categorization, answer,
exp_version=1, session_id='dummy_session_id',
time_spent_in_secs=0.0):
event_services.AnswerSubmissionEventHandler.record(
exp_id, exp_version, state_name, interaction_id,
answer_group_index, rule_spec_index,
classification_categorization, session_id,
time_spent_in_secs, {}, answer)
# Load the string classifier demo exploration.
exp_id = '15'
exp_services.load_demo(exp_id)
rights_manager.release_ownership_of_exploration(
feconf.SYSTEM_COMMITTER_ID, exp_id)
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
self.assertEqual(
exploration_dict['exploration']['title'],
'Demonstrating string classifier')
# This test uses the interaction which supports numeric input.
state_name = 'text'
self.assertIn(
state_name, exploration_dict['exploration']['states'])
self.assertEqual(
exploration_dict['exploration']['states'][state_name][
'interaction']['id'], 'TextInput')
# Input happy since there is an explicit rule checking for that.
_submit_answer(
exp_id, state_name, 'TextInput', 0, 0,
exp_domain.EXPLICIT_CLASSIFICATION, 'happy')
# Input text not at all similar to happy (default outcome).
_submit_answer(
exp_id, state_name, 'TextInput', 2, 0,
exp_domain.DEFAULT_OUTCOME_CLASSIFICATION, 'sad')
# Input cheerful: this is current training data and falls under the
# classifier.
_submit_answer(
exp_id, state_name, 'TextInput', 1, 0,
exp_domain.TRAINING_DATA_CLASSIFICATION, 'cheerful')
# Input joyful: this is not training data but it will later be
# classified under the classifier.
_submit_answer(
exp_id, state_name, 'TextInput', 2, 0,
exp_domain.DEFAULT_OUTCOME_CLASSIFICATION, 'joyful')
# Perform answer summarization on the summarized answers.
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CC_MANAGERS_FOR_TESTS):
# Run job on exploration with answers
stats_jobs_continuous_test.ModifiedInteractionAnswerSummariesAggregator.start_computation() # pylint: disable=line-too-long
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(self.count_jobs_in_taskqueue(), 0)
# Log in as an editor.
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/%s' % exp_id)
csrf_token = self.get_csrf_token_from_response(response)
url = str(
'/createhandler/training_data/%s/%s' % (exp_id, state_name))
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
# Only two of the four submitted answers should be unhandled.
# NOTE: Here, the return data here should really be
# _create_training_data('joyful', 'sad'). However, it is the
# empty list here because unhandled answers have not been
# implemented yet.
response_dict = self.get_json(url)
self.assertEqual(response_dict['unhandled_answers'], [])
self.assertTrue(exploration_dict['version'])
# If the confirmed unclassified answers is trained for one of the
# values, it should no longer show up in unhandled answers.
self.put_json('/createhandler/data/%s' % exp_id, {
'change_list': [{
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': state_name,
'property_name': (
exp_domain.STATE_PROPERTY_UNCLASSIFIED_ANSWERS),
'new_value': ['sad']
}],
'commit_message': 'Update confirmed unclassified answers',
'version': exploration_dict['version'],
}, csrf_token)
response_dict = self.get_json(url)
# NOTE: Here, the return data here should really be
# _create_training_data('joyful'). However, it is the
# empty list here because unhandled answers have not been
# implemented yet.
self.assertEqual(response_dict['unhandled_answers'], [])
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
# If one of the values is added to the training data of the
# classifier, then it should not be returned as an unhandled answer.
state = exploration_dict['exploration']['states'][state_name]
answer_group = state['interaction']['answer_groups'][1]
rule_spec = answer_group['rule_specs'][0]
self.assertEqual(
rule_spec['rule_type'], exp_domain.RULE_TYPE_CLASSIFIER)
rule_spec['inputs']['training_data'].append('joyful')
self.put_json('/createhandler/data/%s' % exp_id, {
'change_list': [{
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': state_name,
'property_name': (
exp_domain.STATE_PROPERTY_UNCLASSIFIED_ANSWERS),
'new_value': []
}, {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': state_name,
'property_name': (
exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS),
'new_value': state['interaction']['answer_groups']
}],
'commit_message': 'Update confirmed unclassified answers',
'version': exploration_dict['version'],
}, csrf_token)
response_dict = self.get_json(url)
# NOTE: Here, the return data here should really be
# _create_training_data('sad'). However, it is the
# empty list here because unhandled answers have not been
# implemented yet.
self.assertEqual(response_dict['unhandled_answers'], [])
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
# If both are classified, then nothing should be returned
# unhandled.
self.put_json('/createhandler/data/%s' % exp_id, {
'change_list': [{
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': state_name,
'property_name': (
exp_domain.STATE_PROPERTY_UNCLASSIFIED_ANSWERS),
'new_value': ['sad']
}],
'commit_message': 'Update confirmed unclassified answers',
'version': exploration_dict['version'],
}, csrf_token)
response_dict = self.get_json(url)
self.assertEqual(response_dict['unhandled_answers'], [])
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
# If one of the existing training data elements in the classifier
# is removed (5 in this case), but it is not backed up by an
# answer, it will not be returned as potential training data.
state = exploration_dict['exploration']['states'][state_name]
answer_group = state['interaction']['answer_groups'][1]
rule_spec = answer_group['rule_specs'][0]
del rule_spec['inputs']['training_data'][1]
self.put_json('/createhandler/data/15', {
'change_list': [{
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': state_name,
'property_name': (
exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS),
'new_value': state['interaction']['answer_groups']
}],
'commit_message': 'Update confirmed unclassified answers',
'version': exploration_dict['version'],
}, csrf_token)
response_dict = self.get_json(url)
self.assertEqual(response_dict['unhandled_answers'], [])
self.logout()
class DownloadIntegrationTest(BaseEditorControllerTest):
    """Test handler for exploration and state download."""

    # Expected JSON-format download payload: a mapping from state name to the
    # YAML serialization of that state.
    # NOTE(review): the YAML bodies below are compared byte-for-byte against
    # handler output — internal indentation assumed to be the serializer's
    # standard 2-space YAML; confirm against the handler's width=50 output.
    SAMPLE_JSON_CONTENT = {
        'State A': ("""classifier_model_id: null
content:
  audio_translations: {}
  html: ''
interaction:
  answer_groups: []
  confirmed_unclassified_answers: []
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  default_outcome:
    dest: State A
    feedback: []
    param_changes: []
  fallbacks: []
  hints: []
  id: TextInput
  solution: {}
param_changes: []
"""),
        'State B': ("""classifier_model_id: null
content:
  audio_translations: {}
  html: ''
interaction:
  answer_groups: []
  confirmed_unclassified_answers: []
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  default_outcome:
    dest: State B
    feedback: []
    param_changes: []
  fallbacks: []
  hints: []
  id: TextInput
  solution: {}
param_changes: []
"""),
        feconf.DEFAULT_INIT_STATE_NAME: ("""classifier_model_id: null
content:
  audio_translations: {}
  html: ''
interaction:
  answer_groups: []
  confirmed_unclassified_answers: []
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  default_outcome:
    dest: %s
    feedback: []
    param_changes: []
  fallbacks: []
  hints: []
  id: TextInput
  solution: {}
param_changes: []
""") % feconf.DEFAULT_INIT_STATE_NAME
    }

    # Expected YAML serialization of a single state, for the state-yaml
    # handler test below.
    SAMPLE_STATE_STRING = ("""classifier_model_id: null
content:
  audio_translations: {}
  html: ''
interaction:
  answer_groups: []
  confirmed_unclassified_answers: []
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  default_outcome:
    dest: State A
    feedback: []
    param_changes: []
  fallbacks: []
  hints: []
  id: TextInput
  solution: {}
param_changes: []
""")

    def test_exploration_download_handler_for_default_exploration(self):
        """Downloads an exploration as a ZIP and as JSON, and checks both
        against golden data."""
        self.login(self.EDITOR_EMAIL)
        owner_id = self.get_user_id_from_email(self.EDITOR_EMAIL)

        # Create a simple exploration
        exp_id = 'eid'
        self.save_new_valid_exploration(
            exp_id, owner_id,
            title='The title for ZIP download handler test!',
            category='This is just a test category',
            objective='')

        exploration = exp_services.get_exploration_by_id(exp_id)
        init_state = exploration.states[exploration.init_state_name]
        init_interaction = init_state.interaction
        init_interaction.default_outcome.dest = exploration.init_state_name
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')
        exploration.rename_state('State 2', 'State B')
        exploration.delete_state('State 3')
        exp_services._save_exploration(  # pylint: disable=protected-access
            owner_id, exploration, '', [])
        response = self.testapp.get('/create/%s' % exp_id)

        # Check download to zip file
        # Download to zip file using download handler
        download_url = '/createhandler/download/%s' % exp_id
        response = self.testapp.get(download_url)

        # Check downloaded zip file
        self.assertEqual(response.headers['Content-Type'], 'text/plain')
        filename = 'oppia-ThetitleforZIPdownloadhandlertest!-v2.zip'
        self.assertEqual(response.headers['Content-Disposition'],
                         'attachment; filename=%s' % str(filename))
        zf_saved = zipfile.ZipFile(StringIO.StringIO(response.body))
        self.assertEqual(
            zf_saved.namelist(),
            ['The title for ZIP download handler test!.yaml'])

        # Load golden zip file
        with open(os.path.join(
                feconf.TESTS_DATA_DIR,
                'oppia-ThetitleforZIPdownloadhandlertest!-v2-gold.zip'),
                  'rb') as f:
            golden_zipfile = f.read()
        zf_gold = zipfile.ZipFile(StringIO.StringIO(golden_zipfile))

        # Compare saved with golden file
        self.assertEqual(
            zf_saved.open(
                'The title for ZIP download handler test!.yaml').read(),
            zf_gold.open(
                'The title for ZIP download handler test!.yaml').read())

        # Check download to JSON
        exploration.update_objective('Test JSON download')
        exp_services._save_exploration(  # pylint: disable=protected-access
            owner_id, exploration, '', [])

        # Download to JSON string using download handler
        self.maxDiff = None
        download_url = (
            '/createhandler/download/%s?output_format=%s&width=50' %
            (exp_id, feconf.OUTPUT_FORMAT_JSON))
        response = self.get_json(download_url)

        # Check downloaded dict
        self.assertEqual(self.SAMPLE_JSON_CONTENT, response)

        self.logout()

    def test_state_yaml_handler(self):
        """Checks that the state-yaml handler serializes a single state dict
        into the expected YAML string."""
        self.login(self.EDITOR_EMAIL)
        owner_id = self.get_user_id_from_email(self.EDITOR_EMAIL)

        # Create a simple exploration
        exp_id = 'eid'
        self.save_new_valid_exploration(
            exp_id, owner_id,
            title='The title for states download handler test!',
            category='This is just a test category')

        exploration = exp_services.get_exploration_by_id(exp_id)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')

        response = self.testapp.get(
            '%s/%s' % (feconf.EDITOR_URL_PREFIX, exp_id))
        csrf_token = self.get_csrf_token_from_response(response)
        response = self.post_json('/createhandler/state_yaml/%s' % exp_id, {
            'state_dict': exploration.states['State A'].to_dict(),
            'width': 50,
        }, csrf_token=csrf_token)
        self.assertEqual({
            'yaml': self.SAMPLE_STATE_STRING
        }, response)

        self.logout()
class ExplorationDeletionRightsTest(BaseEditorControllerTest):
    """Tests who may delete explorations, and what gets logged when they do."""

    def test_deletion_rights_for_unpublished_exploration(self):
        """Test rights management for deletion of unpublished explorations."""
        unpublished_exp_id = 'unpublished_eid'
        exploration = exp_domain.Exploration.create_default_exploration(
            unpublished_exp_id)
        exp_services.save_new_exploration(self.owner_id, exploration)

        rights_manager.assign_role_for_exploration(
            self.owner_id, unpublished_exp_id, self.editor_id,
            rights_manager.ROLE_EDITOR)

        # An editor of the exploration may not delete it.
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % unpublished_exp_id, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        # Nor may an unrelated viewer.
        self.login(self.VIEWER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % unpublished_exp_id, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        # Only the owner may delete an unpublished exploration.
        self.login(self.OWNER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % unpublished_exp_id)
        self.assertEqual(response.status_int, 200)
        self.logout()

    def test_deletion_rights_for_published_exploration(self):
        """Test rights management for deletion of published explorations."""
        published_exp_id = 'published_eid'
        exploration = exp_domain.Exploration.create_default_exploration(
            published_exp_id, title='A title', category='A category')
        exp_services.save_new_exploration(self.owner_id, exploration)

        rights_manager.assign_role_for_exploration(
            self.owner_id, published_exp_id, self.editor_id,
            rights_manager.ROLE_EDITOR)
        rights_manager.publish_exploration(self.owner_id, published_exp_id)

        # Neither editors, viewers, nor even the owner may delete a
        # published exploration.
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % published_exp_id, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        self.login(self.VIEWER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % published_exp_id, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        self.login(self.OWNER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % published_exp_id, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        # Admins may delete a published exploration.
        self.login(self.ADMIN_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % published_exp_id)
        self.assertEqual(response.status_int, 200)
        self.logout()

    def test_logging_info_after_deletion(self):
        """Test correctness of logged statements while deleting exploration."""
        observed_log_messages = []

        def add_logging_info(msg, *_):
            # Message logged by function clear_all_pending() in
            # oppia_tools/google_appengine_1.9.50/google_appengine/google/
            # appengine/ext/ndb/tasklets.py, not to be checked here.
            log_from_google_app_engine = 'all_pending: clear %s'

            if msg != log_from_google_app_engine:
                observed_log_messages.append(msg)

        # Capture both info- and debug-level log messages for the duration
        # of the deletions below.
        with self.swap(logging, 'info', add_logging_info), self.swap(
            logging, 'debug', add_logging_info):
            # Checking for non-moderator/non-admin.
            exp_id = 'unpublished_eid'
            exploration = exp_domain.Exploration.create_default_exploration(
                exp_id)
            exp_services.save_new_exploration(self.owner_id, exploration)

            self.login(self.OWNER_EMAIL)
            self.testapp.delete(
                '/createhandler/data/%s' % exp_id, expect_errors=True)

            # Observed_log_messages[1] is 'Attempting to delete documents
            # from index %s, ids: %s' % (index.name, ', '.join(doc_ids)). It
            # is logged by function delete_documents_from_index in
            # oppia/core/platform/search/gae_search_services.py,
            # not to be checked here (same for admin and moderator).
            self.assertEqual(len(observed_log_messages), 3)
            self.assertEqual(observed_log_messages[0],
                             '(%s) %s tried to delete exploration %s' %
                             (feconf.ROLE_ID_EXPLORATION_EDITOR,
                              self.owner_id, exp_id))
            self.assertEqual(observed_log_messages[2],
                             '(%s) %s deleted exploration %s' %
                             (feconf.ROLE_ID_EXPLORATION_EDITOR,
                              self.owner_id, exp_id))
            self.logout()

            # Checking for admin.
            observed_log_messages = []
            exp_id = 'unpublished_eid'
            exploration = exp_domain.Exploration.create_default_exploration(
                exp_id)
            exp_services.save_new_exploration(self.admin_id, exploration)

            self.login(self.ADMIN_EMAIL)
            self.testapp.delete(
                '/createhandler/data/%s' % exp_id, expect_errors=True)
            self.assertEqual(len(observed_log_messages), 3)
            self.assertEqual(observed_log_messages[0],
                             '(%s) %s tried to delete exploration %s' %
                             (feconf.ROLE_ID_ADMIN, self.admin_id, exp_id))
            self.assertEqual(observed_log_messages[2],
                             '(%s) %s deleted exploration %s' %
                             (feconf.ROLE_ID_ADMIN, self.admin_id, exp_id))
            self.logout()

            # Checking for moderator.
            observed_log_messages = []
            exp_id = 'unpublished_eid'
            exploration = exp_domain.Exploration.create_default_exploration(
                exp_id)
            exp_services.save_new_exploration(self.moderator_id, exploration)

            self.login(self.MODERATOR_EMAIL)
            self.testapp.delete(
                '/createhandler/data/%s' % exp_id, expect_errors=True)
            self.assertEqual(len(observed_log_messages), 3)
            self.assertEqual(observed_log_messages[0],
                             '(%s) %s tried to delete exploration %s' %
                             (feconf.ROLE_ID_MODERATOR,
                              self.moderator_id, exp_id))
            self.assertEqual(observed_log_messages[2],
                             '(%s) %s deleted exploration %s' %
                             (feconf.ROLE_ID_MODERATOR,
                              self.moderator_id, exp_id))
            self.logout()
class VersioningIntegrationTest(BaseEditorControllerTest):
    """Test retrieval of and reverting to old exploration versions."""

    # Demo exploration used as the fixture for versioning tests.
    EXP_ID = '0'

    def setUp(self):
        """Create exploration with two versions"""
        super(VersioningIntegrationTest, self).setUp()

        exp_services.load_demo(self.EXP_ID)
        rights_manager.release_ownership_of_exploration(
            feconf.SYSTEM_COMMITTER_ID, self.EXP_ID)

        self.login(self.EDITOR_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)

        # In version 2, change the objective and the initial state content.
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        exp_services.update_exploration(
            self.editor_id, self.EXP_ID, [{
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective',
            }, {
                'cmd': 'edit_state_property',
                'property_name': 'content',
                'state_name': exploration.init_state_name,
                'new_value': {
                    'html': 'ABC',
                    'audio_translations': {},
                },
            }], 'Change objective and init state content')

    def test_reverting_to_old_exploration(self):
        """Test reverting to old exploration versions."""
        # Open editor page
        response = self.testapp.get(
            '%s/%s' % (feconf.EDITOR_URL_PREFIX, self.EXP_ID))
        csrf_token = self.get_csrf_token_from_response(response)

        # May not revert to any version that's not 1
        # (out-of-range ints, and non-int values, must all be rejected).
        for rev_version in (-1, 0, 2, 3, 4, '1', ()):
            response_dict = self.post_json(
                '/createhandler/revert/%s' % self.EXP_ID, {
                    'current_version': 2,
                    'revert_to_version': rev_version
                }, csrf_token, expect_errors=True, expected_status_int=400)

            # Check error message
            if not isinstance(rev_version, int):
                self.assertIn('Expected an integer', response_dict['error'])
            else:
                self.assertIn('Cannot revert to version',
                              response_dict['error'])

            # Check that exploration is really not reverted to old version
            reader_dict = self.get_json(
                '%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
            init_state_name = reader_dict['exploration']['init_state_name']
            init_state_data = (
                reader_dict['exploration']['states'][init_state_name])
            init_content = init_state_data['content']['html']
            self.assertIn('ABC', init_content)
            self.assertNotIn('Hi, welcome to Oppia!', init_content)

        # Revert to version 1
        rev_version = 1
        response_dict = self.post_json(
            '/createhandler/revert/%s' % self.EXP_ID, {
                'current_version': 2,
                'revert_to_version': rev_version
            }, csrf_token)

        # Check that exploration is really reverted to version 1
        reader_dict = self.get_json(
            '%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))

        init_state_name = reader_dict['exploration']['init_state_name']
        init_state_data = (
            reader_dict['exploration']['states'][init_state_name])
        init_content = init_state_data['content']['html']
        self.assertNotIn('ABC', init_content)
        self.assertIn('Hi, welcome to Oppia!', init_content)

    def test_versioning_for_default_exploration(self):
        """Test retrieval of old exploration versions."""
        # The latest version contains 'ABC'.
        reader_dict = self.get_json(
            '%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
        init_state_name = reader_dict['exploration']['init_state_name']
        init_state_data = (
            reader_dict['exploration']['states'][init_state_name])
        init_content = init_state_data['content']['html']
        self.assertIn('ABC', init_content)
        self.assertNotIn('Hi, welcome to Oppia!', init_content)

        # v1 contains 'Hi, welcome to Oppia!'.
        reader_dict = self.get_json(
            '%s/%s?v=1' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
        init_state_name = reader_dict['exploration']['init_state_name']
        init_state_data = (
            reader_dict['exploration']['states'][init_state_name])
        init_content = init_state_data['content']['html']
        self.assertIn('Hi, welcome to Oppia!', init_content)
        self.assertNotIn('ABC', init_content)

        # v2 contains 'ABC'.
        reader_dict = self.get_json(
            '%s/%s?v=2' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))
        init_state_name = reader_dict['exploration']['init_state_name']
        init_state_data = (
            reader_dict['exploration']['states'][init_state_name])
        init_content = init_state_data['content']['html']
        self.assertIn('ABC', init_content)
        self.assertNotIn('Hi, welcome to Oppia!', init_content)

        # v3 does not exist.
        response = self.testapp.get(
            '%s/%s?v=3' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID),
            expect_errors=True)
        self.assertEqual(response.status_int, 404)
class ExplorationEditRightsTest(BaseEditorControllerTest):
    """Test the handling of edit rights for explorations."""

    def test_user_banning(self):
        """Test that banned users are banned."""

        exp_id = '0'
        exp_services.load_demo(exp_id)
        rights_manager.release_ownership_of_exploration(
            feconf.SYSTEM_COMMITTER_ID, exp_id)

        # Sign-up new editors Joe and Sandra.
        self.signup('joe@example.com', 'joe')
        self.signup('sandra@example.com', 'sandra')

        # Joe logs in.
        self.login('joe@example.com')

        response = self.testapp.get(feconf.LIBRARY_INDEX_URL)
        self.assertEqual(response.status_int, 200)
        response = self.testapp.get('/create/%s' % exp_id)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)

        # Ban joe.
        self.set_banned_users(['joe'])

        # Test that Joe is banned. (He can still access the library page.)
        response = self.testapp.get(
            feconf.LIBRARY_INDEX_URL, expect_errors=True)
        self.assertEqual(response.status_int, 200)
        response = self.testapp.get('/create/%s' % exp_id, expect_errors=True)
        self.assertEqual(response.status_int, 200)
        self.assert_cannot_edit(response.body)

        # Joe logs out.
        self.logout()

        # Sandra logs in and is unaffected.
        self.login('sandra@example.com')
        response = self.testapp.get('/create/%s' % exp_id)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        self.logout()
class ExplorationRightsIntegrationTest(BaseEditorControllerTest):
    """Test the handler for managing exploration editing rights."""

    # Extra collaborator accounts used only by this test class.
    COLLABORATOR_EMAIL = 'collaborator@example.com'
    COLLABORATOR_USERNAME = 'collab'
    COLLABORATOR2_EMAIL = 'collaborator2@example.com'
    COLLABORATOR2_USERNAME = 'collab2'
    COLLABORATOR3_EMAIL = 'collaborator3@example.com'
    COLLABORATOR3_USERNAME = 'collab3'

    def test_exploration_rights_handler(self):
        """Test exploration rights handler."""

        # Create several users
        self.signup(
            self.COLLABORATOR_EMAIL, username=self.COLLABORATOR_USERNAME)
        self.signup(
            self.COLLABORATOR2_EMAIL, username=self.COLLABORATOR2_USERNAME)
        self.signup(
            self.COLLABORATOR3_EMAIL, username=self.COLLABORATOR3_USERNAME)

        # Owner creates exploration
        self.login(self.OWNER_EMAIL)
        exp_id = 'eid'
        self.save_new_valid_exploration(
            exp_id, self.owner_id, title='Title for rights handler test!',
            category='My category')

        exploration = exp_services.get_exploration_by_id(exp_id)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')

        response = self.testapp.get(
            '%s/%s' % (feconf.EDITOR_URL_PREFIX, exp_id))
        csrf_token = self.get_csrf_token_from_response(response)

        # Owner adds rights for other users
        rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, exp_id)
        self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.VIEWER_USERNAME,
                'new_member_role': rights_manager.ROLE_VIEWER
            }, csrf_token)
        self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR
            }, csrf_token)
        self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR2_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR
            }, csrf_token)

        self.logout()

        # Check that viewer can access editor page but cannot edit.
        self.login(self.VIEWER_EMAIL)
        response = self.testapp.get('/create/%s' % exp_id, expect_errors=True)
        self.assertEqual(response.status_int, 200)
        self.assert_cannot_edit(response.body)
        self.logout()

        # Check that collaborator can access editor page and can edit.
        self.login(self.COLLABORATOR_EMAIL)
        response = self.testapp.get('/create/%s' % exp_id)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        csrf_token = self.get_csrf_token_from_response(response)

        # Check that collaborator can add a new state called 'State 4'
        add_url = '%s/%s' % (feconf.EXPLORATION_DATA_PREFIX, exp_id)
        response_dict = self.put_json(
            add_url,
            {
                'version': exploration.version,
                'commit_message': 'Added State 4',
                'change_list': [{
                    'cmd': 'add_state',
                    'state_name': 'State 4'
                }, {
                    'cmd': 'edit_state_property',
                    'state_name': 'State 4',
                    'property_name': 'widget_id',
                    'new_value': 'TextInput',
                }]
            },
            csrf_token=csrf_token,
            expected_status_int=200
        )
        self.assertIn('State 4', response_dict['states'])

        # Check that collaborator cannot add new members
        exploration = exp_services.get_exploration_by_id(exp_id)
        rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, exp_id)
        response_dict = self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR3_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR,
            }, csrf_token, expect_errors=True, expected_status_int=401)
        self.assertEqual(response_dict['code'], 401)

        self.logout()

        # Check that collaborator2 can access editor page and can edit.
        self.login(self.COLLABORATOR2_EMAIL)
        response = self.testapp.get('/create/%s' % exp_id)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        csrf_token = self.get_csrf_token_from_response(response)

        # Check that collaborator2 can add a new state called 'State 5'
        add_url = '%s/%s' % (feconf.EXPLORATION_DATA_PREFIX, exp_id)
        response_dict = self.put_json(
            add_url,
            {
                'version': exploration.version,
                'commit_message': 'Added State 5',
                'change_list': [{
                    'cmd': 'add_state',
                    'state_name': 'State 5'
                }, {
                    'cmd': 'edit_state_property',
                    'state_name': 'State 5',
                    'property_name': 'widget_id',
                    'new_value': 'TextInput',
                }]
            },
            csrf_token=csrf_token,
            expected_status_int=200
        )
        self.assertIn('State 5', response_dict['states'])

        # Check that collaborator2 cannot add new members.
        exploration = exp_services.get_exploration_by_id(exp_id)
        rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, exp_id)
        response_dict = self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR3_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR,
            }, csrf_token, expect_errors=True, expected_status_int=401)
        self.assertEqual(response_dict['code'], 401)

        self.logout()
class UserExplorationEmailsIntegrationTest(BaseEditorControllerTest):
    """Test the handler for user email notification preferences."""

    def test_user_exploration_emails_handler(self):
        """Test user exploration emails handler."""

        # Owner creates exploration
        self.login(self.OWNER_EMAIL)
        exp_id = 'eid'
        self.save_new_valid_exploration(
            exp_id, self.owner_id, title='Title for emails handler test!',
            category='Category')

        exploration = exp_services.get_exploration_by_id(exp_id)
        response = self.testapp.get(
            '%s/%s' % (feconf.EDITOR_URL_PREFIX, exp_id))
        csrf_token = self.get_csrf_token_from_response(response)

        # Both mute flags default to False.
        exp_email_preferences = (
            user_services.get_email_preferences_for_exploration(
                self.owner_id, exp_id))
        self.assertFalse(exp_email_preferences.mute_feedback_notifications)
        self.assertFalse(exp_email_preferences.mute_suggestion_notifications)

        # Owner changes email preferences
        emails_url = '%s/%s' % (feconf.USER_EXPLORATION_EMAILS_PREFIX, exp_id)
        self.put_json(
            emails_url, {
                'version': exploration.version,
                'mute': True,
                'message_type': 'feedback'
            }, csrf_token)

        # Muting 'feedback' must not affect the 'suggestion' flag.
        exp_email_preferences = (
            user_services.get_email_preferences_for_exploration(
                self.owner_id, exp_id))
        self.assertTrue(exp_email_preferences.mute_feedback_notifications)
        self.assertFalse(exp_email_preferences.mute_suggestion_notifications)

        # Mute 'suggestion' and then unmute 'feedback'; each flag should
        # track only its own message_type.
        self.put_json(
            emails_url, {
                'version': exploration.version,
                'mute': True,
                'message_type': 'suggestion'
            }, csrf_token)
        self.put_json(
            emails_url, {
                'version': exploration.version,
                'mute': False,
                'message_type': 'feedback'
            }, csrf_token)

        exp_email_preferences = (
            user_services.get_email_preferences_for_exploration(
                self.owner_id, exp_id))
        self.assertFalse(exp_email_preferences.mute_feedback_notifications)
        self.assertTrue(exp_email_preferences.mute_suggestion_notifications)

        self.logout()
class ModeratorEmailsTest(test_utils.GenericTestBase):
    """Integration test for post-moderator action emails."""

    EXP_ID = 'eid'

    def setUp(self):
        """Creates an editor with a published exploration, a moderator, and
        the default moderator-action email bodies.
        """
        super(ModeratorEmailsTest, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.set_moderators([self.MODERATOR_USERNAME])

        # The editor publishes an exploration.
        self.save_new_valid_exploration(
            self.EXP_ID, self.editor_id, title='My Exploration',
            end_state_name='END')
        rights_manager.publish_exploration(self.editor_id, self.EXP_ID)

        # Set the default email config.
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        config_services.set_property(
            self.admin_id, 'publicize_exploration_email_html_body',
            'Default publicization email body')
        config_services.set_property(
            self.admin_id, 'unpublish_exploration_email_html_body',
            'Default unpublishing email body')

    def test_error_cases_for_email_sending(self):
        """Invalid actions and missing email bodies are rejected with 400;
        a valid request fails with 500 while CAN_SEND_EMAILS is off and
        succeeds once it is on.
        """
        with self.swap(
            feconf, 'REQUIRE_EMAIL_ON_MODERATOR_ACTION', True
            ), self.swap(
                feconf, 'CAN_SEND_EMAILS', False):
            # Log in as a moderator.
            self.login(self.MODERATOR_EMAIL)

            # Go to the exploration editor page.
            response = self.testapp.get('/create/%s' % self.EXP_ID)
            self.assertEqual(response.status_int, 200)
            csrf_token = self.get_csrf_token_from_response(response)

            # Submit an invalid action. This should cause an error.
            response_dict = self.put_json(
                '/createhandler/moderatorrights/%s' % self.EXP_ID, {
                    'action': 'random_action',
                    'email_body': None,
                    'version': 1,
                }, csrf_token, expect_errors=True, expected_status_int=400)
            self.assertEqual(
                response_dict['error'], 'Invalid moderator action.')

            # Try to publicize the exploration without an email body. This
            # should cause an error.
            response_dict = self.put_json(
                '/createhandler/moderatorrights/%s' % self.EXP_ID, {
                    'action': feconf.MODERATOR_ACTION_PUBLICIZE_EXPLORATION,
                    'email_body': None,
                    'version': 1,
                }, csrf_token, expect_errors=True, expected_status_int=400)
            self.assertIn(
                'Moderator actions should include an email',
                response_dict['error'])

            # An empty email body is rejected just like a missing one.
            response_dict = self.put_json(
                '/createhandler/moderatorrights/%s' % self.EXP_ID, {
                    'action': feconf.MODERATOR_ACTION_PUBLICIZE_EXPLORATION,
                    'email_body': '',
                    'version': 1,
                }, csrf_token, expect_errors=True, expected_status_int=400)
            self.assertIn(
                'Moderator actions should include an email',
                response_dict['error'])

            # Try to publicize the exploration even if the relevant feconf
            # flags are not set. This should cause a system error.
            valid_payload = {
                'action': feconf.MODERATOR_ACTION_PUBLICIZE_EXPLORATION,
                'email_body': 'Your exploration is featured!',
                'version': 1,
            }
            self.put_json(
                '/createhandler/moderatorrights/%s' % self.EXP_ID,
                valid_payload, csrf_token, expect_errors=True,
                expected_status_int=500)

            with self.swap(feconf, 'CAN_SEND_EMAILS', True):
                # Now the email gets sent with no error.
                self.put_json(
                    '/createhandler/moderatorrights/%s' % self.EXP_ID,
                    valid_payload, csrf_token)

            # Log out.
            self.logout()

    def test_email_is_sent_correctly_when_publicizing(self):
        """A publicize action emails the editor with the moderator-supplied
        body in both plain-text and HTML form.
        """
        with self.swap(
            feconf, 'REQUIRE_EMAIL_ON_MODERATOR_ACTION', True
            ), self.swap(
                feconf, 'CAN_SEND_EMAILS', True):
            # Log in as a moderator.
            self.login(self.MODERATOR_EMAIL)

            # Go to the exploration editor page.
            response = self.testapp.get('/create/%s' % self.EXP_ID)
            self.assertEqual(response.status_int, 200)
            csrf_token = self.get_csrf_token_from_response(response)

            new_email_body = 'Your exploration is featured!'
            valid_payload = {
                'action': feconf.MODERATOR_ACTION_PUBLICIZE_EXPLORATION,
                'email_body': new_email_body,
                'version': 1,
            }
            self.put_json(
                '/createhandler/moderatorrights/%s' % self.EXP_ID,
                valid_payload, csrf_token)

            # Check that an email was sent with the correct content.
            messages = self.mail_stub.get_sent_messages(
                to=self.EDITOR_EMAIL)
            self.assertEqual(1, len(messages))

            self.assertEqual(
                messages[0].sender,
                'Site Admin <%s>' % feconf.SYSTEM_EMAIL_ADDRESS)
            self.assertEqual(messages[0].to, self.EDITOR_EMAIL)
            self.assertFalse(hasattr(messages[0], 'cc'))
            self.assertEqual(messages[0].bcc, feconf.ADMIN_EMAIL_ADDRESS)
            self.assertEqual(
                messages[0].subject,
                'Your Oppia exploration "My Exploration" has been featured!')
            self.assertEqual(messages[0].body.decode(), (
                'Hi %s,\n\n'
                '%s\n\n'
                'Thanks!\n'
                '%s (Oppia moderator)\n\n'
                'You can change your email preferences via the Preferences '
                'page.' % (
                    self.EDITOR_USERNAME,
                    new_email_body,
                    self.MODERATOR_USERNAME)))
            self.assertEqual(messages[0].html.decode(), (
                'Hi %s,<br><br>'
                '%s<br><br>'
                'Thanks!<br>'
                '%s (Oppia moderator)<br><br>'
                'You can change your email preferences via the '
                '<a href="https://www.example.com">Preferences</a> page.' % (
                    self.EDITOR_USERNAME,
                    new_email_body,
                    self.MODERATOR_USERNAME)))
            self.logout()

    def test_email_is_sent_correctly_when_unpublishing(self):
        """An unpublish action emails the editor with the moderator-supplied
        body in both plain-text and HTML form.
        """
        with self.swap(
            feconf, 'REQUIRE_EMAIL_ON_MODERATOR_ACTION', True
            ), self.swap(
                feconf, 'CAN_SEND_EMAILS', True):
            # Log in as a moderator.
            self.login(self.MODERATOR_EMAIL)

            # Go to the exploration editor page.
            response = self.testapp.get('/create/%s' % self.EXP_ID)
            self.assertEqual(response.status_int, 200)
            csrf_token = self.get_csrf_token_from_response(response)

            new_email_body = 'Your exploration is unpublished :('
            valid_payload = {
                'action': feconf.MODERATOR_ACTION_UNPUBLISH_EXPLORATION,
                'email_body': new_email_body,
                'version': 1,
            }
            self.put_json(
                '/createhandler/moderatorrights/%s' % self.EXP_ID,
                valid_payload, csrf_token)

            # Check that an email was sent with the correct content.
            messages = self.mail_stub.get_sent_messages(
                to=self.EDITOR_EMAIL)
            self.assertEqual(1, len(messages))

            self.assertEqual(
                messages[0].sender,
                'Site Admin <%s>' % feconf.SYSTEM_EMAIL_ADDRESS)
            self.assertEqual(messages[0].to, self.EDITOR_EMAIL)
            self.assertFalse(hasattr(messages[0], 'cc'))
            self.assertEqual(messages[0].bcc, feconf.ADMIN_EMAIL_ADDRESS)
            self.assertEqual(
                messages[0].subject,
                'Your Oppia exploration "My Exploration" has been unpublished')
            self.assertEqual(messages[0].body.decode(), (
                'Hi %s,\n\n'
                '%s\n\n'
                'Thanks!\n'
                '%s (Oppia moderator)\n\n'
                'You can change your email preferences via the Preferences '
                'page.' % (
                    self.EDITOR_USERNAME,
                    new_email_body,
                    self.MODERATOR_USERNAME)))
            self.assertEqual(messages[0].html.decode(), (
                'Hi %s,<br><br>'
                '%s<br><br>'
                'Thanks!<br>'
                '%s (Oppia moderator)<br><br>'
                'You can change your email preferences via the '
                '<a href="https://www.example.com">Preferences</a> page.' % (
                    self.EDITOR_USERNAME,
                    new_email_body,
                    self.MODERATOR_USERNAME)))
            self.logout()

    def test_email_functionality_cannot_be_used_by_non_moderators(self):
        """A non-moderator submitting a moderator action gets a 401."""
        with self.swap(
            feconf, 'REQUIRE_EMAIL_ON_MODERATOR_ACTION', True
            ), self.swap(
                feconf, 'CAN_SEND_EMAILS', True):
            # Log in as a non-moderator.
            self.login(self.EDITOR_EMAIL)

            # Go to the exploration editor page.
            response = self.testapp.get('/create/%s' % self.EXP_ID)
            self.assertEqual(response.status_int, 200)
            csrf_token = self.get_csrf_token_from_response(response)

            new_email_body = 'Your exploration is unpublished :('
            valid_payload = {
                'action': feconf.MODERATOR_ACTION_UNPUBLISH_EXPLORATION,
                'email_body': new_email_body,
                'version': 1,
            }

            # The user should receive an 'unauthorized user' error.
            self.put_json(
                '/createhandler/moderatorrights/%s' % self.EXP_ID,
                valid_payload, csrf_token, expect_errors=True,
                expected_status_int=401)
            self.logout()
class EditorAutosaveTest(BaseEditorControllerTest):
    """Test the handling of editor autosave actions."""

    EXP_ID1 = '1'
    EXP_ID2 = '2'
    EXP_ID3 = '3'
    # 30 days into the future.
    NEWER_DATETIME = datetime.datetime.utcnow() + datetime.timedelta(30)
    # A date in the past.
    OLDER_DATETIME = datetime.datetime.strptime('2015-03-16', '%Y-%m-%d')
    # A valid draft that retitles the exploration.
    DRAFT_CHANGELIST = [{
        'cmd': 'edit_exploration_property',
        'property_name': 'title',
        'new_value': 'Updated title'}]
    # A second valid draft used when overwriting an existing one.
    NEW_CHANGELIST = [{
        'cmd': 'edit_exploration_property',
        'property_name': 'title',
        'new_value': 'New title'}]
    # Invalid because the title must be a string, not an int.
    INVALID_CHANGELIST = [{
        'cmd': 'edit_exploration_property',
        'property_name': 'title',
        'new_value': 1}]

    def _create_explorations_for_tests(self):
        """Creates three owned explorations; the first gets an extra state."""
        self.save_new_valid_exploration(self.EXP_ID1, self.owner_id)
        exploration = exp_services.get_exploration_by_id(self.EXP_ID1)
        exploration.add_states(['State A'])
        exploration.states['State A'].update_interaction_id('TextInput')
        self.save_new_valid_exploration(self.EXP_ID2, self.owner_id)
        self.save_new_valid_exploration(self.EXP_ID3, self.owner_id)

    def _create_exp_user_data_model_objects_for_tests(self):
        """Seeds ExplorationUserDataModel entries: EXP_ID1 with a draft whose
        timestamp is in the future, EXP_ID2 with an older draft, and EXP_ID3
        with no draft at all.
        """
        # Explorations with draft set.
        user_models.ExplorationUserDataModel(
            id='%s.%s' % (self.owner_id, self.EXP_ID1), user_id=self.owner_id,
            exploration_id=self.EXP_ID1,
            draft_change_list=self.DRAFT_CHANGELIST,
            draft_change_list_last_updated=self.NEWER_DATETIME,
            draft_change_list_exp_version=1,
            draft_change_list_id=1).put()
        user_models.ExplorationUserDataModel(
            id='%s.%s' % (self.owner_id, self.EXP_ID2), user_id=self.owner_id,
            exploration_id=self.EXP_ID2,
            draft_change_list=self.DRAFT_CHANGELIST,
            draft_change_list_last_updated=self.OLDER_DATETIME,
            draft_change_list_exp_version=1,
            draft_change_list_id=1).put()
        # Exploration with no draft.
        user_models.ExplorationUserDataModel(
            id='%s.%s' % (self.owner_id, self.EXP_ID3), user_id=self.owner_id,
            exploration_id=self.EXP_ID3).put()

    def setUp(self):
        """Logs in the owner, seeds the fixtures and fetches a CSRF token."""
        super(EditorAutosaveTest, self).setUp()
        self.login(self.OWNER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self._create_explorations_for_tests()
        self._create_exp_user_data_model_objects_for_tests()

        # Generate CSRF token.
        response = self.testapp.get('/create/%s' % self.EXP_ID1)
        self.csrf_token = self.get_csrf_token_from_response(response)

    def test_exploration_loaded_with_draft_applied(self):
        """A valid stored draft is applied when apply_draft is passed."""
        response = self.get_json(
            '/createhandler/data/%s' % self.EXP_ID2, {'apply_draft': True})
        # Title updated because change list was applied.
        self.assertEqual(response['title'], 'Updated title')
        self.assertTrue(response['is_version_of_draft_valid'])
        self.assertEqual(response['draft_change_list_id'], 1)
        # Draft changes passed to UI.
        self.assertEqual(response['draft_changes'], self.DRAFT_CHANGELIST)

    def test_exploration_loaded_without_draft_when_draft_version_invalid(self):
        """A draft against a non-existent version is not applied, but it is
        still surfaced to the UI.
        """
        exp_user_data = user_models.ExplorationUserDataModel.get_by_id(
            '%s.%s' % (self.owner_id, self.EXP_ID2))
        exp_user_data.draft_change_list_exp_version = 20
        exp_user_data.put()
        response = self.get_json(
            '/createhandler/data/%s' % self.EXP_ID2, {'apply_draft': True})
        # Title not updated because change list not applied.
        self.assertEqual(response['title'], 'A title')
        self.assertFalse(response['is_version_of_draft_valid'])
        self.assertEqual(response['draft_change_list_id'], 1)
        # Draft changes passed to UI even when version is invalid.
        self.assertEqual(response['draft_changes'], self.DRAFT_CHANGELIST)

    def test_exploration_loaded_without_draft_as_draft_does_not_exist(self):
        """apply_draft on an exploration with no draft is a no-op."""
        response = self.get_json(
            '/createhandler/data/%s' % self.EXP_ID3, {'apply_draft': True})
        # Title not updated because change list not applied.
        self.assertEqual(response['title'], 'A title')
        self.assertIsNone(response['is_version_of_draft_valid'])
        self.assertEqual(response['draft_change_list_id'], 0)
        # Draft changes None.
        self.assertIsNone(response['draft_changes'])

    def test_draft_not_updated_because_newer_draft_exists(self):
        """An autosave loses to an already-stored newer draft (EXP_ID1's
        draft timestamp is in the future).
        """
        payload = {
            'change_list': self.NEW_CHANGELIST,
            'version': 1,
        }
        response = self.put_json(
            '/createhandler/autosave_draft/%s' % self.EXP_ID1, payload,
            self.csrf_token)
        # Check that draft change list hasn't been updated.
        exp_user_data = user_models.ExplorationUserDataModel.get_by_id(
            '%s.%s' % (self.owner_id, self.EXP_ID1))
        self.assertEqual(
            exp_user_data.draft_change_list, self.DRAFT_CHANGELIST)
        self.assertTrue(response['is_version_of_draft_valid'])
        self.assertEqual(response['draft_change_list_id'], 1)

    def test_draft_not_updated_validation_error(self):
        """An invalid change list is rejected with 400 and does not replace
        the previously autosaved draft.
        """
        self.put_json(
            '/createhandler/autosave_draft/%s' % self.EXP_ID2, {
                'change_list': self.DRAFT_CHANGELIST,
                'version': 1,
            }, self.csrf_token)
        response = self.put_json(
            '/createhandler/autosave_draft/%s' % self.EXP_ID2, {
                'change_list': self.INVALID_CHANGELIST,
                'version': 2,
            }, self.csrf_token, expect_errors=True, expected_status_int=400)
        exp_user_data = user_models.ExplorationUserDataModel.get_by_id(
            '%s.%s' % (self.owner_id, self.EXP_ID2))
        self.assertEqual(
            exp_user_data.draft_change_list, self.DRAFT_CHANGELIST)
        # The id is incremented the first time but not the second.
        self.assertEqual(exp_user_data.draft_change_list_id, 2)
        self.assertEqual(
            response, {'code': 400,
                       'error': 'Expected title to be a string, received 1'})

    def test_draft_updated_version_valid(self):
        """Autosaving against a valid version stores the draft and reports
        it as valid.
        """
        payload = {
            'change_list': self.NEW_CHANGELIST,
            'version': 1,
        }
        response = self.put_json(
            '/createhandler/autosave_draft/%s' % self.EXP_ID2, payload,
            self.csrf_token)
        exp_user_data = user_models.ExplorationUserDataModel.get_by_id(
            '%s.%s' % (self.owner_id, self.EXP_ID2))
        self.assertEqual(exp_user_data.draft_change_list, self.NEW_CHANGELIST)
        self.assertEqual(exp_user_data.draft_change_list_exp_version, 1)
        self.assertTrue(response['is_version_of_draft_valid'])
        self.assertEqual(response['draft_change_list_id'], 2)

    def test_draft_updated_version_invalid(self):
        """Autosaving against an invalid version still stores the draft but
        reports it as invalid.
        """
        payload = {
            'change_list': self.NEW_CHANGELIST,
            'version': 10,
        }
        response = self.put_json(
            '/createhandler/autosave_draft/%s' % self.EXP_ID2, payload,
            self.csrf_token)
        exp_user_data = user_models.ExplorationUserDataModel.get_by_id(
            '%s.%s' % (self.owner_id, self.EXP_ID2))
        self.assertEqual(exp_user_data.draft_change_list, self.NEW_CHANGELIST)
        self.assertEqual(exp_user_data.draft_change_list_exp_version, 10)
        self.assertFalse(response['is_version_of_draft_valid'])
        self.assertEqual(response['draft_change_list_id'], 2)

    def test_discard_draft(self):
        """POSTing to the autosave endpoint discards the stored draft."""
        self.post_json(
            '/createhandler/autosave_draft/%s' % self.EXP_ID2, {},
            self.csrf_token)
        exp_user_data = user_models.ExplorationUserDataModel.get_by_id(
            '%s.%s' % (self.owner_id, self.EXP_ID2))
        self.assertIsNone(exp_user_data.draft_change_list)
        self.assertIsNone(exp_user_data.draft_change_list_last_updated)
        self.assertIsNone(exp_user_data.draft_change_list_exp_version)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.convolutional_keras_box_predictor."""
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import convolutional_keras_box_predictor as box_predictor
from object_detection.predictors.heads import keras_box_head
from object_detection.predictors.heads import keras_class_head
from object_detection.predictors.heads import keras_mask_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
class ConvolutionalKerasBoxPredictorTest(test_case.TestCase):
  """Tests for ConvolutionalKerasBoxPredictor output shapes and variables."""

  def _build_conv_hyperparams(self):
    """Returns KerasLayerHyperparams with RELU_6 / l2 / truncated-normal."""
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      activation: RELU_6
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)

  def _build_box_predictor(self, num_classes=0,
                           num_predictions_per_location_list=(5,),
                           kernel_size=1, use_depthwise=False):
    """Builds the predictor under test with the shared configuration.

    Only the arguments that vary between the tests below are exposed; the
    rest of the builder arguments are fixed to one common configuration so
    the tests stay in sync.

    Args:
      num_classes: number of classes excluding background.
      num_predictions_per_location_list: anchors per spatial location, one
        entry per feature map.
      kernel_size: size of the prediction convolution kernel.
      use_depthwise: whether to use depthwise separable convolutions.

    Returns:
      A convolutional Keras box predictor.
    """
    return box_predictor_builder.build_convolutional_keras_box_predictor(
        is_training=False,
        num_classes=num_classes,
        conv_hyperparams=self._build_conv_hyperparams(),
        freeze_batchnorm=False,
        inplace_batchnorm_update=False,
        num_predictions_per_location_list=list(
            num_predictions_per_location_list),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        use_dropout=True,
        dropout_keep_prob=0.8,
        kernel_size=kernel_size,
        box_code_size=4,
        use_depthwise=use_depthwise)

  def test_get_boxes_for_five_aspect_ratios_per_location(self):
    """5 anchors/location on an 8x8 map yield 320 class-agnostic boxes."""
    def graph_fn(image_features):
      conv_box_predictor = self._build_box_predictor()
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 320, 1])

  def test_get_boxes_for_one_aspect_ratio_per_location(self):
    """1 anchor/location on an 8x8 map yields 64 class-agnostic boxes."""
    def graph_fn(image_features):
      conv_box_predictor = self._build_box_predictor(
          num_predictions_per_location_list=(1,))
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      objectness_predictions = tf.concat(box_predictions[
          box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
      return (box_encodings, objectness_predictions)
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    (box_encodings, objectness_predictions) = self.execute(graph_fn,
                                                           [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])
    self.assertAllEqual(objectness_predictions.shape, [4, 64, 1])

  def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
      self):
    """Class predictions include a background column: num_classes + 1."""
    num_classes_without_background = 6
    image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
    def graph_fn(image_features):
      conv_box_predictor = self._build_box_predictor(
          num_classes=num_classes_without_background)
      box_predictions = conv_box_predictor([image_features])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    (box_encodings,
     class_predictions_with_background) = self.execute(graph_fn,
                                                       [image_features])
    self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])
    self.assertAllEqual(class_predictions_with_background.shape,
                        [4, 320, num_classes_without_background+1])

  def test_get_predictions_with_feature_maps_of_dynamic_shape(
      self):
    """Anchor count scales with the (dynamic) spatial resolution, and the
    predictor creates exactly the expected trainable variables.
    """
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = self._build_box_predictor()
    box_predictions = conv_box_predictor([image_features])
    box_encodings = tf.concat(
        box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
    objectness_predictions = tf.concat(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
    self.assertAllEqual(objectness_predictions_shape,
                        [4, expected_num_anchors, 1])
    expected_variable_set = set([
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
    self.assertEqual(expected_variable_set, actual_variable_set)
    self.assertEqual(conv_box_predictor._sorted_head_names,
                     ['box_encodings', 'class_predictions_with_background'])

  def test_use_depthwise_convolution(self):
    """use_depthwise adds separate depthwise kernels/biases to both heads."""
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    conv_box_predictor = self._build_box_predictor(
        kernel_size=3, use_depthwise=True)
    box_predictions = conv_box_predictor([image_features])
    box_encodings = tf.concat(
        box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
    objectness_predictions = tf.concat(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])

    self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
    self.assertAllEqual(objectness_predictions_shape,
                        [4, expected_num_anchors, 1])
    expected_variable_set = set([
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias',
        'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/'
        'bias',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/'
        'depthwise_kernel',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',
        'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/bias',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/'
        'depthwise_kernel',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',
        'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])
    self.assertEqual(expected_variable_set, actual_variable_set)
    self.assertEqual(conv_box_predictor._sorted_head_names,
                     ['box_encodings', 'class_predictions_with_background'])
class WeightSharedConvolutionalKerasBoxPredictorTest(test_case.TestCase):
  def _build_conv_hyperparams(self, add_batch_norm=True):
    """Builds the Keras layer hyperparams used by the predictors under test.

    Args:
      add_batch_norm: if True, a train-mode batch_norm section is appended
        to the hyperparams text proto before parsing.

    Returns:
      A hyperparams_builder.KerasLayerHyperparams parsed from the proto.
    """
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      activation: RELU_6
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
          stddev: 0.01
          mean: 0.0
        }
      }
    """
    if add_batch_norm:
      batch_norm_proto = """
        batch_norm {
          train: true,
        }
      """
      conv_hyperparams_text_proto += batch_norm_proto
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
# pylint: disable=line-too-long
def test_get_boxes_for_five_aspect_ratios_per_location(self):
def graph_fn(image_features):
conv_box_predictor = (
box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
depth=32,
num_layers_before_predictor=1,
box_code_size=4))
box_predictions = conv_box_predictor([image_features])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
objectness_predictions = tf.concat(box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
return (box_encodings, objectness_predictions)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
(box_encodings, objectness_predictions) = self.execute(
graph_fn, [image_features])
self.assertAllEqual(box_encodings.shape, [4, 320, 4])
self.assertAllEqual(objectness_predictions.shape, [4, 320, 1])
def test_bias_predictions_to_background_with_sigmoid_score_conversion(self):
def graph_fn(image_features):
conv_box_predictor = (
box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
is_training=True,
num_classes=2,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
depth=32,
num_layers_before_predictor=1,
class_prediction_bias_init=-4.6,
box_code_size=4))
box_predictions = conv_box_predictor([image_features])
class_predictions = tf.concat(box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
return (tf.nn.sigmoid(class_predictions),)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
class_predictions = self.execute(graph_fn, [image_features])
self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3)
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
self):
num_classes_without_background = 6
def graph_fn(image_features):
conv_box_predictor = (
box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
depth=32,
num_layers_before_predictor=1,
box_code_size=4))
box_predictions = conv_box_predictor([image_features])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
return (box_encodings, class_predictions_with_background)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
(box_encodings, class_predictions_with_background) = self.execute(
graph_fn, [image_features])
self.assertAllEqual(box_encodings.shape, [4, 320, 4])
self.assertAllEqual(class_predictions_with_background.shape,
[4, 320, num_classes_without_background+1])
def test_get_multi_class_predictions_from_two_feature_maps(
self):
num_classes_without_background = 6
def graph_fn(image_features1, image_features2):
conv_box_predictor = (
box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5],
depth=32,
num_layers_before_predictor=1,
box_code_size=4))
box_predictions = conv_box_predictor([image_features1, image_features2])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32)
image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32)
(box_encodings, class_predictions_with_background) = self.execute(
graph_fn, [image_features1, image_features2])
self.assertAllEqual(box_encodings.shape, [4, 640, 4])
self.assertAllEqual(class_predictions_with_background.shape,
[4, 640, num_classes_without_background+1])
def test_get_multi_class_predictions_from_feature_maps_of_different_depth(
self):
num_classes_without_background = 6
def graph_fn(image_features1, image_features2, image_features3):
conv_box_predictor = (
box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5, 5, 5],
depth=32,
num_layers_before_predictor=1,
box_code_size=4))
box_predictions = conv_box_predictor(
[image_features1, image_features2, image_features3])
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32)
image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32)
image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32)
(box_encodings, class_predictions_with_background) = self.execute(
graph_fn, [image_features1, image_features2, image_features3])
self.assertAllEqual(box_encodings.shape, [4, 960, 4])
self.assertAllEqual(class_predictions_with_background.shape,
[4, 960, num_classes_without_background+1])
  def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm(
      self):
    """Conv weights are shared across feature maps; batchnorm betas are not.

    Expects exactly one kernel per tower layer (shared across both inputs)
    plus one BatchNorm/feature_<i>/beta per tower layer per feature map.
    """
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5, 5],
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4))
      box_predictions = conv_box_predictor([image_features1, image_features2])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    with self.test_session(graph=tf.Graph()):
      # Build the graph once so the trainable variables exist.
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Box prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'),
        # Box prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
        # Class prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'),
        # Class prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
    self.assertEqual(expected_variable_set, actual_variable_set)
  def test_predictions_multiple_feature_maps_share_weights_without_batchnorm(
      self):
    """With apply_batch_norm=False, tower layers carry a bias and no betas."""
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5, 5],
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4,
              apply_batch_norm=False))
      box_predictions = conv_box_predictor([image_features1, image_features2])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    with self.test_session(graph=tf.Graph()):
      # Build the graph once so the trainable variables exist.
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Box prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/bias'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/bias'),
        # Box prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
        # Class prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/bias'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/bias'),
        # Class prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
    self.assertEqual(expected_variable_set, actual_variable_set)
  def test_predictions_multiple_feature_maps_share_weights_with_depthwise(
      self):
    """use_depthwise=True yields separable convs (depthwise+pointwise kernels)."""
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(
                  add_batch_norm=False),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5, 5],
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4,
              apply_batch_norm=False,
              use_depthwise=True))
      box_predictions = conv_box_predictor([image_features1, image_features2])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    with self.test_session(graph=tf.Graph()):
      # Build the graph once so the trainable variables exist.
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Box prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/depthwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/pointwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/bias'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/depthwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/pointwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/bias'),
        # Box prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/depthwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/pointwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
        # Class prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/depthwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/pointwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/bias'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/depthwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/pointwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/bias'),
        # Class prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/depthwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/pointwise_kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
    self.assertEqual(expected_variable_set, actual_variable_set)
  def test_no_batchnorm_params_when_batchnorm_is_not_configured(self):
    """No BatchNorm variables appear when hyperparams omit batch norm."""
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(
                  add_batch_norm=False),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5, 5],
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4,
              apply_batch_norm=False))
      box_predictions = conv_box_predictor(
          [image_features1, image_features2])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    with self.test_session(graph=tf.Graph()):
      # Build the graph once so the trainable variables exist.
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Box prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/bias'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/bias'),
        # Box prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
        # Class prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/bias'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/bias'),
        # Class prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
    self.assertEqual(expected_variable_set, actual_variable_set)
  def test_predictions_share_weights_share_tower_separate_batchnorm(
      self):
    """share_prediction_tower=True merges box/class towers into one
    'PredictionTower' with shared kernels and per-feature batchnorm betas."""
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5, 5],
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4,
              share_prediction_tower=True))
      box_predictions = conv_box_predictor(
          [image_features1, image_features2])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    with self.test_session(graph=tf.Graph()):
      # Build the graph once so the trainable variables exist.
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Shared prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'),
        # Box prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
        # Class prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
    self.assertEqual(expected_variable_set, actual_variable_set)
  def test_predictions_share_weights_share_tower_without_batchnorm(
      self):
    """Shared prediction tower without batch norm: kernels and biases only."""
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams=self._build_conv_hyperparams(
                  add_batch_norm=False),
              freeze_batchnorm=False,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=[5, 5],
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4,
              share_prediction_tower=True,
              apply_batch_norm=False))
      box_predictions = conv_box_predictor(
          [image_features1, image_features2])
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)
    with self.test_session(graph=tf.Graph()):
      # Build the graph once so the trainable variables exist.
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Shared prediction tower
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/bias'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/bias'),
        # Box prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'),
        # Class prediction head
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'),
        ('WeightSharedConvolutionalBoxPredictor/'
         'WeightSharedConvolutionalClassHead/ClassPredictor/bias')])
    self.assertEqual(expected_variable_set, actual_variable_set)
def test_get_predictions_with_feature_maps_of_dynamic_shape(
self):
image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
conv_box_predictor = (
box_predictor_builder.build_weight_shared_convolutional_keras_box_predictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
num_predictions_per_location_list=[5],
depth=32,
num_layers_before_predictor=1,
box_code_size=4))
box_predictions = conv_box_predictor([image_features])
box_encodings = tf.concat(box_predictions[box_predictor.BOX_ENCODINGS],
axis=1)
objectness_predictions = tf.concat(box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
init_op = tf.global_variables_initializer()
resolution = 32
expected_num_anchors = resolution*resolution*5
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
objectness_predictions_shape) = sess.run(
[tf.shape(box_encodings), tf.shape(objectness_predictions)],
feed_dict={image_features:
np.random.rand(4, resolution, resolution, 64)})
self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 4])
self.assertAllEqual(objectness_predictions_shape,
[4, expected_num_anchors, 1])
  def test_other_heads_predictions(self):
    """Extra heads (here a mask head) are predicted alongside box and class."""
    box_code_size = 4
    num_classes_without_background = 3
    other_head_name = 'Mask'
    mask_height = 5
    mask_width = 5
    num_predictions_per_location = 5
    def graph_fn(image_features):
      box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
          box_code_size=box_code_size,
          conv_hyperparams=self._build_conv_hyperparams(),
          num_predictions_per_location=num_predictions_per_location)
      class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
          num_class_slots=num_classes_without_background + 1,
          conv_hyperparams=self._build_conv_hyperparams(),
          num_predictions_per_location=num_predictions_per_location)
      other_heads = {
          other_head_name:
              keras_mask_head.WeightSharedConvolutionalMaskHead(
                  num_classes=num_classes_without_background,
                  conv_hyperparams=self._build_conv_hyperparams(),
                  num_predictions_per_location=num_predictions_per_location,
                  mask_height=mask_height,
                  mask_width=mask_width)
      }
      conv_box_predictor = box_predictor.WeightSharedConvolutionalBoxPredictor(
          is_training=False,
          num_classes=num_classes_without_background,
          box_prediction_head=box_prediction_head,
          class_prediction_head=class_prediction_head,
          other_heads=other_heads,
          conv_hyperparams=self._build_conv_hyperparams(),
          freeze_batchnorm=False,
          inplace_batchnorm_update=False,
          depth=32,
          num_layers_before_predictor=2)
      box_predictions = conv_box_predictor([image_features])
      # Fold each head's per-feature-map list into one tensor (anchor axis).
      for key, value in box_predictions.items():
        box_predictions[key] = tf.concat(value, axis=1)
      # Exactly three heads: boxes, classes and the extra mask head.
      assert len(box_predictions) == 3
      return (box_predictions[box_predictor.BOX_ENCODINGS],
              box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
              box_predictions[other_head_name])
    batch_size = 4
    feature_ht = 8
    feature_wt = 8
    image_features = np.random.rand(batch_size, feature_ht, feature_wt,
                                    64).astype(np.float32)
    (box_encodings, class_predictions, other_head_predictions) = self.execute(
        graph_fn, [image_features])
    num_anchors = feature_ht * feature_wt * num_predictions_per_location
    self.assertAllEqual(box_encodings.shape,
                        [batch_size, num_anchors, box_code_size])
    self.assertAllEqual(
        class_predictions.shape,
        [batch_size, num_anchors, num_classes_without_background + 1])
    self.assertAllEqual(other_head_predictions.shape, [
        batch_size, num_anchors, num_classes_without_background, mask_height,
        mask_width
    ])
if __name__ == '__main__':
  # Run every test case in this module.
  tf.test.main()
| |
# -*- coding: utf-8 -*-
import os

from babelfish import Language, language_converters
import pytest
from vcr import VCR

from subliminal.exceptions import ConfigurationError, AuthenticationError
from subliminal.providers.legendastv import LegendasTVSubtitle, LegendasTVProvider, LegendasTVArchive

# Credentials of the dedicated LegendasTV test account used when (re)recording
# the VCR cassettes below.
USERNAME = 'python-subliminal'
PASSWORD = 'subliminal'

# Replay HTTP interactions from stored cassettes; record only when missing
# ('once'), unless overridden via the VCR_RECORD_MODE environment variable.
vcr = VCR(path_transformer=lambda path: path + '.yaml',
          record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
          match_on=['method', 'scheme', 'host', 'port', 'path', 'query', 'body'],
          cassette_library_dir=os.path.join('tests', 'cassettes', 'legendastv'))
@pytest.mark.converter
def test_converter_convert_alpha3_country():
    """Brazilian Portuguese (por, BR) maps to LegendasTV language id 1."""
    assert language_converters['legendastv'].convert('por', 'BR') == 1


@pytest.mark.converter
def test_converter_convert_alpha3():
    """English maps to LegendasTV language id 2."""
    assert language_converters['legendastv'].convert('eng') == 2


@pytest.mark.converter
def test_converter_convert_unsupported_alpha3():
    """Converting a language LegendasTV does not support raises."""
    with pytest.raises(ConfigurationError):
        language_converters['legendastv'].convert('rus')


@pytest.mark.converter
def test_converter_reverse():
    """LegendasTV id 10 reverses to Portuguese."""
    assert language_converters['legendastv'].reverse(10) == ('por',)


@pytest.mark.converter
def test_converter_reverse_name_converter():
    """LegendasTV id 3 reverses to Spanish."""
    assert language_converters['legendastv'].reverse(3) == ('spa',)


@pytest.mark.converter
def test_converter_reverse_unsupported_language_number():
    """Reversing an id outside the known mapping raises."""
    with pytest.raises(ConfigurationError):
        language_converters['legendastv'].reverse(20)
def test_get_matches(episodes):
    """A subtitle for the right episode matches the full set of properties."""
    archive = LegendasTVArchive('537a74584945b', 'The.Big.Bang.Theory.S07.HDTV.x264', True, False,
                                'http://legendas.tv/download/537a74584945b/The_Big_Bang_Theory/'
                                'The_Big_Bang_Theory_S07_HDTV_x264', 6915, 10)
    subtitle = LegendasTVSubtitle(Language('por', 'BR'), 'episode', 'The Big Bang Theory', 2013, 'tt0898266', 7,
                                  archive, 'TBBT S07 x264/The.Big.Bang.Theory.S07E05.HDTV.x264-LOL.srt')
    matches = subtitle.get_matches(episodes['bbt_s07e05'])
    assert matches == {'series', 'year', 'season', 'episode', 'format', 'video_codec', 'series_imdb_id'}


def test_get_matches_no_match(episodes):
    """A subtitle for an unrelated show matches nothing."""
    archive = LegendasTVArchive('537a74584945b', 'The.Big.Bang.Theory.S07.HDTV.x264', True, False,
                                'http://legendas.tv/download/537a74584945b/The_Big_Bang_Theory/'
                                'The_Big_Bang_Theory_S07_HDTV_x264', 6915, 10)
    subtitle = LegendasTVSubtitle(Language('por', 'BR'), 'episode', 'The Big Bang Theory', 2013, 'tt0898266', 7,
                                  archive, 'TBBT S07 x264/The.Big.Bang.Theory.S07E05.HDTV.x264-LOL.srt')
    matches = subtitle.get_matches(episodes['dallas_2012_s01e03'])
    assert matches == set()
@pytest.mark.integration
@vcr.use_cassette
def test_login():
    """initialize() logs in with valid credentials."""
    provider = LegendasTVProvider(USERNAME, PASSWORD)
    assert provider.logged_in is False
    provider.initialize()
    assert provider.logged_in is True


@pytest.mark.integration
@vcr.use_cassette
def test_login_bad_password():
    """A wrong password raises AuthenticationError on initialize()."""
    provider = LegendasTVProvider(USERNAME, 'wrong')
    with pytest.raises(AuthenticationError):
        provider.initialize()


@pytest.mark.integration
@vcr.use_cassette
def test_logout():
    """terminate() logs the provider back out."""
    provider = LegendasTVProvider(USERNAME, PASSWORD)
    provider.initialize()
    provider.terminate()
    assert provider.logged_in is False
@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_episode(episodes):
    """Searching a series name returns one title entry per season."""
    with LegendasTVProvider() as provider:
        titles = provider.search_titles(episodes['bbt_s07e05'].series)
    assert len(titles) == 10
    assert set(titles.keys()) == {7623, 12620, 17710, 22056, 25314, 28507, 28900, 30730, 34546, 38908}
    assert {t['title'] for t in titles.values()} == {episodes['bbt_s07e05'].series}
    assert {t['season'] for t in titles.values() if t['type'] == 'episode'} == set(range(1, 10))


@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_movie(movies):
    """Searching a movie title also returns closely related titles."""
    with LegendasTVProvider() as provider:
        titles = provider.search_titles(movies['interstellar'].title)
    assert len(titles) == 2
    assert set(titles.keys()) == {34084, 37333}
    assert {t['title'] for t in titles.values()} == {movies['interstellar'].title, 'The Science of Interstellar'}


@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_dots():
    """Titles containing dots (e.g. 11.22.63) are searchable."""
    with LegendasTVProvider() as provider:
        titles = provider.search_titles('11.22.63')
    assert len(titles) == 1
    assert set(titles.keys()) == {40092}


@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_quote():
    """Titles containing an apostrophe are searchable."""
    with LegendasTVProvider() as provider:
        titles = provider.search_titles('Marvel\'s Jessica Jones')
    assert len(titles) == 1
    assert set(titles.keys()) == {39376}


@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_with_invalid_year():
    """A title whose year field is malformed on the site is still returned."""
    with LegendasTVProvider() as provider:
        titles = provider.search_titles('Grave Danger')
    assert len(titles) == 1
    assert set(titles.keys()) == {22034}


@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_with_season_information_in_english():
    """Season markers written in English on the site are parsed."""
    with LegendasTVProvider() as provider:
        titles = provider.search_titles('Pretty Little Liars')
    assert len(titles) == 7
    assert set(titles.keys()) == {20917, 24586, 27500, 28332, 30303, 33223, 38105}


@pytest.mark.integration
@vcr.use_cassette
def test_search_titles_without_season_information():
    """Episode titles with no season marker (webisodes) are still found."""
    with LegendasTVProvider() as provider:
        titles = provider.search_titles('The Walking Dead Webisodes Torn Apart')
    assert len(titles) == 1
    assert set(titles.keys()) == {25770}
@pytest.mark.integration
@vcr.use_cassette
def test_get_archives():
    """Archives are listed for a title/language pair, content not yet fetched."""
    with LegendasTVProvider() as provider:
        archives = provider.get_archives(34084, 2)
    assert len(archives) == 2
    assert {a.id for a in archives} == {'5515d27a72921', '54a2e41d8cae4'}
    # Listing does not download: content stays None until download_archive().
    assert {a.content for a in archives} == {None}


@pytest.mark.integration
@vcr.use_cassette
def test_get_archives_no_result():
    """An unavailable language id yields an empty archive list."""
    with LegendasTVProvider() as provider:
        archives = provider.get_archives(34084, 17)
    assert len(archives) == 0


@pytest.mark.integration
@vcr.use_cassette
def test_download_archive():
    """download_archive() populates the archive's content bytes."""
    with LegendasTVProvider(USERNAME, PASSWORD) as provider:
        archive = provider.get_archives(34084, 2)[0]
        provider.download_archive(archive)
    assert archive.content is not None
@pytest.mark.integration
@vcr.use_cassette
def test_query_movie(movies):
    """Querying by movie title and year returns the expected subtitle files."""
    video = movies['interstellar']
    language = Language('eng')
    expected_subtitles = {
        ('54a2e41d8cae4', 'Interstellar 2014 HDCAM NEW SOURCE READNFO XVID AC3 ACAB.srt'),
        ('5515d27a72921', 'Interstellar.2014.1080p.BluRay.x264.DTS-RARBG.eng.srt'),
    }
    with LegendasTVProvider(USERNAME, PASSWORD) as provider:
        subtitles = provider.query(language, video.title, year=video.year)
    assert {(s.archive.id, s.name) for s in subtitles} == expected_subtitles


@pytest.mark.integration
@vcr.use_cassette
def test_query_episode(episodes):
    """Querying by series/season/episode returns every subtitle in every
    matching archive (several files can share one archive id)."""
    video = episodes['colony_s01e09']
    language = Language('por', 'BR')
    expected_subtitles = {
        ('56ed8159e36ec', 'Colony.S01E09.HDTV.XviD-FUM.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.HDTV.x264-FLEET.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.1080p.WEB-DL.x265.HEVC.AAC.5.1.Condo.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.720p.HDTV.HEVC.x265-RMTeam.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.WEB-DL.x264-RARBG.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.Zero.Day.1080p.WEB-DL.6CH.x265.HEVC-PSA.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.720p.HDTV.x264-KILLERS.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.720p.WEB-DL.HEVC.x265-RMTeam.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.HDTV.XviD-AFG.srt'),
        ('56ed812f354f6', 'Colony.S01E09.HDTV.x264-FUM.srt'),
        ('56eb3817111be', 'Colony S01E09 1080p WEB DL DD5 1 H264 RARBG /'
                          'Colony S01E09 1080p WEB DL DD5 1 H264 RARBG .srt'),
        ('56ed8159e36ec', 'Colony.S01E09.Zero.Day.1080p.WEB-DL.DD5.1.H265-LGC.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.Zero.Day.720p.WEB-DL.2CH.x265.HEVC-PSA.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.1080p.WEB-DL.6CH.HEVC.x265-RMTeam.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.720p.HDTV.2CH.x265.HEVC-PSA.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.1080p.WEB-DL.DD5.1.H264-RARBG.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.HDTV.x264-FUM.srt'),
        ('56ed8159e36ec', 'Colony.S01E09.720p.WEB-DL.DD5.1.H264-RARBG.srt'),
        ('56e442ddbb615', 'Colony.S01E09.720p.HDTV.x264-KILLERS.srt')
    }
    with LegendasTVProvider(USERNAME, PASSWORD) as provider:
        subtitles = provider.query(language, video.series, video.season, video.episode, video.year)
    assert {(s.archive.id, s.name) for s in subtitles} == expected_subtitles
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode(episodes):
    """End-to-end listing for an episode filters to the requested languages."""
    video = episodes['the_x_files_s10e02']
    languages = {Language('eng')}
    expected_subtitles = {('56a756935a76c', 'The.X-Files.S10E02.720p.HDTV.AVS.en.srt')}
    with LegendasTVProvider(USERNAME, PASSWORD) as provider:
        subtitles = provider.list_subtitles(video, languages)
    assert {(s.archive.id, s.name) for s in subtitles} == expected_subtitles


@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_movie(movies):
    """End-to-end listing for a movie filters to the requested languages."""
    video = movies['man_of_steel']
    languages = {Language('eng')}
    expected_subtitles = {('525d8c2444851', 'Man.Of.Steel.2013.[BluRay.BRRip.BDRip].srt')}
    with LegendasTVProvider(USERNAME, PASSWORD) as provider:
        subtitles = provider.list_subtitles(video, languages)
    assert {(s.archive.id, s.name) for s in subtitles} == expected_subtitles


@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(movies):
    """download_subtitle() fills in valid subtitle content."""
    video = movies['man_of_steel']
    languages = {Language('eng')}
    with LegendasTVProvider(USERNAME, PASSWORD) as provider:
        subtitles = provider.list_subtitles(video, languages)
        provider.download_subtitle(subtitles[0])
    assert subtitles[0].content is not None
    assert subtitles[0].is_valid() is True
| |
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_Basic/__init__.py
# Date : Nov 05, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
from IOST_Prepare import *
import gtk
import gtk.glade
import gobject
#======================================================================
# Global switch: non-zero enables writing the configuration back to disk.
IOST_Basic_Enable_Write_Config_File = 1
#======================================================================
# Human-readable status strings displayed in the UI.
STATUS_RUN = "Running ..."
STATUS_INIT = "Initting ..."
STATUS_PAUSE = "Pausing ..."
STATUS_DISABLE = "Disable"
STATUS_ENABLE = "Enable"
STATUS_CORRUPT = "Corruped"  # NOTE(review): typo for "Corrupted" -- but other code/UI may compare this exact string, so it is left as-is.
STATUS_SUPPORT = "Supported"
STATUS_EMPTY = ""
STATUS_NONE = "None"
STATUS_N_A = "N/A"
STATUS_AVAIL = "Available"
STATUS_NOT_AVAIL = "Not Available "  # NOTE(review): trailing space may be intentional padding -- confirm before trimming.
# UI colors (hex RGB).
WRUN_IP_COLOR_DEFAULT = "#990033"
WRIN_SATION_INFO_COLOR = "#0099ff"  # NOTE(review): name looks like a typo for "STATION"; renaming would break external references.
# Label prefix with the degree sign (Python 2 unichr).
TEMPERATURE_STR="Temperature ( "+ unichr(186) +"C) :"
def IOST_ExtracPort(ip_name):
    """Extract the first run of digits (the port number) from a name.

    Example:
        IOST_ExtracPort("ETH0")  # -> "0"

    Args:
        ip_name: interface/port name such as "ETH0" or "SATA12".

    Returns:
        The first group of consecutive digits in ip_name, as a string.

    Raises:
        IndexError: if ip_name contains no digits.
    """
    # Bug fix: the original body referenced the docstring's example variable
    # `S` instead of the parameter, so every call raised NameError.
    return re.findall(r'\d+', ip_name)[0]
#======================================================================
# Working directory captured at startup; populated elsewhere at run time.
IOST_CurrenPathWhenRun = ""
#======================================================================
class IOST_Define():
    """Default values for user-tunable application settings.

    These act as fall-backs when no saved configuration overrides them.
    """
    # Characters treated as part of a word for double-click selection.
    WORD_SEPARATORS = "-A-Za-z0-9,./?%&#:_=+@~"
    # Terminal scrollback size, in lines.
    BUFFER_LINES = 2000000
    STARTUP_LOCAL = True
    CONFIRM_ON_EXIT = True
    # Empty string means "use the widget/theme default".
    FONT_COLOR = ""
    BACKGROUND_COLOR = ""
    TRANSPARENCY = 0
    PASTE_ON_RIGHT_CLICK = 1
    CONFIRM_ON_CLOSE_TAB = 0
    AUTO_CLOSE_TAB = 0
    COLLAPSED_FOLDERS = ""
    LEFT_PANEL_WIDTH = 100
    CHECK_UPDATES = True
    # -1 means "let the window manager decide".
    WINDOW_WIDTH = -1
    WINDOW_HEIGHT = -1
    FONT = ""
    AUTO_COPY_SELECTION = 0
    LOG_PATH = os.path.expanduser("~")
    SHOW_TOOLBAR = True
    SHOW_PANEL = True
    VERSION = 0
#======================================================================
class IOST_Basic(IOST_Prepare):
    def __init__(self):
        """No per-instance state; this class is a bag of helper methods."""
#-----------------------------------------------------------------------
def Str2Boolean(self, s):
if s in ["True", "Enable", "Yes"]:
return True
elif s in ["False", "Disable", "No"]:
return False
else:
raise ValueError
# evil ValueError that doesn't tell you what the wrong value was
def Boolean2Str(self, boolean):
if boolean:
return 'Enable'
else:
return 'Disable'
#-----------------------------------------------------------------------
def MsgBox(self, text, icon_file=None, parent=None, msg_type=gtk.MESSAGE_ERROR):
""
MsgBox = gtk.MessageDialog(parent, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, text)
if icon_file != None:
MsgBox.set_icon_from_file(icon_file)
image = gtk.Image ()
image.set_from_file (icon_file)
MsgBox.set_image(image)
MsgBox.show_all()
Res = MsgBox.run()
MsgBox.destroy()
#-----------------------------------------------------------------------
    def MsgConfirm(self, text=""):
        """Show a modal OK/Cancel question dialog; return the gtk response code."""
        # global IOST_Config
        MsgBox=gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, text)
        # MsgBox.set_icon(IOST_Config["IconPath"])
        Response = MsgBox.run()
        MsgBox.destroy()
        return Response
#-----------------------------------------------------------------------
    def InputBox(self, title, text, default='', password=False):
        """Prompt for one line of input; return the value, or None on cancel.

        NOTE(review): depends on EntryDialog, which is not defined in this
        module -- presumably supplied by the `from IOST_Prepare import *`
        star import; verify before relying on it.
        """
        global IOST_Config
        MsgBox = EntryDialog(title, text, default, mask=password)
        # MsgBox.set_icon(IOST_Config["IconPath"])
        if MsgBox.run() == gtk.RESPONSE_OK:
            Response = MsgBox.value
        else:
            Response = None
        MsgBox.destroy()
        return Response
#-----------------------------------------------------------------------
def ShowFontDialog(self, parent, title, button):
""
Dlg = gtk.FileChooserDialog(title=title, parent=parent, action=action)
Dlg.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
Dlg.add_button(gtk.STOCK_SAVE if action==gtk.FILE_CHOOSER_ACTION_SAVE else gtk.STOCK_OPEN, gtk.RESPONSE_OK)
Dlg.set_do_overwrite_confirmation(True)
if not hasattr(parent,'lastPath'):
parent.lastPath = os.path.expanduser("~")
Dlg.set_current_folder( parent.lastPath )
if Dlg.run() == gtk.RESPONSE_OK:
filename = dlg.get_filename()
parent.lastPath = os.path.dirname(filename)
else:
filename = None
Dlg.destroy()
return filename
#-----------------------------------------------------------------------
    def GetKeyName(self, event):
        """Return a human-readable accelerator string for a gtk key event.

        Modifier prefixes are derived from bit flags in `event.state`, the
        base key name from `event.keyval`, e.g. "CTRL+SHIFT+S".
        """
        name = ""
        if event.state & 4:  # gtk.gdk.CONTROL_MASK
            name = name + "CTRL+"
        if event.state & 1:  # gtk.gdk.SHIFT_MASK
            name = name + "SHIFT+"
        if event.state & 8:  # gtk.gdk.MOD1_MASK (Alt)
            name = name + "ALT+"
        if event.state & 67108864:  # gtk.gdk.SUPER_MASK (1 << 26)
            name = name + "SUPER+"
        return name + gtk.gdk.keyval_name(event.keyval).upper()
#-----------------------------------------------------------------------
    def GetUserName():
        """Return the login name from the environment (USER, LOGNAME or USERNAME).

        NOTE(review): defined inside a class but takes no `self`, so it only
        works when called through the class, not on an instance; consider
        decorating with @staticmethod.
        """
        return os.getenv('USER') or os.getenv('LOGNAME') or os.getenv('USERNAME')
# def IOST_Basic_GetPassword():
# return get_username() + enc_passwd
#-----------------------------------------------------------------------
def ReadFileJSON(file_name=""):
with open(file_name) as ReadFileName:
ReadData = json.load(ReadFileName, object_pairs_hook=OrderedDict)
# if IOST_Config_DebugEnable:
# pprint (IOST_Config_ReadData)
return ReadData
#-----------------------------------------------------------------------
def WriteFileJSON(file_name="", data=None):
with open(file_name, 'w') as WriteFileName:
json.dump(data, WriteFileName,indent=4)
#-----------------------------------------------------------------------
def FormatText(self, object_name, color=None, bold=False, italic=False, text=None):
"""
Format the text with color, bold, italic and modify text default
1. color:
format-1: color=<"color">
color="blue"
color="green"
color="red"
format-2: color="#<R><G><B>"
color="#AABBCC"
color="#FF00BB"
2. bold :
bold = True
or bold = False
3. italic :
italic = True
or italic = False
"""
if text == None:
text=object_name.get_text()
if bold:
text = "<b>"+text+"</b>"
if italic:
text = "<b>"+text+"</b>"
if color != None:
if '#' in color:
color = gtk.gdk.Color(color).to_string()
text = "<span foreground='"+color+"'>"+text+"</span>"
else:
text = "<span foreground='"+str(color)+"'>"+text+"</span>"
object_name.set_text(text)
object_name.set_use_markup(True)
#-----------------------------------------------------------------------
def Msg_NotSupported(self, image_file):
msg_Text = " The feature have NOT supported (^.^) "
self.MsgBox(msg_Text, icon_file=image_file, msg_type=gtk.MESSAGE_INFO)
| |
# api/resources/bucketlist.py
import datetime
from flask import request, jsonify, g, make_response, current_app
from sqlalchemy.exc import SQLAlchemyError
from common.authentication import AuthRequiredResource
from api.models import BucketList, BucketItem, db
from api.schemas import BucketItemSchema, BucketListSchema
from common.utils import PaginateData
# Module-level schema singletons shared by all resource classes below.
buckets_schema = BucketListSchema()
bucketitem_schema = BucketItemSchema()
class ResourceBucketLists(AuthRequiredResource):
    """Collection endpoint: list and create the authenticated user's bucketlists."""
    # get all bucketlists for the user
    def get(self):
        """Return a paginated list of the user's bucketlists.

        Supports an optional `q` query parameter for a case-insensitive
        substring search on the bucketlist name. 404 if the page is empty.
        """
        search_term = request.args.get('q')
        if search_term:
            search_results = BucketList.query.filter_by(
                created_by=g.user.id).filter(
                BucketList.name.ilike('%' + search_term + '%'))
            get_data_query = search_results
        else:
            get_data_query = BucketList.query.filter_by(created_by=g.user.id)
        paginate_content = PaginateData(
            request,
            query=get_data_query,
            resource_for_url='api_v1.bucket_lists',
            key_name='results',
            schema=buckets_schema
        )
        paginated_data = paginate_content.paginate_query()
        if paginated_data['results']:
            return paginated_data, 200
        else:
            response = {'Results': 'No Resource found'}
            return response, 404
    # create a bucketlist for the user
    def post(self):
        """Create a bucketlist named in the JSON body for the current user.

        Returns 201 with the created resource, 400 on a missing body, 403 on
        schema validation failure, 409 if the name already exists for the user.
        """
        request_data = request.get_json()
        if not request_data:
            response = {'Bucketlist': 'No input data provided'}
            return response, 400
        errors = buckets_schema.validate(request_data)
        if errors:
            return errors, 403
        try:
            bucket_name = request_data['name']
            # Enforce per-user name uniqueness before inserting.
            exists = BucketList.query.filter_by(created_by=g.user.id, name=bucket_name).first()
            if not exists:
                bucketlist = BucketList()
                bucketlist.name = bucket_name
                bucketlist.created_by = g.user.id
                bucketlist.add(bucketlist)
                # Re-query so the response includes DB-generated fields (id, dates).
                response_data = BucketList.query.filter_by(created_by=g.user.id, name=bucket_name).first()
                response = buckets_schema.dump(response_data).data
                return response, 201
            else:
                response = {'Error': '{} already exists!'.format(bucket_name)}
                return response, 409
        except SQLAlchemyError:
            db.session.rollback()
            # NOTE(review): 401 is an odd status for a DB failure -- 500 looks
            # intended; left unchanged to preserve the API contract.
            response = {'error': 'Resource could not be created'}
            return response, 401
class ResourceBucketList(AuthRequiredResource):
    """Single-bucketlist endpoint: fetch, rename and delete by primary key."""
    def get(self, id):
        """Return bucketlist `id` with its items, or 404 if absent."""
        # return all bucketlists with their items for the user
        # return a specific bucketlist with its items for the user
        # NOTE(review): lookup is by primary key only -- there is no
        # created_by ownership check here, unlike the collection endpoint;
        # confirm whether cross-user access is intended.
        bucket = BucketList.query.get(id)
        if not bucket:
            response = {'Error': 'Resource not found'}
            return response, 404
        response = buckets_schema.dump(bucket).data
        return response, 200
    def put(self, id):
        """Rename bucketlist `id`; returns the refreshed resource via get()."""
        # edit a bucketlist
        bucket = BucketList.query.get(id)
        if not bucket:
            response = {'Error': 'Resource does not exist'}
            return response, 404
        bucket_request = request.get_json(force=True)
        if 'name' in bucket_request:
            bucket.name = bucket_request['name']
        # Naive local timestamp (no timezone) -- matches the rest of the model.
        bucket.date_modified = datetime.datetime.now()
        # Validation runs on the *dumped* representation, before persisting.
        dumped_message, dump_errors = buckets_schema.dump(bucket)
        if dump_errors:
            return dump_errors, 400
        validate_error = buckets_schema.validate(dumped_message)
        if validate_error:
            return validate_error, 400
        try:
            bucket.update()
            return self.get(id)
        except SQLAlchemyError:
            db.session.rollback()
            response = {'Error': 'Could not update'}
            return response, 400
    def delete(self, id):
        """Delete bucketlist `id`; 404 if absent, 500 on DB error."""
        # delete a bucketlist
        bucket = BucketList.query.get(id)
        if not bucket:
            response = {'Error': 'Resource does not exist!'}
            return response, 404
        try:
            bucket.delete(bucket)
            # NOTE(review): 204 responses carry no body per HTTP semantics;
            # the dict below is likely dropped by the framework.
            response = {'Status': 'Delete operation successful'}
            return response, 204
        except SQLAlchemyError:
            db.session.rollback()
            response = {"error": "Error Deleting Object"}
            return response, 500
class ResourceBucketItems(AuthRequiredResource):
    """Collection endpoint for the items inside a single bucketlist."""
    # create a new item, in bucketlist
    def post(self, id):
        """Create an item named in the JSON body inside bucketlist `id`.

        Returns 201 with the created item, 400 on a missing body, 403 on
        schema validation failure, 409 if an item of that name already exists
        in the bucketlist.
        """
        request_data = request.get_json()
        if not request_data:
            # Bug fix: message previously read 'No input data not provided'
            # (a double negative); now consistent with ResourceBucketLists.post.
            response = {'Error': 'No input data provided'}
            return response, 400
        errors = bucketitem_schema.validate(request_data)
        if errors:
            return errors, 403
        try:
            bucket_item_name = request_data['name']
            # Enforce per-bucketlist name uniqueness before inserting.
            exists = BucketItem.query.filter_by(bucket_id=id, name=bucket_item_name).first()
            if not exists:
                bucket_item = BucketItem()
                bucket_item.name = request_data['name']
                bucket_item.bucket_id = id
                bucket_item.add(bucket_item)
                # Re-query so the response includes DB-generated fields.
                response_data = BucketItem.query.filter_by(bucket_id=id, name=bucket_item_name).first()
                response = bucketitem_schema.dump(response_data).data
                return response, 201
            else:
                response = {'Error': '{} already exists!'.format(bucket_item_name)}
                return response, 409
        except SQLAlchemyError:
            db.session.rollback()
            response = {'error': 'Could not create resource'}
            return response, 401
    # get all bucket items
    def get(self, id):
        """Return every item of bucketlist `id`, or 404 if there are none."""
        bucket_items_query = BucketItem.query.filter_by(bucket_id=id)
        if not bucket_items_query.count():
            response = {'Error': 'Resource not found'}
            return response, 404
        bucketitems = bucketitem_schema.dump(bucket_items_query, many=True).data
        return bucketitems, 200
class ResourceBucketItem(AuthRequiredResource):
    """Single-item endpoint: fetch, update and delete one bucketlist item."""
    # get a single bucket item
    def get(self, id, item_id):
        """Return item `item_id`, or 404 if absent.

        NOTE(review): `id` (the bucketlist id) is unused -- the item is looked
        up by its own primary key without checking it belongs to bucketlist
        `id`; confirm this is intended.
        """
        bucket = BucketItem.query.get(item_id)
        if not bucket:
            response = {'Error': 'Resource not found'}
            return response, 404
        response = bucketitem_schema.dump(bucket).data
        return response, 200
    def put(self, id, item_id):
        """Update an item's name and/or done flag; returns it via get()."""
        # Update a bucket list item
        bucket_item = BucketItem.query.get(item_id)
        if not bucket_item:
            response = {'Error': 'Resource not found'}
            return response, 404
        bucket_item_request = request.get_json(force=True)
        if not bucket_item_request:
            response = {'Error': 'Nothing to update'}
            return response, 412
        else:
            if 'name' in bucket_item_request:
                bucket_item.name = bucket_item_request['name']
            if 'done' in bucket_item_request:
                bucket_item.done = bucket_item_request['done']
        # Naive local timestamp (no timezone) -- matches the rest of the model.
        bucket_item.date_modified = datetime.datetime.now()
        # Validation runs on the *dumped* representation, before persisting.
        dumped_message, dump_errors = bucketitem_schema.dump(bucket_item)
        if dump_errors:
            return dump_errors, 400
        validate_error = bucketitem_schema.validate(dumped_message)
        if validate_error:
            return validate_error, 400
        try:
            bucket_item.update()
            return self.get(id, item_id)
        except SQLAlchemyError:
            db.session.rollback()
            response = {"error": "Resource could not be updated"}
            return response, 400
    def delete(self, id, item_id):
        """Delete item `item_id`; 404 if absent, 401 on DB error."""
        # Delete a bucketlist item
        bucket_item = BucketItem.query.get(item_id)
        if not bucket_item:
            response = {'Error': 'Resource not found'}
            return response, 404
        try:
            bucket_item.delete(bucket_item)
            # NOTE(review): 204 responses carry no body per HTTP semantics.
            response = {'Status': 'Delete operation successful'}
            return response, 204
        except SQLAlchemyError:
            db.session.rollback()
            response = {"error": "Could not delete resource"}
            return response, 401
| |
from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
    """
    Wrap an OGR Field. Needs to be instantiated from a Feature object.
    """
    def __init__(self, feat, index):
        """
        Initialize on the feature object and the integer index of
        the field within the feature.
        """
        # Setting the feature pointer and index.
        self._feat = feat
        self._index = index
        # Getting the pointer for this field.
        fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
        if not fld_ptr:
            raise GDALException('Cannot create OGR Field, invalid pointer given.')
        self.ptr = fld_ptr
        # Setting the class depending upon the OGR Field Type (OFT)
        # NOTE: self.ptr must be set *before* this -- `self.type` reads it.
        self.__class__ = OGRFieldTypes[self.type]
        # OFTReal with no precision should be an OFTInteger.
        if isinstance(self, OFTReal) and self.precision == 0:
            self.__class__ = OFTInteger
            # Remember the underlying storage is a double; OFTInteger.value
            # reads via as_double() when this flag is set.
            self._double = True
    def __str__(self):
        "Return the string representation of the Field."
        return str(self.value).strip()
    # #### Field Methods ####
    def as_double(self):
        "Retrieve the Field's value as a double (float)."
        return capi.get_field_as_double(self._feat.ptr, self._index)
    def as_int(self, is_64=False):
        "Retrieve the Field's value as an integer."
        if is_64:
            return capi.get_field_as_integer64(self._feat.ptr, self._index)
        else:
            return capi.get_field_as_integer(self._feat.ptr, self._index)
    def as_string(self):
        "Retrieve the Field's value as a string."
        string = capi.get_field_as_string(self._feat.ptr, self._index)
        return force_text(string, encoding=self._feat.encoding, strings_only=True)
    def as_datetime(self):
        """Retrieve the Field's value as a tuple of date & time components.

        Note: the components are returned as raw ctypes c_int objects --
        callers read `.value` on each (see OFTDate/OFTDateTime/OFTTime).
        """
        yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
        status = capi.get_field_as_datetime(
            self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
            byref(hh), byref(mn), byref(ss), byref(tz))
        if status:
            return (yy, mm, dd, hh, mn, ss, tz)
        else:
            raise GDALException('Unable to retrieve date & time information from the field.')
    # #### Field Properties ####
    @property
    def name(self):
        "Return the name of this Field."
        name = capi.get_field_name(self.ptr)
        return force_text(name, encoding=self._feat.encoding, strings_only=True)
    @property
    def precision(self):
        "Return the precision of this Field."
        return capi.get_field_precision(self.ptr)
    @property
    def type(self):
        "Return the OGR type of this Field."
        return capi.get_field_type(self.ptr)
    @property
    def type_name(self):
        "Return the OGR field type name for this Field."
        return capi.get_field_type_name(self.type)
    @property
    def value(self):
        "Return the value of this Field."
        # Default is to get the field as a string.
        return self.as_string()
    @property
    def width(self):
        "Return the width of this Field."
        return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
    # _double is flipped to True by Field.__init__ when an OFTReal field with
    # zero precision is re-classed to OFTInteger; _bit64 is overridden by the
    # OFTInteger64 subclass.
    _double = False
    _bit64 = False
    @property
    def value(self):
        "Return an integer contained in this field."
        if self._double:
            # If this is really from an OFTReal field with no precision,
            # read as a double and cast as Python int (to prevent overflow).
            return int(self.as_double())
        else:
            return self.as_int(self._bit64)
    @property
    def type(self):
        """
        GDAL uses OFTReals to represent OFTIntegers in created
        shapefiles -- forcing the type here since the underlying field
        type may actually be OFTReal.
        """
        return 0
class OFTReal(Field):
    @property
    def value(self):
        "Return a float contained in this field."
        return self.as_double()
# String & Binary fields, just subclasses
# (their `value` comes from the Field base class, i.e. as_string()).
class OFTString(Field):
    pass
class OFTWideString(Field):
    pass
class OFTBinary(Field):
    pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
    @property
    def value(self):
        "Return a Python `date` object for the OFTDate field, or None."
        try:
            # as_datetime() yields raw c_int components; only the date part
            # is used here.
            yy, mm, dd, _hh, _mn, _ss, _tz = self.as_datetime()
            return date(yy.value, mm.value, dd.value)
        except (ValueError, GDALException):
            # Invalid or unset field values resolve to None.
            return None
class OFTDateTime(Field):
    @property
    def value(self):
        "Return a Python `datetime` object for this OFTDateTime field."
        # TODO: Adapt timezone information.
        #  See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
        #  The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
        #  100=GMT, 104=GMT+1, 80=GMT-5, etc.
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            # tz is intentionally dropped here (see TODO above); the result
            # is a naive datetime.
            return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            return None
class OFTTime(Field):
    @property
    def value(self):
        "Return a Python `time` object for this OFTTime field, or None."
        try:
            # Only the time-of-day components of the 7-tuple are used.
            _yy, _mm, _dd, hh, mn, ss, _tz = self.as_datetime()
            return time(hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            return None
class OFTInteger64(OFTInteger):
    # Same behavior as OFTInteger, but reads through the 64-bit C API call.
    _bit64 = True
# List fields are also just subclasses
# (no list-reading support implemented; `value` falls back to as_string()).
class OFTIntegerList(Field):
    pass
class OFTRealList(Field):
    pass
class OFTStringList(Field):
    pass
class OFTWideStringList(Field):
    pass
class OFTInteger64List(Field):
    pass
# Class mapping dictionary for OFT Types and reverse mapping.
# Class mapping dictionary for OFT Types and reverse mapping.
# Keys are the integer OGR field-type codes returned by
# capi.get_field_type (consumed in Field.__init__).
OGRFieldTypes = {
    0: OFTInteger,
    1: OFTIntegerList,
    2: OFTReal,
    3: OFTRealList,
    4: OFTString,
    5: OFTStringList,
    6: OFTWideString,
    7: OFTWideStringList,
    8: OFTBinary,
    9: OFTDate,
    10: OFTTime,
    11: OFTDateTime,
    # New 64-bit integer types in GDAL 2
    12: OFTInteger64,
    13: OFTInteger64List,
}
# Reverse map: Field subclass -> OGR integer type code.
ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
| |
# -*- coding: utf-8 -*-
from flask import (
render_template,
abort,
jsonify,
request,
url_for,
current_app,
redirect,
Response,
stream_with_context
)
from flask_login import login_required
from notifications_utils.template import (
Template,
WithSubjectTemplate,
)
from app import (
job_api_client,
notification_api_client,
service_api_client,
current_service,
format_datetime_short)
from app.main import main
from app.main.forms import SearchNotificationsForm
from app.utils import (
get_page_from_request,
generate_next_dict,
generate_previous_dict,
user_has_permissions,
generate_notifications_csv,
get_time_left,
get_letter_timings,
parse_filter_args, set_status_filters
)
from app.statistics_utils import add_rate_to_job
@main.route("/services/<service_id>/jobs")
@login_required
@user_has_permissions('view_activity', admin_override=True)
def view_jobs(service_id):
    """Render the paginated list of a service's jobs."""
    page = int(request.args.get('page', 1))
    # all but scheduled and cancelled
    statuses_to_display = job_api_client.JOB_STATUSES - {'scheduled', 'cancelled'}
    jobs_response = job_api_client.get_jobs(service_id, statuses=statuses_to_display, page=page)
    links = jobs_response['links']
    prev_page = (
        generate_previous_dict('main.view_jobs', service_id, page)
        if links.get('prev', None) else None
    )
    next_page = (
        generate_next_dict('main.view_jobs', service_id, page)
        if links.get('next', None) else None
    )
    return render_template(
        'views/jobs/jobs.html',
        jobs=[add_rate_to_job(job) for job in jobs_response['data']],
        page=page,
        prev_page=prev_page,
        next_page=next_page,
    )
@main.route("/services/<service_id>/jobs/<job_id>")
@login_required
@user_has_permissions('view_activity', admin_override=True)
def view_job(service_id, job_id):
    """Render the detail page for one job (upload)."""
    job = job_api_client.get_job(service_id, job_id)['data']
    if job['job_status'] == 'cancelled':
        abort(404)
    filter_args = parse_filter_args(request.args)
    filter_args['status'] = set_status_filters(filter_args)
    total_notifications = job.get('notification_count', 0)
    processed_notifications = job.get('notifications_delivered', 0) + job.get('notifications_failed', 0)
    # Fetch the exact template version the job was created with.
    template = service_api_client.get_service_template(
        service_id=service_id,
        template_id=job['template'],
        version=job['template_version']
    )['data']
    return render_template(
        'views/jobs/job.html',
        # True once every notification has either been delivered or failed.
        finished=(total_notifications == processed_notifications),
        uploaded_file_name=job['original_file_name'],
        template_id=job['template'],
        status=request.args.get('status', ''),
        updates_url=url_for(
            ".view_job_updates",
            service_id=service_id,
            job_id=job['id'],
            status=request.args.get('status', ''),
        ),
        partials=get_job_partials(job, template),
        just_sent=bool(
            request.args.get('just_sent') == 'yes' and
            template['template_type'] == 'letter'
        )
    )
@main.route("/services/<service_id>/jobs/<job_id>.csv")
@login_required
@user_has_permissions('view_activity', admin_override=True)
def view_job_csv(service_id, job_id):
    """Stream a CSV of one job's notifications, filtered by status."""
    job = job_api_client.get_job(service_id, job_id)['data']
    template = service_api_client.get_service_template(
        service_id=service_id,
        template_id=job['template'],
        version=job['template_version']
    )['data']
    filter_args = parse_filter_args(request.args)
    filter_args['status'] = set_status_filters(filter_args)
    # stream_with_context keeps the request context alive while the CSV
    # generator is consumed.
    return Response(
        stream_with_context(
            generate_notifications_csv(
                service_id=service_id,
                job_id=job_id,
                status=filter_args.get('status'),
                page=request.args.get('page', 1),
                page_size=5000,
                format_for_csv=True
            )
        ),
        mimetype='text/csv',
        headers={
            'Content-Disposition': 'inline; filename="{} - {}.csv"'.format(
                template['name'],
                format_datetime_short(job['created_at'])
            )
        }
    )
@main.route("/services/<service_id>/jobs/<job_id>", methods=['POST'])
@login_required
@user_has_permissions('send_texts', 'send_emails', 'send_letters', admin_override=True)
def cancel_job(service_id, job_id):
    """Cancel a job via the API, then bounce back to the service dashboard."""
    job_api_client.cancel_job(service_id, job_id)
    return redirect(url_for('main.service_dashboard', service_id=service_id))
@main.route("/services/<service_id>/jobs/<job_id>.json")
@user_has_permissions('view_activity', admin_override=True)
def view_job_updates(service_id, job_id):
    """JSON endpoint polled by the job page for live partial updates.

    NOTE(review): unlike the sibling job views this route has no
    @login_required decorator -- confirm whether user_has_permissions alone
    enforces authentication.
    """
    job = job_api_client.get_job(service_id, job_id)['data']
    return jsonify(**get_job_partials(
        job,
        service_api_client.get_service_template(
            service_id=current_service['id'],
            template_id=job['template'],
            version=job['template_version']
        )['data'],
    ))
@main.route('/services/<service_id>/notifications/<message_type>', methods=['GET', 'POST'])
@login_required
@user_has_permissions('view_activity', admin_override=True)
def view_notifications(service_id, message_type):
    """Render the activity page for one message type (email/sms/letter).

    POST is used by the search form ('to' comes from form data).
    """
    return render_template(
        'views/notifications.html',
        partials=get_notifications(service_id, message_type),
        message_type=message_type,
        status=request.args.get('status') or 'sending,delivered,failed',
        page=request.args.get('page', 1),
        to=request.form.get('to', ''),
        search_form=SearchNotificationsForm(to=request.form.get('to', '')),
        download_link=url_for(
            '.download_notifications_csv',
            service_id=current_service['id'],
            message_type=message_type,
            status=request.args.get('status')
        )
    )
@main.route('/services/<service_id>/notifications/<message_type>.json', methods=['GET', 'POST'])
@user_has_permissions('view_activity', admin_override=True)
def get_notifications_as_json(service_id, message_type):
    """JSON endpoint polled by the activity page for live partial updates."""
    return jsonify(get_notifications(
        service_id, message_type, status_override=request.args.get('status')
    ))
@main.route('/services/<service_id>/notifications/<message_type>.csv', endpoint="view_notifications_csv")
@user_has_permissions('view_activity', admin_override=True)
def get_notifications(service_id, message_type, status_override=None):
    """Build the activity partials dict; doubles as the CSV download route.

    Serves three callers: view_notifications (HTML partials dict),
    get_notifications_as_json (JSON), and the .csv route declared above
    (detected via request.path).

    NOTE(review): `status_override` is never read in the body -- presumably
    dead, or the filter below was meant to honour it; confirm.
    """
    # TODO get the api to return count of pages as well.
    page = get_page_from_request()
    if page is None:
        # NOTE(review): the trailing `None` argument to format() is ignored
        # (the message has a single placeholder).
        abort(404, "Invalid page argument ({}) reverting to page 1.".format(request.args['page'], None))
    if message_type not in ['email', 'sms', 'letter']:
        abort(404)
    filter_args = parse_filter_args(request.args)
    filter_args['status'] = set_status_filters(filter_args)
    if request.path.endswith('csv'):
        return Response(
            generate_notifications_csv(
                service_id=service_id,
                page=page,
                page_size=5000,
                template_type=[message_type],
                status=filter_args.get('status'),
                limit_days=current_app.config['ACTIVITY_STATS_LIMIT_DAYS']
            ),
            mimetype='text/csv',
            headers={
                'Content-Disposition': 'inline; filename="notifications.csv"'}
        )
    notifications = notification_api_client.get_notifications_for_service(
        service_id=service_id,
        page=page,
        template_type=[message_type],
        status=filter_args.get('status'),
        limit_days=current_app.config['ACTIVITY_STATS_LIMIT_DAYS'],
        to=request.form.get('to', ''),
    )
    # Query-string args carried through pagination links.
    url_args = {
        'message_type': message_type,
        'status': request.args.get('status')
    }
    prev_page = None
    if 'links' in notifications and notifications['links'].get('prev', None):
        prev_page = generate_previous_dict('main.view_notifications', service_id, page, url_args=url_args)
    next_page = None
    if 'links' in notifications and notifications['links'].get('next', None):
        next_page = generate_next_dict('main.view_notifications', service_id, page, url_args)
    return {
        'counts': render_template(
            'views/activity/counts.html',
            status=request.args.get('status'),
            status_filters=get_status_filters(
                current_service,
                message_type,
                service_api_client.get_detailed_service(service_id)['data']['statistics']
            )
        ),
        'notifications': render_template(
            'views/activity/notifications.html',
            notifications=list(add_preview_of_content_to_notifications(
                notifications['notifications']
            )),
            page=page,
            prev_page=prev_page,
            next_page=next_page,
            status=request.args.get('status'),
            message_type=message_type,
            download_link=url_for(
                '.view_notifications_csv',
                service_id=current_service['id'],
                message_type=message_type,
                status=request.args.get('status')
            )
        ),
    }
def get_status_filters(service, message_type, statistics):
    """Build (label, option, link, count) tuples for the status filter nav."""
    stats = statistics[message_type]
    # 'sending' is derived: everything requested that has not yet terminated.
    stats['sending'] = stats['requested'] - stats['delivered'] - stats['failed']
    filters = (
        # key, label, option
        ('requested', 'total', 'sending,delivered,failed'),
        ('sending', 'sending', 'sending'),
        ('delivered', 'delivered', 'delivered'),
        ('failed', 'failed', 'failed'),
    )
    results = []
    for key, label, option in filters:
        link = url_for(
            '.view_notifications',
            service_id=service['id'],
            message_type=message_type,
            status=option
        )
        results.append((label, option, link, stats[key]))
    return results
def _get_job_counts(job):
    """Build (label, query_param, link, count) tuples for a job's status tabs."""
    if job['job_status'] == 'scheduled':
        sending = 0
    else:
        # In-flight = requested minus everything that has terminated.
        sending = (
            job.get('notification_count', 0) -
            job.get('notifications_delivered', 0) -
            job.get('notifications_failed', 0)
        )
    tabs = [
        ('total', '', job.get('notification_count', 0)),
        ('sending', 'sending', sending),
        ('delivered', 'delivered', job.get('notifications_delivered', 0)),
        ('failed', 'failed', job.get('notifications_failed', 0)),
    ]
    return [
        (
            label,
            query_param,
            url_for(
                ".view_job",
                service_id=job['service'],
                job_id=job['id'],
                status=query_param,
            ),
            count
        )
        for label, query_param, count in tabs
    ]
def get_job_partials(job, template):
    """Build the rendered HTML partials (counts/notifications/status) for a job."""
    filter_args = parse_filter_args(request.args)
    filter_args['status'] = set_status_filters(filter_args)
    notifications = notification_api_client.get_notifications_for_service(
        job['service'], job['id'], status=filter_args['status']
    )
    # Letters get a delivery-estimate panel instead of per-status counts.
    if template['template_type'] == 'letter':
        counts = render_template(
            'partials/jobs/count-letters.html',
            total=job.get('notification_count', 0),
            delivery_estimate=get_letter_timings(job['created_at']).earliest_delivery,
        )
    else:
        counts = render_template(
            'partials/count.html',
            counts=_get_job_counts(job),
            status=filter_args['status']
        )
    return {
        'counts': counts,
        'notifications': render_template(
            'partials/jobs/notifications.html',
            notifications=list(
                add_preview_of_content_to_notifications(notifications['notifications'])
            ),
            more_than_one_page=bool(notifications.get('links', {}).get('next')),
            # NOTE(review): raises ZeroDivisionError if notification_count is
            # 0 -- confirm upstream guarantees it is positive here.
            percentage_complete=(job['notifications_requested'] / job['notification_count'] * 100),
            download_link=url_for(
                '.view_job_csv',
                service_id=current_service['id'],
                job_id=job['id'],
                status=request.args.get('status')
            ),
            time_left=get_time_left(job['created_at']),
            job=job,
            template=template,
            template_version=job['template_version'],
        ),
        'status': render_template(
            'partials/jobs/status.html',
            job=job
        ),
    }
def add_preview_of_content_to_notifications(notifications):
    """Yield each notification dict with a 'preview_of_content' key added.

    SMS previews render the whole message body; other types preview the
    subject line. Personalisation is blanked when the template redacts it.
    """
    for notification in notifications:
        if notification['template'].get('redact_personalisation'):
            notification['personalisation'] = {}
        if notification['template']['template_type'] == 'sms':
            yield dict(
                preview_of_content=str(Template(
                    notification['template'],
                    notification['personalisation'],
                    redact_missing_personalisation=True,
                )),
                **notification
            )
        else:
            yield dict(
                preview_of_content=(
                    WithSubjectTemplate(
                        notification['template'],
                        notification['personalisation'],
                        redact_missing_personalisation=True,
                    ).subject
                ),
                **notification
            )
| |
import re
import pprint
import html
import random
from . import context
try:
import pygments
import pygments.lexers
import pygments.formatters
except ImportError:
pygments = None
# Dictionary of registered filter functions.
filtermap = {}

# Decorator for registering filter functions. A filter accepts at least one
# argument - the value to be filtered - and returns the filtered result; it
# may optionally accept any number of additional arguments.
#
# Supported forms:
#
#   @register            # registered under the function's own name
#   @register()          # same as above
#   @register('name')    # registered under the supplied name
def register(nameorfunc=None):
    # Bare @register: the decorated function itself arrives here.
    if callable(nameorfunc):
        filtermap[nameorfunc.__name__] = nameorfunc
        return nameorfunc

    # @register() / @register('name'): return the real decorator.
    def decorator(func):
        filtermap[nameorfunc or func.__name__] = func
        return func
    return decorator
@register
def argtest(*args):
    """ Test filter: returns arguments as a concatenated string. """
    return '|'.join(str(arg) for arg in args)
@register
def default(obj, fallback):
    """ Returns `obj` if `obj` is truthy, otherwise `fallback`.

    Note: any falsy value of `obj` (0, '', [], None) yields the fallback. """
    return obj or fallback
@register
def dtformat(dt, format='%Y-%m-%d %H:%M'):
    """ Formats a datetime object using the specified strftime format string. """
    return dt.strftime(format)
@register
def endswith(s, suffix):
    """ True if the string ends with the specified suffix. """
    return s.endswith(suffix)
@register
@register('e')
@register('esc')
def escape(s, quotes=True):
    """ Converts html syntax characters to character entities.

    Registered under three names: 'escape', 'e' and 'esc'. When `quotes`
    is true, quote characters are escaped as well (html.escape semantics). """
    return html.escape(s, quotes)
@register
def first(seq):
    """ Returns the first element in the sequence `seq`. """
    return seq[0]
@register
def firsth(html):
    """ Returns the content of the first heading element (any level).

    The backreference \\1 ensures the closing tag matches the opening
    heading level. Note: the parameter shadows the `html` module inside
    this function, which is harmless here (only `re` is used). """
    match = re.search(r'<h(\d)+[^>]*>(.*?)</h\1>', html, flags=re.DOTALL)
    return match.group(2) if match else ''
@register
def firsth1(html):
    """ Returns the content of the first h1 element. """
    match = re.search(r'<h1[^>]*>(.*?)</h1>', html, flags=re.DOTALL)
    return match.group(1) if match else ''
@register
def firstp(html):
    """ Returns the content of the first p element. """
    match = re.search(r'<p[^>]*>(.*?)</p>', html, flags=re.DOTALL)
    return match.group(1) if match else ''
@register('reversed')
def get_reversed(seq):
    """ Returns a reverse iterator over the sequence `seq`.

    Named get_reversed so the builtin `reversed` is not shadowed. """
    return reversed(seq)
@register
def index(seq, i):
    """ Returns the ith element in the sequence `seq`. """
    return seq[i]
@register('divisible_by')
def is_divisible_by(n, d):
    """ True if the integer `n` is a multiple of the integer `d`. """
    return n % d == 0
@register('even')
def is_even(n):
    """ True if the integer `n` is even. """
    return n % 2 == 0
@register('odd')
def is_odd(n):
    """ True if the integer `n` is odd. """
    return n % 2 != 0
@register
def join(seq, sep=''):
    """ Joins elements of the sequence `seq` with the string `sep`. """
    return sep.join(str(item) for item in seq)
@register
def last(seq):
    """ Returns the last element in the sequence `seq`. """
    return seq[-1]
@register('len')
def length(seq):
    """ Returns the length of the sequence `seq`. """
    return len(seq)
@register
def lower(s):
    """ Returns the string `s` converted to lowercase. """
    return s.lower()
@register('pprint')
def prettyprint(obj):
    """ Returns a pretty-printed representation of `obj`. """
    return pprint.pformat(obj)
@register
def pygmentize(text, lang=None):
    """ Applies syntax highlighting using Pygments.
    If no language is specified, Pygments will attempt to guess the correct
    lexer to use. If Pygments is not available or if an appropriate lexer
    cannot be found then the filter will return the input text with any
    html special characters escaped.
    """
    # NOTE(review): the except clauses rely on `pygments.util` being
    # reachable as an attribute of `pygments`; presumably it is pulled in by
    # the pygments.lexers import at the top of the module -- confirm.
    if pygments:
        if lang:
            try:
                lexer = pygments.lexers.get_lexer_by_name(lang)
            except pygments.util.ClassNotFound:
                lexer = None
        else:
            try:
                lexer = pygments.lexers.guess_lexer(text)
            except pygments.util.ClassNotFound:
                lexer = None
        if lexer:
            # nowrap=True: emit highlighted spans only, no <div>/<pre> shell.
            formatter = pygments.formatters.HtmlFormatter(nowrap=True)
            text = pygments.highlight(text, lexer, formatter)
        else:
            # No suitable lexer: fall back to plain escaping.
            text = html.escape(text)
    else:
        # Pygments not installed: fall back to plain escaping.
        text = html.escape(text)
    return text
@register('random')
def get_random(seq):
    """ Returns a random element from the sequence `seq`. """
    # Bug fix: this was previously `def random(seq)`, which rebound the
    # module-level name `random` and shadowed the `random` module imported at
    # the top of the file -- so `random.choice` raised AttributeError the
    # first time the filter ran. Naming the function `get_random` (mirroring
    # `get_reversed` above) keeps the filter registered as 'random' while
    # leaving the module import intact.
    return random.choice(seq)
@register('repr')
def to_repr(obj):
    """ Returns the result of calling repr() on `obj`.

    Named to_repr so the builtin `repr` is not shadowed. """
    return repr(obj)
@register('slice')
def get_slice(seq, start, stop=None, step=None):
    """ Returns the start:stop:step slice of the sequence `seq`. """
    # Renamed from `slice` so the filter no longer shadows the builtin
    # `slice` type at module level (matching the `get_reversed`/`to_repr`
    # naming convention used elsewhere in this module); it is still
    # registered under the filter name 'slice'.
    return seq[start:stop:step]
@register
def spaceless(html):
    """ Strips all whitespace between html/xml tags. """
    return re.sub(r'>\s+<', '><', html)
@register
def startswith(s, prefix):
    """ True if the string starts with the specified prefix. """
    return s.startswith(prefix)
@register('str')
def to_str(obj):
    """ Returns the result of calling str() on `obj`. """
    return str(obj)
@register
def striptags(html):
    """ Returns the string `html` with all html tags stripped. """
    return re.sub(r'<[^>]*>', '', html)
@register
def teaser(s, delimiter='<!-- more -->'):
    """ Returns the portion of the string `s` before `delimiter`,
    or an empty string if `delimiter` is not found. """
    index = s.find(delimiter)
    if index == -1:
        return ''
    else:
        return s[:index]
@register
@register('title')
def titlecase(s):
    """ Returns the string `s` converted to titlecase.

    Registered as both 'titlecase' and 'title'. The regex uppercases the
    first letter of each word while treating apostrophes ("don't") as part
    of the word. """
    return re.sub(
        r"[A-Za-z]+('[A-Za-z]+)?",
        lambda m: m.group(0)[0].upper() + m.group(0)[1:],
        s
    )
@register
def truncatechars(s, n, ellipsis='...'):
    """ Truncates the string `s` to at most `n` characters.

    The ellipsis counts towards the budget; trailing spaces and punctuation
    are stripped from the cut point before it is appended. """
    if len(s) > n:
        # Bug fix: the cut previously hard-coded 3 characters (`s[:n - 3]`)
        # regardless of the ellipsis supplied, so custom ellipses could push
        # the result past `n` characters. Reserve len(ellipsis) instead;
        # behavior is unchanged for the default '...' (length 3).
        return s[:n - len(ellipsis)].rstrip(' .,;:?!') + ellipsis
    else:
        return s
@register
def truncatewords(s, n, ellipsis=' [...]'):
    """ Truncates the string `s` to at most `n` words. """
    words = s.split()
    if len(words) > n:
        return ' '.join(words[:n]) + ellipsis
    else:
        return ' '.join(words)
@register
def upper(s):
    """ Returns the string `s` converted to uppercase. """
    return s.upper()
@register
def wrap(s, tag):
    """ Wraps a string in opening and closing tags. """
    return '<%s>%s</%s>' % (tag, str(s), tag)
@register
def if_undefined(obj, fallback):
    """ Returns `obj` if `obj` is defined, otherwise `fallback`.

    'Defined' means not an instance of context.Undefined. """
    return fallback if isinstance(obj, context.Undefined) else obj
@register
def is_defined(obj):
    """ Returns true if `obj` is defined, otherwise false. """
    return not isinstance(obj, context.Undefined)
| |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import torch
from six import string_types
from torch import nn
from odin.backend import concatenate, expand_dims, parse_reduction, squeeze
from odin.networks_torch.keras_torch import Conv1D, Dense, Layer
from odin.utils import as_tuple
class TimeDelay(Layer):
  """ A generalized implementation of time-delayed neural network by applying
  a separately-created layer to each delayed context window.
  Parameters
  ----------
  fn_layer_creator : `callable`
    a function that returns a `keras.Layer`
  delay_context : list of `int`
    list of time delay taken into account
  pooling : {'none', 'sum', 'min', 'max', 'avg', 'stat'} (default='sum')
    pooling in time dimension after convolution operator
    for 'stat' pooling, mean and standard deviation is calculated along
    time-dimension, then output the concatenation of the two.
    if None, no pooling is performed, the output is returned in
    shape `[n_samples, n_reduced_timestep, n_new_features]`
  Input shape
  -----------
  3D tensor with shape: `(batch_size, timesteps, input_dim)`
  Output shape
  ------------
  3D tensor with shape: `(batch_size, new_timesteps, units)`
  """
  def __init__(self,
               fn_layer_creator,
               delay_context=(-2, -1, 0, 1, 2),
               pooling='sum',
               **kwargs):
    super(TimeDelay, self).__init__(**kwargs)
    assert callable(fn_layer_creator), \
      "fn_layer_creator must be callable and return a torch.nn.Module"
    self.fn_layer_creator = fn_layer_creator
    # no duplicated frame index
    self.delay_context = np.array(sorted(set(int(i) for i in delay_context)))
    # Total span of the context window, e.g. (-2..2) -> 5 frames.
    self.context_length = self.delay_context[-1] - self.delay_context[0] + 1
    # Shift the context so the smallest delay becomes a non-negative offset
    # usable directly for slicing.
    self.delays = self.delay_context + max(0, -self.delay_context[0])
    self.min_delay = max(0, min(self.delays))
    # pooling function for aggrevate the time outputs
    self.pooling = 'none' if pooling is None else pooling
    self.fn_pooling = parse_reduction(pooling)
    # One independently-created sub-layer per delay in the context.
    all_layers = nn.ModuleList()
    for time_id in range(len(self.delay_context)):
      layer = fn_layer_creator()
      assert isinstance(layer, torch.nn.Module), \
        "fn_layer_creator must return torch.nn.Module instance, " + \
        "but return type is %s" % \
          str(type(layer))
      # we need to setattr so the Model will manage the Layer
      all_layers.append(layer)
    self.all_layers = all_layers
  def call(self, inputs, training=None):
    # anyway, if the smallest value is negative,
    # start from 0 (i.e. relative position)
    shape = inputs.shape
    timestep = shape[1]
    y = []
    # Each sub-layer sees its own shifted slice of the time axis; all slices
    # have the same length, so the per-delay outputs align frame-by-frame.
    for delay, layer in zip(self.delays, self.all_layers):
      start = delay
      end = timestep - self.context_length + delay + 1 - self.min_delay
      y.append(expand_dims(layer(inputs[:, start:end]), axis=0))
    # Stack along a new leading axis, then pool across the delays.
    y = concatenate(y, axis=0)
    y = self.fn_pooling(y, axis=0)
    # With no pooling and a single-frame context the extra axis is redundant.
    if isinstance(self.pooling, string_types) and \
    'none' in self.pooling.lower() and \
    self.context_length == 1:
      y = squeeze(y, axis=0)
    return y
class TimeDelayDense(TimeDelay):
  """ Time-delayed network whose sub-layer is a fully-connected `Dense`
  layer (a fresh one is created for every delay offset).

  Input shape
  -----------
  3D tensor with shape: `(batch_size, timesteps, input_dim)`

  Output shape
  ------------
  3D tensor with shape: `(batch_size, new_timesteps, units)`
  """

  def __init__(self,
               units,
               delay_context=(-2, -1, 0, 1, 2),
               pooling='sum',
               activation='linear',
               use_bias=False,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               **kwargs):
    # the factory is invoked once per delay offset by the base class
    def _create_layer():
      return Dense(units=units,
                   activation=activation,
                   use_bias=use_bias,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer)
    super(TimeDelayDense, self).__init__(fn_layer_creator=_create_layer,
                                         delay_context=delay_context,
                                         pooling=pooling,
                                         **kwargs)
class TimeDelayConv(TimeDelay):
  """ Time-delayed network that creates one 1D-convolutional sub-layer
  for each time delay.

  Input shape
  -----------
  3D tensor with shape: `(batch_size, timesteps, input_dim)`

  Output shape
  ------------
  3D tensor with shape: `(batch_size, new_timesteps, units)`
  `steps` value might have changed due to padding or strides.
  """

  def __init__(self,
               units,
               kernel_size=3,
               delay_context=(-2, -1, 0, 1, 2),
               pooling='sum',
               activation='linear',
               use_bias=False,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               **kwargs):
    # the factory is invoked once per delay offset by the base class
    def _create_layer():
      return Conv1D(filters=units,
                    kernel_size=kernel_size,
                    strides=1,
                    padding='valid',
                    data_format='channels_last',
                    dilation_rate=1,
                    activation=activation,
                    use_bias=use_bias,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)
    super(TimeDelayConv, self).__init__(fn_layer_creator=_create_layer,
                                        delay_context=delay_context,
                                        pooling=pooling,
                                        **kwargs)
class TimeDelayConvTied(TimeDelay):
  """ Time-delayed dense implementation with weights tied across the context
  window, realized as a single 1D-convolution; only supports a consecutive
  delay context (given a number of `delay_strides`).

  From the paper, it is suggested to create multiple `TimeDelayedConv`
  with variate number of feature map and length of context windows,
  then concatenate the outputs for `Dense` layers

  For example:
   - feature_maps = [50, 100, 150, 200, 200, 200, 200]
   - kernels = [1, 2, 3, 4, 5, 6, 7]

  Parameters
  ----------
  units : `int`
    number of new features
  delay_length : `int` (default=5)
    length of the time-delayed context window
  delay_strides : `int` (default=1)
    stride of the time window
  """

  def __init__(self,
               units,
               delay_length=5,
               delay_strides=1,
               activation='linear',
               use_bias=False,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               **kwargs):
    # the whole context window is covered by a single convolution kernel,
    # so one delay offset (0) with no pooling suffices
    def _create_layer():
      return Conv1D(filters=units,
                    kernel_size=delay_length,
                    strides=delay_strides,
                    padding='valid',
                    data_format='channels_last',
                    dilation_rate=1,
                    activation=activation,
                    use_bias=use_bias,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer)
    super(TimeDelayConvTied, self).__init__(fn_layer_creator=_create_layer,
                                            delay_context=(0,),
                                            pooling='none',
                                            **kwargs)
| |
"""
SimpleGladeApp.py
Module that provides an object oriented abstraction to pygtk and libglade.
Copyright (C) 2004 Sandino Flores Moreno
"""
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import os
import sys
import re
import tokenize
import gtk
import gtk.glade
import weakref
import inspect
__version__ = "1.0"
__author__ = 'Sandino "tigrux" Flores-Moreno'
def bindtextdomain(app_name, locale_dir=None):
    """
    Bind the domain represented by app_name to the locale directory locale_dir.
    It has the effect of loading translations, enabling applications for different
    languages.

    app_name:
        a domain to look for translations, typically the name of an application.

    locale_dir:
        a directory with locales like locale_dir/lang_isocode/LC_MESSAGES/app_name.mo
        If omitted or None, then the current binding for app_name is used.

    If no locale can be set up at all, a no-op _() is installed so that
    translated-string lookups still work (returning the untranslated text).
    """
    try:
        import locale
        import gettext
        # use the user's preferred locale from the environment
        locale.setlocale(locale.LC_ALL, "")
        gtk.glade.bindtextdomain(app_name, locale_dir)
        gettext.install(app_name, locale_dir, unicode=1)
    except (IOError,locale.Error), e:
        # the user's locale failed; force en_US.UTF-8 as a default
        try:
            os.environ["LANGUAGE"] = "en_US.UTF-8"
            locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
            gtk.glade.bindtextdomain(app_name, locale_dir)
            gettext.install(app_name, locale_dir, unicode=1)
            return
        # NOTE(review): bare except — swallows any failure from the
        # en_US fallback, including unexpected errors.
        except:
            # last resort: install an identity _() so calls to _() do not
            # crash; strings are simply left untranslated
            try:
                __builtins__.__dict__["_"] = lambda x : x
            except:
                # __builtins__ is a plain dict when this runs inside a module
                __builtins__["_"] = lambda x : x
class SimpleGladeApp:
def __init__(self, path, root=None, domain=None, **kwargs):
"""
Load a glade file specified by glade_filename, using root as
root widget and domain as the domain for translations.
If it receives extra named arguments (argname=value), then they are used
as attributes of the instance.
path:
path to a glade filename.
If glade_filename cannot be found, then it will be searched in the
same directory of the program (sys.argv[0])
root:
the name of the widget that is the root of the user interface,
usually a window or dialog (a top level widget).
If None or ommited, the full user interface is loaded.
domain:
A domain to use for loading translations.
If None or ommited, no translation is loaded.
**kwargs:
a dictionary representing the named extra arguments.
It is useful to set attributes of new instances, for example:
glade_app = SimpleGladeApp("ui.glade", foo="some value", bar="another value")
sets two attributes (foo and bar) to glade_app.
"""
if os.path.isfile(path):
self.glade_path = path
else:
glade_dir = os.path.dirname( sys.argv[0] )
self.glade_path = os.path.join(glade_dir, path)
for key, value in kwargs.items():
try:
setattr(self, key, weakref.proxy(value) )
except TypeError:
setattr(self, key, value)
self.glade = None
self.install_custom_handler(self.custom_handler)
self.glade = self.create_glade(self.glade_path, root, domain)
if root:
self.main_widget = self.get_widget(root)
else:
self.main_widget = None
self.normalize_names()
self.add_callbacks(self)
self.new()
def __repr__(self):
class_name = self.__class__.__name__
if self.main_widget:
root = gtk.Widget.get_name(self.main_widget)
repr = '%s(path="%s", root="%s")' % (class_name, self.glade_path, root)
else:
repr = '%s(path="%s")' % (class_name, self.glade_path)
return repr
def new(self):
"""
Method called when the user interface is loaded and ready to be used.
At this moment, the widgets are loaded and can be refered as self.widget_name
"""
pass
def add_callbacks(self, callbacks_proxy):
"""
It uses the methods of callbacks_proxy as callbacks.
The callbacks are specified by using:
Properties window -> Signals tab
in glade-2 (or any other gui designer like gazpacho).
Methods of classes inheriting from SimpleGladeApp are used as
callbacks automatically.
callbacks_proxy:
an instance with methods as code of callbacks.
It means it has methods like on_button1_clicked, on_entry1_activate, etc.
"""
self.glade.signal_autoconnect(callbacks_proxy)
def normalize_names(self):
"""
It is internally used to normalize the name of the widgets.
It means a widget named foo:vbox-dialog in glade
is refered self.vbox_dialog in the code.
It also sets a data "prefixes" with the list of
prefixes a widget has for each widget.
"""
for widget in self.get_widgets():
print "widget: ", widget
widget_name = gtk.Widget.get_name(widget)
print "widget_name: ", widget_name
print "============================================="
prefixes_name_l = widget_name.split(":")
prefixes = prefixes_name_l[ : -1]
widget_api_name = prefixes_name_l[-1]
widget_api_name = "_".join( re.findall(tokenize.Name, widget_api_name) )
gtk.Widget.set_name(widget, widget_api_name)
if hasattr(self, widget_api_name):
raise AttributeError("instance %s already has an attribute %s" % (self,widget_api_name))
else:
setattr(self, widget_api_name, widget)
if prefixes:
gtk.Widget.set_data(widget, "prefixes", prefixes)
def add_prefix_actions(self, prefix_actions_proxy):
"""
By using a gui designer (glade-2, gazpacho, etc)
widgets can have a prefix in theirs names
like foo:entry1 or foo:label3
It means entry1 and label3 has a prefix action named foo.
Then, prefix_actions_proxy must have a method named prefix_foo which
is called everytime a widget with prefix foo is found, using the found widget
as argument.
prefix_actions_proxy:
An instance with methods as prefix actions.
It means it has methods like prefix_foo, prefix_bar, etc.
"""
prefix_s = "prefix_"
prefix_pos = len(prefix_s)
is_method = lambda t : callable( t[1] )
is_prefix_action = lambda t : t[0].startswith(prefix_s)
drop_prefix = lambda (k,w): (k[prefix_pos:],w)
members_t = inspect.getmembers(prefix_actions_proxy)
methods_t = filter(is_method, members_t)
prefix_actions_t = filter(is_prefix_action, methods_t)
prefix_actions_d = dict( map(drop_prefix, prefix_actions_t) )
for widget in self.get_widgets():
prefixes = gtk.Widget.get_data(widget, "prefixes")
if prefixes:
for prefix in prefixes:
if prefix in prefix_actions_d:
prefix_action = prefix_actions_d[prefix]
prefix_action(widget)
def custom_handler(self,
glade, function_name, widget_name,
str1, str2, int1, int2):
"""
Generic handler for creating custom widgets, internally used to
enable custom widgets (custom widgets of glade).
The custom widgets have a creation function specified in design time.
Those creation functions are always called with str1,str2,int1,int2 as
arguments, that are values specified in design time.
Methods of classes inheriting from SimpleGladeApp are used as
creation functions automatically.
If a custom widget has create_foo as creation function, then the
method named create_foo is called with str1,str2,int1,int2 as arguments.
"""
try:
handler = getattr(self, function_name)
return handler(str1, str2, int1, int2)
except AttributeError:
return None
def gtk_widget_show(self, widget, *args):
"""
Predefined callback.
The widget is showed.
Equivalent to widget.show()
"""
widget.show()
def gtk_widget_hide(self, widget, *args):
"""
Predefined callback.
The widget is hidden.
Equivalent to widget.hide()
"""
widget.hide()
def gtk_widget_grab_focus(self, widget, *args):
"""
Predefined callback.
The widget grabs the focus.
Equivalent to widget.grab_focus()
"""
widget.grab_focus()
def gtk_widget_destroy(self, widget, *args):
"""
Predefined callback.
The widget is destroyed.
Equivalent to widget.destroy()
"""
widget.destroy()
def gtk_window_activate_default(self, window, *args):
"""
Predefined callback.
The default widget of the window is activated.
Equivalent to window.activate_default()
"""
widget.activate_default()
def gtk_true(self, *args):
"""
Predefined callback.
Equivalent to return True in a callback.
Useful for stopping propagation of signals.
"""
return True
def gtk_false(self, *args):
"""
Predefined callback.
Equivalent to return False in a callback.
"""
return False
def gtk_main_quit(self, *args):
"""
Predefined callback.
Equivalent to self.quit()
"""
self.quit()
def main(self):
"""
Starts the main loop of processing events.
The default implementation calls gtk.main()
Useful for applications that needs a non gtk main loop.
For example, applications based on gstreamer needs to override
this method with gst.main()
Do not directly call this method in your programs.
Use the method run() instead.
"""
gtk.main()
def quit(self):
"""
Quit processing events.
The default implementation calls gtk.main_quit()
Useful for applications that needs a non gtk main loop.
For example, applications based on gstreamer needs to override
this method with gst.main_quit()
"""
gtk.main_quit()
def run(self):
"""
Starts the main loop of processing events checking for Control-C.
The default implementation checks wheter a Control-C is pressed,
then calls on_keyboard_interrupt().
Use this method for starting programs.
"""
try:
self.main()
except KeyboardInterrupt:
self.on_keyboard_interrupt()
def on_keyboard_interrupt(self):
"""
This method is called by the default implementation of run()
after a program is finished by pressing Control-C.
"""
pass
def install_custom_handler(self, custom_handler):
gtk.glade.set_custom_handler(custom_handler)
def create_glade(self, glade_path, root, domain):
return gtk.glade.XML(self.glade_path, root, domain)
def get_widget(self, widget_name):
return self.glade.get_widget(widget_name)
def get_widgets(self):
return self.glade.get_widget_prefix("")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.