gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
"""
import textwrap
import _ssl # if we can't import it, let the error propagate
from _ssl import SSLError
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
from _ssl import RAND_status, RAND_egd, RAND_add
from _ssl import \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
from socket import socket, _fileobject
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
class SSLSocket (socket):
    """This class implements a subtype of socket.socket that wraps
    the underlying OS socket in an SSL context when necessary, and
    provides read and write methods over that channel."""

    def __init__(self, sock, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                 do_handshake_on_connect=True,
                 suppress_ragged_eofs=True):
        socket.__init__(self, _sock=sock._sock)
        # The initializer for socket overwrites the I/O methods with the raw
        # _sock versions (tsk, tsk), so rebind them to the SSL-aware
        # overrides defined on this class.
        self.send = lambda data, flags=0: SSLSocket.send(self, data, flags)
        self.sendto = lambda data, addr, flags=0: SSLSocket.sendto(self, data, addr, flags)
        self.recv = lambda buflen=1024, flags=0: SSLSocket.recv(self, buflen, flags)
        self.recvfrom = lambda addr, buflen=1024, flags=0: SSLSocket.recvfrom(self, addr, buflen, flags)
        self.recv_into = lambda buffer, nbytes=None, flags=0: SSLSocket.recv_into(self, buffer, nbytes, flags)
        self.recvfrom_into = lambda buffer, nbytes=None, flags=0: SSLSocket.recvfrom_into(self, buffer, nbytes, flags)
        if certfile and not keyfile:
            # The certificate file may also contain the private key.
            keyfile = certfile
        # See if the underlying socket is already connected.
        try:
            socket.getpeername(self)
        except:
            # No, no connection yet.  Wrapping is deferred to connect().
            self._sslobj = None
        else:
            # Yes, create the SSL object now.
            self._sslobj = _ssl.sslwrap(self._sock, server_side,
                                        keyfile, certfile,
                                        cert_reqs, ssl_version, ca_certs)
            if do_handshake_on_connect:
                # Handshake in blocking mode, then restore the caller's
                # timeout whatever happens.
                timeout = self.gettimeout()
                try:
                    self.settimeout(None)
                    self.do_handshake()
                finally:
                    self.settimeout(timeout)
        self.keyfile = keyfile
        self.certfile = certfile
        self.cert_reqs = cert_reqs
        self.ssl_version = ssl_version
        self.ca_certs = ca_certs
        self.do_handshake_on_connect = do_handshake_on_connect
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Reference count used by makefile()/close() so the SSL state
        # survives until the last file object is closed.
        self._makefile_refs = 0

    def read(self, len=1024):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        try:
            return self._sslobj.read(len)
        except SSLError as x:
            if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                # Peer closed without a proper SSL shutdown; report EOF.
                return ''
            else:
                raise

    def write(self, data):
        """Write DATA to the underlying SSL channel.  Returns
        number of bytes of DATA actually transmitted."""
        return self._sslobj.write(data)

    def getpeercert(self, binary_form=False):
        """Returns a formatted version of the data in the
        certificate provided by the other end of the SSL channel.
        Return None if no certificate was provided, {} if a
        certificate was provided, but not validated."""
        return self._sslobj.peer_certificate(binary_form)

    def cipher(self):
        """Return the cipher in use, or None if not yet wrapped."""
        if not self._sslobj:
            return None
        else:
            return self._sslobj.cipher()

    def send(self, data, flags=0):
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to send() on %s" %
                    self.__class__)
            while True:
                try:
                    v = self._sslobj.write(data)
                except SSLError as x:
                    # Non-blocking socket couldn't make progress; report
                    # zero bytes written rather than raising.
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        return 0
                    elif x.args[0] == SSL_ERROR_WANT_WRITE:
                        return 0
                    else:
                        raise
                else:
                    return v
        else:
            return socket.send(self, data, flags)

    def sendto(self, data, addr, flags=0):
        if self._sslobj:
            # Datagram-style sends make no sense on a stream SSL channel.
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.sendto(self, data, addr, flags)

    def sendall(self, data, flags=0):
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)
            # Keep sending until every byte has been handed to the SSL layer.
            amount = len(data)
            count = 0
            while (count < amount):
                v = self.send(data[count:])
                count += v
            return amount
        else:
            return socket.sendall(self, data, flags)

    def recv(self, buflen=1024, flags=0):
        if self._sslobj:
            if flags != 0:
                # Bug fix: the original message referred to sendall().
                raise ValueError(
                    "non-zero flags not allowed in calls to recv() on %s" %
                    self.__class__)
            while True:
                try:
                    return self.read(buflen)
                except SSLError as x:
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        # Retry until the SSL layer has data for us.
                        continue
                    else:
                        raise x
        else:
            return socket.recv(self, buflen, flags)

    def recv_into(self, buffer, nbytes=None, flags=0):
        # Default the byte count to the buffer size, falling back to 1024
        # when no sized buffer was supplied.
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv_into() on %s" %
                    self.__class__)
            while True:
                try:
                    tmp_buffer = self.read(nbytes)
                    v = len(tmp_buffer)
                    buffer[:v] = tmp_buffer
                    return v
                except SSLError as x:
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        continue
                    else:
                        raise x
        else:
            return socket.recv_into(self, buffer, nbytes, flags)

    def recvfrom(self, addr, buflen=1024, flags=0):
        if self._sslobj:
            raise ValueError("recvfrom not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom(self, addr, buflen, flags)

    def recvfrom_into(self, buffer, nbytes=None, flags=0):
        if self._sslobj:
            raise ValueError("recvfrom_into not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom_into(self, buffer, nbytes, flags)

    def pending(self):
        """Return the number of already-decrypted bytes buffered by SSL."""
        if self._sslobj:
            return self._sslobj.pending()
        else:
            return 0

    def unwrap(self):
        """Shut down the SSL layer, returning the bare underlying socket."""
        if self._sslobj:
            s = self._sslobj.shutdown()
            self._sslobj = None
            return s
        else:
            raise ValueError("No SSL wrapper around " + str(self))

    def shutdown(self, how):
        # Drop the SSL object first; the raw socket shutdown follows.
        self._sslobj = None
        socket.shutdown(self, how)

    def close(self):
        # Only truly close once every makefile() handle has been closed.
        if self._makefile_refs < 1:
            self._sslobj = None
            socket.close(self)
        else:
            self._makefile_refs -= 1

    def do_handshake(self):
        """Perform a TLS/SSL handshake."""
        self._sslobj.do_handshake()

    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        # Here we assume that the socket is client-side, and not
        # connected at the time of the call.  We connect it, then wrap it.
        if self._sslobj:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        socket.connect(self, addr)
        self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
                                    self.cert_reqs, self.ssl_version,
                                    self.ca_certs)
        if self.do_handshake_on_connect:
            self.do_handshake()

    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        newsock, addr = socket.accept(self)
        # The accepted socket inherits this socket's SSL configuration.
        return (SSLSocket(newsock,
                          keyfile=self.keyfile,
                          certfile=self.certfile,
                          server_side=True,
                          cert_reqs=self.cert_reqs,
                          ssl_version=self.ssl_version,
                          ca_certs=self.ca_certs,
                          do_handshake_on_connect=self.do_handshake_on_connect,
                          suppress_ragged_eofs=self.suppress_ragged_eofs),
                addr)

    def makefile(self, mode='r', bufsize=-1):
        """Make and return a file-like object that
        works with the SSL connection.  Just use the code
        from the socket module."""
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize)
def wrap_socket(sock, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True):
    """Wrap an existing socket object in an SSLSocket configured with the
    given key/certificate material and protocol options."""
    return SSLSocket(sock,
                     keyfile=keyfile,
                     certfile=certfile,
                     server_side=server_side,
                     cert_reqs=cert_reqs,
                     ssl_version=ssl_version,
                     ca_certs=ca_certs,
                     do_handshake_on_connect=do_handshake_on_connect,
                     suppress_ragged_eofs=suppress_ragged_eofs)
# some utility functions
def cert_time_to_seconds(cert_time):
    """Takes a date-time string in standard ASN1_print form
    ("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
    a Python time value in seconds past the epoch.

    Bug fix: the timestamp carries an explicit "GMT" suffix, so it must be
    interpreted as UTC with calendar.timegm().  The previous implementation
    used time.mktime(), which wrongly applied the local timezone and shifted
    the result by the UTC offset.
    """
    import time
    import calendar
    # strptime tolerates the double space ASN1_print emits for
    # single-digit days (e.g. "May  9 00:00:00 2007 GMT").
    return float(calendar.timegm(
        time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT")))
# PEM armour delimiting a base64-encoded certificate body.
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"


def DER_cert_to_PEM_cert(der_cert_bytes):
    """Takes a certificate in binary DER format and returns the
    PEM version of it as a string."""
    if hasattr(base64, 'standard_b64encode'):
        # Preferred because the older encodestring() API gets the
        # line length wrong.
        f = base64.standard_b64encode(der_cert_bytes)
        if not isinstance(f, str):
            # standard_b64encode() returns bytes on Python 3.
            f = f.decode('ascii')
        # Bug fix: textwrap.fill() does not end its output with a newline,
        # so one must be added explicitly or the footer ends up appended
        # to the last base64 line.
        return (PEM_HEADER + '\n' +
                textwrap.fill(f, 64) + '\n' +
                PEM_FOOTER + '\n')
    else:
        # encodestring() already terminates its output with a newline.
        return (PEM_HEADER + '\n' +
                base64.encodestring(der_cert_bytes) +
                PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
    """Takes a certificate in ASCII PEM format and returns the
    DER-encoded version of it as a byte sequence.

    Raises ValueError if the string is not framed by the PEM
    certificate header and footer.
    """
    if not pem_cert_string.startswith(PEM_HEADER):
        raise ValueError("Invalid PEM encoding; must start with %s"
                         % PEM_HEADER)
    if not pem_cert_string.strip().endswith(PEM_FOOTER):
        raise ValueError("Invalid PEM encoding; must end with %s"
                         % PEM_FOOTER)
    # Strip the armour; what remains is wrapped base64 (with newlines).
    d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
    # b64decode() replaces the deprecated decodestring() (removed in
    # Python 3.9); both call binascii.a2b_base64, which skips
    # non-alphabet characters such as the embedded newlines.
    return base64.b64decode(d)
def get_server_certificate (addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
    """Retrieve the certificate from the server at the specified address,
    and return it as a PEM-encoded string.
    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt."""
    # Unpacking also validates that addr is a (host, port) pair.
    host, port = addr
    # Validation is only requested when a CA bundle was supplied.
    cert_reqs = CERT_REQUIRED if ca_certs is not None else CERT_NONE
    conn = wrap_socket(socket(), ssl_version=ssl_version,
                       cert_reqs=cert_reqs, ca_certs=ca_certs)
    conn.connect(addr)
    dercert = conn.getpeercert(True)
    conn.close()
    return DER_cert_to_PEM_cert(dercert)
def get_protocol_name (protocol_code):
    """Return a human-readable name for a PROTOCOL_* constant, or
    "<unknown>" for any unrecognized code."""
    display_names = {
        PROTOCOL_TLSv1: "TLSv1",
        PROTOCOL_SSLv23: "SSLv23",
        PROTOCOL_SSLv2: "SSLv2",
        PROTOCOL_SSLv3: "SSLv3",
    }
    return display_names.get(protocol_code, "<unknown>")
# a replacement for the old socket.ssl function
def sslwrap_simple (sock, keyfile=None, certfile=None):
    """A replacement for the old socket.ssl function.  Designed
    for compatibility with Python 2.5 and earlier.  Will disappear in
    Python 3.0."""
    # Unwrap higher-level socket objects down to the raw _socket handle.
    if hasattr(sock, "_sock"):
        sock = sock._sock
    ssl_sock = _ssl.sslwrap(sock, 0, keyfile, certfile, CERT_NONE,
                            PROTOCOL_SSLv23, None)
    # Handshake only if the socket is already connected; getpeername()
    # raising (any error) indicates there is no connection yet.
    connected = True
    try:
        sock.getpeername()
    except:
        connected = False
    if connected:
        ssl_sock.do_handshake()
    return ssl_sock
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.examples.tutorials.mnist import input_data
import tabular_logger as tlogger
import tensorflow as tf
import numpy as np
import argparse
import time
import sys
import os
def normal(x, mu, sigma):
    """Gaussian density N(x; mu, sigma) as a TensorFlow expression."""
    pi = tf.constant(np.pi)
    normalizer = (2 * pi * sigma ** 2) ** 0.5
    return tf.exp(-0.5 * (x - mu) ** 2 / sigma ** 2) / normalizer
def log_normal(x, mu, sigma):
    """Log-density of the Gaussian N(mu, sigma) at x, as a TF expression."""
    pi = tf.constant(np.pi)
    return (-0.5 * tf.log(2 * pi)
            - tf.log(sigma)
            - 0.5 * tf.square(x - mu) / (sigma ** 2))
def log_prior(x, isScaled=True):
    """Log prior density of a weight x.

    With isScaled, uses a two-component scale-mixture-of-Gaussians prior
    (mixing weight 0.5, stddevs e^-1 and e^-7); otherwise a single
    Gaussian with stddev e^-1.
    """
    if not isScaled:
        sigma = tf.exp(-1.0)
        return log_normal(x, 0.0, sigma)
    sigma1 = tf.exp(-1.0)
    sigma2 = tf.exp(-7.0)
    pi = 0.5
    mixture = pi * normal(x, 0.0, sigma1) + (1 - pi) * normal(x, 0.0, sigma2)
    return tf.log(mixture)
class bayes():
    """Bayes-by-Backprop MLP on MNIST (TensorFlow v1 graph mode).

    Builds a 784-400-400-10 network whose weights carry a Gaussian
    variational posterior, trained with the ELBO (cross-entropy plus a
    KL term).  Three forward-pass variants are supported: plain
    reparameterized sampling, flipout, and the local reparameterization
    trick (LRT).
    """
    def __init__(self, args):
        # Parsed command-line arguments (see __main__ below).
        self.args = args
        self.data_dir = args.data_dir
        self.mnist = input_data.read_data_sets(self.data_dir,one_hot=True,fake_data=False)
        self.batch_size = args.batch_size
        self.log_dir = args.log_dir
        # Directory name used by the (commented-out) checkpoint saver.
        self.model = 'models'
        # NOTE(review): self.scale is stored but never used in the loss —
        # the KL term is divided only by self.M; confirm intended.
        self.scale = args.scale
        # Prior stddev used by the closed-form KL in each layer.
        self.w_prior_std = 1.0
        self.isFlip = args.isFlip
        self.LRT = args.LRT
        if args.lr_decay:
            # A variable so the decayed rate can be assigned each step.
            self.learning_rate = tf.Variable(0.0, trainable=False)
        else:
            self.learning_rate = args.learning_rate

    def bayesian_nn_layer(self, input_tensor, input_dim, output_dim, isTrain, layer_name, nonlinearity=None):
        """Fully-connected Bayesian layer using plain weight sampling.

        Returns (activations, closed-form KL of the weight posterior).
        isTrain selects sampled weights (train) vs. posterior means (eval).
        """
        # Guards tf.log against zero sigma.
        eps = 1e-35
        with tf.name_scope(layer_name):
            # Initialize the variational parameters.  Reference: weight
            # uncertainty in neural networks (Blundell et al.).
            with tf.name_scope('weights_mean'):
                mu_w = tf.Variable(tf.random_normal([input_dim, output_dim], stddev=0.1))
            with tf.name_scope('weights_sd'):
                rho_w = tf.Variable(tf.random_normal([input_dim, output_dim], mean=-3.0, stddev=0.1))
                # Softplus keeps the stddev positive.
                sigma_w = tf.log(1+tf.exp(rho_w))
            with tf.name_scope('bias_mean'):
                # Biases are point estimates (no posterior), so they do not
                # contribute to the KL term.
                biases = tf.Variable(tf.zeros([output_dim]))
            def closed_form_kl():
                # KL(N(mu_w, sigma_w) || N(0, w_prior_std)), summed over
                # all input_dim*output_dim weights.
                dim = input_dim * output_dim
                return (tf.log(self.w_prior_std)*dim - \
                        tf.reduce_sum(tf.log(sigma_w+eps)) + \
                        0.5*(-dim+1.0/(self.w_prior_std**2)*(tf.reduce_sum(sigma_w**2) +\
                        tf.reduce_sum(mu_w**2))))
            def train_forward():
                # Reparameterization: w = mu + sigma * eps, eps ~ N(0, 1).
                epsilon_w = tf.random_normal([input_dim, output_dim], stddev=1.0)
                weights = mu_w + tf.multiply(sigma_w, epsilon_w)
                with tf.name_scope('Wx_plus_b'):
                    preactivate = tf.matmul(input_tensor, weights) + biases
                if nonlinearity is not None:
                    preactivate = nonlinearity(preactivate)
                return preactivate, closed_form_kl()
            def map_inference():
                # Deterministic pass through the posterior means.
                weights = mu_w
                # biases = mu_b
                with tf.name_scope('Wx_plus_b'):
                    preactivate = tf.matmul(input_tensor, weights) + biases
                if nonlinearity is not None:
                    preactivate = nonlinearity(preactivate)
                return preactivate, closed_form_kl()
            return tf.cond(isTrain, lambda: train_forward(), lambda: map_inference())

    # Only be called during training.
    def flipoutlayerFC(self, x, W_0, delta_W):
        """Flipout FC transform: x @ W_0 plus a pseudo-independent
        per-example perturbation built from delta_W and random sign flips."""
        weight_dim = W_0.shape.as_list()
        # x is n*m where m is the dimension, n is the mini-batch size.
        # W_0 is m*h where h is the num of hidden units.
        # NOTE(review): epsilon is computed but never used below.
        epsilon = tf.random_normal(weight_dim, stddev=1.0)
        def generate_flipping_factor(dim):
            # Random +/-1 matrix of shape (batch_size, dim).
            shape = tf.stack([tf.shape(x)[0], dim])
            random = tf.random_normal(shape)
            positives = tf.ones(shape)
            negatives = tf.zeros(shape)-1
            return tf.where(random>0, positives, negatives)
        E1 = generate_flipping_factor(weight_dim[1])
        E2 = generate_flipping_factor(weight_dim[0])
        # Perturbation: ((x * E2) @ delta_W) * E1, decorrelating the noise
        # across examples in the mini-batch.
        pert_x = tf.multiply(tf.matmul(tf.multiply(x, E2), delta_W), E1)
        return (tf.matmul(x, W_0) + pert_x)

    def bayesian_nn_layer_flip(self, input_tensor, input_dim, output_dim, isTrain, layer_name, nonlinearity=None):
        """Fully-connected Bayesian layer using the flipout estimator.

        Same contract as bayesian_nn_layer; only the training-time
        forward pass differs.
        """
        eps = 1e-35
        with tf.name_scope(layer_name):
            # Initialize the variational parameters.  Reference: weight
            # uncertainty in neural networks (Blundell et al.).
            with tf.name_scope('weights_mean'):
                mu_w = tf.Variable(tf.random_normal([input_dim, output_dim], stddev=0.1))
            with tf.name_scope('weights_sd'):
                rho_w = tf.Variable(tf.random_normal([input_dim, output_dim], mean=-3.0, stddev=0.1))
                sigma_w = tf.log(1+tf.exp(rho_w))
            with tf.name_scope('bias_mean'):
                biases = tf.Variable(tf.zeros([output_dim]))
            def closed_form_kl():
                # Same closed-form KL as in bayesian_nn_layer.
                dim = input_dim * output_dim
                return (tf.log(self.w_prior_std)*dim - \
                        tf.reduce_sum(tf.log(sigma_w+eps)) + \
                        0.5*(-dim+1.0/(self.w_prior_std**2)*(tf.reduce_sum(sigma_w**2) +\
                        tf.reduce_sum(mu_w**2))))
            def train_forward():
                with tf.name_scope('perturbation'):
                    epsilon_w = tf.random_normal([input_dim, output_dim], stddev=1.0)
                    delta_W = tf.multiply(sigma_w, epsilon_w)
                    # NOTE(review): weights is assigned but unused; the
                    # flipout path consumes mu_w and delta_W directly.
                    weights = mu_w + delta_W
                with tf.name_scope('flipout'):
                    flipoutFC = self.flipoutlayerFC(input_tensor, mu_w, delta_W)
                with tf.name_scope('Wx_plus_b'):
                    preactivate = flipoutFC + biases
                if nonlinearity is not None:
                    preactivate = nonlinearity(preactivate)
                return preactivate, closed_form_kl()
            def map_inference():
                weights = mu_w
                with tf.name_scope('Wx_plus_b'):
                    preactivate = tf.matmul(input_tensor, weights) + biases
                if nonlinearity is not None:
                    preactivate = nonlinearity(preactivate)
                return preactivate, closed_form_kl()
            return tf.cond(isTrain, lambda: train_forward(), lambda: map_inference())

    def bayesian_nn_layer_LRT(self, input_tensor, input_dim, output_dim, isTrain, layer_name, nonlinearity=None):
        """Fully-connected Bayesian layer using the local
        reparameterization trick: noise is sampled in activation space
        rather than weight space.
        """
        eps = 1e-35
        with tf.name_scope(layer_name):
            with tf.name_scope('weights_mean'):
                mu_w = tf.Variable(tf.random_normal([input_dim, output_dim], stddev=0.1))
            with tf.name_scope('weights_sd'):
                rho_w = tf.Variable(tf.random_normal([input_dim, output_dim], mean=-3.0, stddev=0.1))
                sigma_w = tf.log(1+tf.exp(rho_w))
            with tf.name_scope('bias_mean'):
                # NOTE(review): biases are only applied in map_inference;
                # the LRT training path omits them — confirm intended.
                biases = tf.Variable(tf.zeros([output_dim]))
            def closed_form_kl():
                dim = input_dim * output_dim
                return (tf.log(self.w_prior_std)*dim - \
                        tf.reduce_sum(tf.log(sigma_w+eps)) + \
                        0.5*(-dim+1.0/(self.w_prior_std**2)*(tf.reduce_sum(sigma_w**2) +\
                        tf.reduce_sum(mu_w**2))))
            def train_forward():
                # Per-activation Gaussian: mean x@mu_w, variance x^2@sigma_w^2.
                mu_b = tf.matmul(input_tensor, mu_w)
                sigma_b = tf.sqrt(tf.matmul(tf.square(input_tensor), tf.square(sigma_w))+eps)
                output_shape = tf.stack([tf.shape(input_tensor)[0], output_dim])
                epsilon = tf.random_normal(output_shape, stddev=1.0)
                preactivate = mu_b + tf.multiply(sigma_b, epsilon)
                if nonlinearity is not None:
                    preactivate = nonlinearity(preactivate)
                return preactivate, closed_form_kl()
            def map_inference():
                weights = mu_w
                with tf.name_scope('Wx_plus_b'):
                    preactivate = tf.matmul(input_tensor, weights) + biases
                if nonlinearity is not None:
                    preactivate = nonlinearity(preactivate)
                return preactivate, closed_form_kl()
            return tf.cond(isTrain, lambda: train_forward(), lambda: map_inference())

    def build_model(self):
        """Builds the TF graph: placeholders, the 3-layer network (variant
        chosen by --LRT / --isFlip), loss, optimizer, and accuracy."""
        # Switch between stochastic forward pass (train) and MAP inference.
        self.isTrain = tf.placeholder(tf.bool, name='isTrain')
        with tf.name_scope('input'):
            self.x = tf.placeholder(tf.float32, [None, 784], name='x-input')
            self.y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
            # M: divisor for the KL term; n: current mini-batch size.
            self.M = tf.placeholder(tf.float32, shape=(), name='number_mini_batches')
            self.n = tf.placeholder(tf.float32, shape=(), name='mini_batch_size')
        if self.args.LRT:
            hidden1, kl1 = self.bayesian_nn_layer_LRT(self.x, 784, 400, self.isTrain, 'layer1', nonlinearity=tf.nn.relu)
            hidden2, kl2 = self.bayesian_nn_layer_LRT(hidden1, 400, 400, self.isTrain, 'layer2', nonlinearity=tf.nn.relu)
            y, kl3 = self.bayesian_nn_layer_LRT(hidden2, 400, 10, self.isTrain, 'layer3')
        elif self.isFlip:
            hidden1, kl1 = self.bayesian_nn_layer_flip(self.x, 784, 400, self.isTrain, 'layer1', nonlinearity=tf.nn.relu)
            hidden2, kl2 = self.bayesian_nn_layer_flip(hidden1, 400, 400, self.isTrain, 'layer2', nonlinearity=tf.nn.relu)
            y, kl3 = self.bayesian_nn_layer_flip(hidden2, 400, 10, self.isTrain, 'layer3')
        else:
            hidden1, kl1 = self.bayesian_nn_layer(self.x, 784, 400, self.isTrain, 'layer1', nonlinearity=tf.nn.relu)
            hidden2, kl2 = self.bayesian_nn_layer(hidden1, 400, 400, self.isTrain, 'layer2', nonlinearity=tf.nn.relu)
            y, kl3 = self.bayesian_nn_layer(hidden2, 400, 10, self.isTrain, 'layer3')
        with tf.name_scope('cross-entropy'):
            self.cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=y))
        with tf.name_scope('KL'):
            # KL summed over layers, scaled down by M.
            self.KL = (kl1 + kl2 + kl3) / self.M
            # tf.summary.scalar('KL', self.KL)
        with tf.name_scope('loss'):
            # Negative ELBO: KL regularizer plus data-fit term.
            self.loss = self.KL + self.cross_entropy
            # tf.summary.scalar('loss', self.loss)
        with tf.name_scope('train'):
            self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(
                self.loss)
        # if self.args.lr_decay:
        #     tf.summary.scalar('lr', self.learning_rate)
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(self.y_, 1))
        with tf.name_scope('accuracy'):
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            # tf.summary.scalar('accuracy', self.accuracy)

    def run_model(self):
        """Builds the graph, then runs the training loop, logging train
        metrics every step and test metrics every 100 iterations."""
        self.build_model()
        # NOTE(review): re-reads the dataset even though __init__ already
        # stored one in self.mnist.
        mnist = input_data.read_data_sets(self.data_dir, one_hot=True)
        log_dir = self.log_dir + '/Flip{}batch{}scale{}lr{}'.format(self.isFlip, self.batch_size, self.scale, self.learning_rate)
        tlogger.start(log_dir)
        # Record the full configuration at the top of the log.
        for k, v in self.args.__dict__.items():
            tlogger.log('{}: {}'.format(k, v))
        with tf.Session() as sess:
            # merged = tf.summary.merge_all()
            # train_writer = tf.summary.FileWriter(self.log_dir + '/Flip{}train{}scale{}lr{}'
            #     .format(self.isFlip, self.batch_size, self.scale, self.learning_rate), sess.graph)
            # test_writer = tf.summary.FileWriter(self.log_dir + '/Flip{}test{}scale{}lr{}'
            #     .format(self.isFlip, self.batch_size, self.scale, self.learning_rate))
            # saver = tf.train.Saver(max_to_keep=40)
            sess.run(tf.global_variables_initializer())
            # M = mnist.train.labels.shape[0] // self.batch_size
            # NOTE(review): M is hard-coded to the MNIST training-set size
            # (55000), not the number of mini-batches the commented line
            # above computes — confirm which KL scaling is intended.
            M = 55000
            tstart = time.time()
            for i in range(self.args.num_iterations):
                # NOTE(review): start is assigned but never used.
                start = time.time()
                if self.args.lr_decay:
                    step_size = self.piecewise_learning_rate(i)
                    sess.run(tf.assign(self.learning_rate, step_size))
                batch = mnist.train.next_batch(self.batch_size)
                _, train_KL, train_accuracy, train_loss, train_cross = sess.run([self.train_step,
                    self.KL, self.accuracy, self.loss, self.cross_entropy], feed_dict={self.x: batch[0],
                    self.y_: batch[1], self.M: M, self.n: batch[0].shape[0], self.isTrain: True})
                if i % 100 == 0:
                    # train_writer.add_summary(train_summary, i)
                    tlogger.log('********** Iteration {} **********'.format(i))
                    tlogger.record_tabular("train_loss", train_loss)
                    tlogger.record_tabular("train_cross", train_cross)
                    tlogger.record_tabular("train_KL", train_KL)
                    tlogger.record_tabular("train_acc", train_accuracy)
                    # print('Train accuracy, Loss at step %s: %s, %s' % (i, train_accuracy, train_loss))
                    # Evaluate on the full test set with MAP inference.
                    xs, ys = mnist.test.images, mnist.test.labels
                    test_accuracy, test_loss, test_KL, test_cross = sess.run([self.accuracy, self.loss,
                        self.KL, self.cross_entropy], feed_dict={ self.x: xs, self.y_: ys, self.M: M,
                        self.n: xs.shape[0], self.isTrain: False})
                    # test_writer.add_summary(test_summary, i)
                    # print('Test accuracy at step %s: %s' % (i, test_accuracy))
                    tlogger.record_tabular("test_loss", test_loss)
                    tlogger.record_tabular("test_cross", test_cross)
                    tlogger.record_tabular("test_KL", test_KL)
                    tlogger.record_tabular("test_acc", test_accuracy)
                    tlogger.record_tabular("TimeElapsed", time.time() - tstart)
                    tlogger.dump_tabular()
            tlogger.stop()
            # print('test accuracy %g' % self.accuracy.eval(feed_dict={
            #     self.x: mnist.test.images, self.y_: mnist.test.labels, \
            #     self.M: 1.0, self.n: mnist.test.images.shape[0], self.isTrain: False}))
            # if os.path.exists(self.model):
            #     saver.save(sess, os.path.join(self.model, '{}{}scale{}'.format(self.batch_size, self.scale)), global_step=i)
            # train_writer.close()
            # test_writer.close()

    def piecewise_learning_rate(self, step):
        """Step-decay schedule: the initial rate for the first third of
        training, 0.1x for the middle third, and 0.01x thereafter."""
        init_lr = self.args.learning_rate
        num_iterations = self.args.num_iterations
        if step <= (num_iterations/3):
            return init_lr
        elif (num_iterations/3)< step <= (2*num_iterations/3):
            return 0.1*init_lr
        else:
            return 0.1*0.1*init_lr
if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters, then train.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data_dir', type=str, default='/ais/gobi5/wenyemin/summerproject/data/mnist',
                        help='data file dir')
    parser.add_argument('--log_dir', type=str, default='NEW_log',
                        help='log file dir')
    parser.add_argument('--learning_rate', type=float, default=1e-4,
                        help='learning rate')
    parser.add_argument('--scale', type=float, default=0.1,
                        help='scale the KL term')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='minibatch size')
    parser.add_argument('--num_iterations', type=int, default=30000,
                        help='number of iterations')
    # The three flags below are mutually relevant: --LRT takes priority
    # over --isFlip in build_model().
    parser.add_argument('--isFlip', action='store_true', default=False,
                        help='whether use flipout')
    parser.add_argument('--lr_decay', action='store_true', default=False,
                        help='whether use learning rate decay')
    parser.add_argument('--LRT', action='store_true', default=False,
                        help='whether use local reparametrization tric')
    args = parser.parse_args()
    BBB = bayes(args)
    BBB.run_model()
|
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Performance runner for d8.
Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json
The suite json format is expected to be:
{
"path": <relative path chunks to benchmark resources and main file>,
"name": <optional suite name, file name is default>,
"archs": [<architecture name for which this suite is run>, ...],
"binary": <name of binary to run, default "d8">,
"flags": [<flag to d8>, ...],
"run_count": <how often will this suite run (optional)>,
"run_count_XXX": <how often will this suite run for arch XXX (optional)>,
"resources": [<js file to be loaded before main>, ...]
"main": <main js benchmark runner file>,
"results_regexp": <optional regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
"benchmarks": [
{
"name": <name of the benchmark>,
"results_regexp": <optional more specific regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
}, ...
]
}
The benchmarks field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
benchmarks.
A suite's results_regexp is expected to have one string place holder
"%s" for the benchmark name. A benchmark's results_regexp overwrites suite
defaults.
A suite's results_processor may point to an optional python script. If
specified, it is called after running the benchmarks like this (with a path
relative to the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>
The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.
A suite without "benchmarks" is considered a benchmark itself.
Full example (suite with one runner):
{
"path": ["."],
"flags": ["--expose-gc"],
"archs": ["ia32", "x64"],
"run_count": 5,
"run_count_ia32": 3,
"main": "run.js",
"results_regexp": "^%s: (.+)$",
"units": "score",
"benchmarks": [
{"name": "Richards"},
{"name": "DeltaBlue"},
{"name": "NavierStokes",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Full example (suite with several runners):
{
"path": ["."],
"flags": ["--expose-gc"],
"archs": ["ia32", "x64"],
"run_count": 5,
"units": "score",
"benchmarks": [
{"name": "Richards",
"path": ["richards"],
"main": "run.js",
"run_count": 3,
"results_regexp": "^Richards: (.+)$"},
{"name": "NavierStokes",
"path": ["navier_stokes"],
"main": "run.js",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Path pieces are concatenated. D8 is always run with the suite's path as cwd.
"""
import json
import optparse
import os
import re
import sys
from testrunner.local import commands
from testrunner.local import utils
# Architecture of the machine this script runs on, as guessed by the test
# runner utilities; used as a fallback when --arch is "auto"/"native".
ARCH_GUESS = utils.DefaultArch()
# Architectures a suite may list in its "archs" field.
SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "nacl_ia32",
                   "nacl_x64",
                   "x64",
                   "arm64"]
class Results(object):
  """Place holder for result traces."""
  def __init__(self, traces=None, errors=None):
    # Fresh lists as defaults; a shared mutable default would leak
    # state between instances.
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    """Returns a plain-dict representation suitable for JSON output."""
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    """Serializes this result set as JSON into file_name."""
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    """Returns a new Results combining self and other.

    Bug fix: the previous implementation extended self's lists in place
    and returned self, so `a + b` silently mutated (and aliased) `a`.
    Addition is now side-effect free; accumulation via
    reduce(lambda r, t: r + t, ...) behaves identically.
    """
    return Results(self.traces + other.traces,
                   self.errors + other.errors)

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())
class Node(object):
  """Represents a node in the benchmark suite tree structure."""
  def __init__(self, *args):
    # Extra positional args are accepted (and ignored) so subclass
    # constructors can delegate here uniformly.
    self._children = []

  def AppendChild(self, child):
    # Children are traversed externally via node._children.
    self._children.append(child)
class DefaultSentinel(Node):
  """Fake parent node with all default values.

  Used as the root of the tree so every real Graph can inherit its
  defaults from a parent without special-casing the top level.
  """
  def __init__(self):
    super(DefaultSentinel, self).__init__()
    # Default shell binary to execute.
    self.binary = "d8"
    # Default number of repetitions per suite.
    self.run_count = 10
    # Accumulated path chunks / graph names / d8 flags / resource files.
    self.path = []
    self.graphs = []
    self.flags = []
    self.resources = []
    # No result/stddev regexps by default; leaves must supply one.
    self.results_regexp = None
    self.stddev_regexp = None
    # Default unit for the performance dashboard.
    self.units = "score"
class Graph(Node):
  """Represents a benchmark suite definition.
  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(Graph, self).__init__()
    self._suite = suite

    # Validate the suite configuration shape early.
    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values (parent's values extended with this suite's).
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.resources = parent.resources[:] + suite.get("resources", [])

    # Discrete values (with parent defaults).
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    # An arch-specific count (e.g. "run_count_ia32") overrides the generic one.
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.units = suite.get("units", parent.units)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string place holder for the
    # suite name is expected.
    # TODO(machenbach): Currently that makes only sense for the leaf level.
    # Multiple place holders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
class Trace(Graph):
  """Represents a leaf in the benchmark suite tree structure.
  Handles collection of measurements.
  """
  def __init__(self, suite, parent, arch):
    super(Trace, self).__init__(suite, parent, arch)
    # A leaf must know how to extract its result from the output.
    assert self.results_regexp
    self.results = []
    self.errors = []
    self.stddev = ""

  def ConsumeOutput(self, stdout):
    """Extracts this trace's result (and optional stddev) from one run's
    stdout, recording an error message when a regexp doesn't match."""
    # Bug fix: these were bare excepts, which also swallowed
    # KeyboardInterrupt/SystemExit; a failed match (re.search returning
    # None) or a bad pattern both surface as Exception subclasses.
    try:
      self.results.append(
          re.search(self.results_regexp, stdout, re.M).group(1))
    except Exception:
      self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
                         % (self.results_regexp, self.graphs[-1]))
    try:
      if self.stddev_regexp and self.stddev:
        # A benchmark-supplied stddev only makes sense for a single run.
        self.errors.append("Benchmark %s should only run once since a stddev "
                           "is provided by the benchmark." % self.graphs[-1])
      if self.stddev_regexp:
        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
    except Exception:
      self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
                         % (self.stddev_regexp, self.graphs[-1]))

  def GetResults(self):
    """Packages the collected measurements as a Results object."""
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)
class Runnable(Graph):
  """Represents a runnable benchmark suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    # Runnable suites are required to specify a "main" file.
    return self._suite["main"]

  def ChangeCWD(self, suite_path):
    """Changes the cwd to to path defined in the current graph.
    The benchmarks are supposed to be relative to the suite configuration.
    """
    # NOTE: mutates process-wide state (os.chdir).
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommand(self, shell_dir):
    """Assembles the d8 command line: binary, flags, resources, main file."""
    # TODO(machenbach): This requires +.exe if run on windows.
    return (
      [os.path.join(shell_dir, self.binary)] +
      self.flags +
      self.resources +
      [self.main]
    )

  def Run(self, runner):
    """Iterates over several runs and handles the output for all traces."""
    for stdout in runner():
      # Every child trace scans the same run output for its own result.
      for trace in self._children:
        trace.ConsumeOutput(stdout)
    # Merge all child results into a single Results (Python 2 builtin reduce).
    return reduce(lambda r, t: r + t.GetResults(), self._children, Results())
class RunnableTrace(Trace, Runnable):
  """Represents a runnable benchmark suite definition that is a leaf.

  Initialization is inherited from Trace via the normal MRO; the previous
  __init__ override only forwarded to super() and was redundant.
  """
  def Run(self, runner):
    """Iterates over several runs and handles the output."""
    for stdout in runner():
      self.ConsumeOutput(stdout)
    return self.GetResults()
def MakeGraph(suite, arch, parent):
  """Factory method for making graph objects."""
  if isinstance(parent, Runnable):
    # Below a runnable can only be traces.
    return Trace(suite, parent, arch)
  has_main = suite.get("main")
  has_subbenchmarks = suite.get("benchmarks")
  if has_main:
    # A main file makes this graph runnable; with subbenchmarks (traces) it
    # is an inner runnable node, without them a runnable leaf.
    if has_subbenchmarks:
      return Runnable(suite, parent, arch)
    return RunnableTrace(suite, parent, arch)
  if has_subbenchmarks:
    # This is neither a leaf nor a runnable.
    return Graph(suite, parent, arch)
  raise Exception("Invalid benchmark suite configuration.")  # pragma: no cover
def BuildGraphs(suite, arch, parent=None):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """
  parent = parent or DefaultSentinel()

  # TODO(machenbach): Implement notion of cpu type?
  supported_archs = suite.get("archs", ["ia32", "x64"])
  if arch not in supported_archs:
    return None

  graph = MakeGraph(suite, arch, parent)
  for child_suite in suite.get("benchmarks", []):
    BuildGraphs(child_suite, arch, graph)
  parent.AppendChild(graph)
  return graph
def FlattenRunnables(node):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  if isinstance(node, Runnable):
    yield node
    return
  if isinstance(node, Node):
    # Recurse into inner nodes and re-yield their runnables.
    for child in node._children:
      for runnable in FlattenRunnables(child):
        yield runnable
    return
  raise Exception("Invalid benchmark suite configuration.")  # pragma: no cover
# TODO: Implement results_processor.
def Main(args):
  """Command-line entry point.

  Parses options, builds graph trees from the given suite JSON files, runs
  every runnable, and prints or writes the collected results.

  Returns:
    0 on success, 1 on bad arguments or if any errors were collected.
  """
  parser = optparse.OptionParser()
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  (options, args) = parser.parse_args(args)

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if not options.arch in SUPPORTED_ARCHS:  # pragma: no cover
    print "Unknown architecture %s" % options.arch
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  # Buildbots put the d8 binary directly in "Release"; local builds use
  # "<arch>.release".
  if options.buildbot:
    shell_dir = os.path.join(workspace, options.outdir, "Release")
  else:
    shell_dir = os.path.join(workspace, options.outdir,
                             "%s.release" % options.arch)

  results = Results()
  for path in args:
    path = os.path.abspath(path)

    if not os.path.exists(path):  # pragma: no cover
      results.errors.append("Benchmark file %s does not exist." % path)
      continue

    with open(path) as f:
      suite = json.loads(f.read())

    # If no name is given, default to the file name without .json.
    suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

    for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)):
      print ">>> Running suite: %s" % "/".join(runnable.graphs)
      # NOTE: changes the process-wide cwd so benchmark resources resolve.
      runnable.ChangeCWD(path)

      def Runner():
        """Output generator that reruns several times."""
        for i in xrange(0, max(1, runnable.run_count)):
          # TODO(machenbach): Make timeout configurable in the suite definition.
          # Allow timeout per arch like with run_count per arch.
          output = commands.Execute(runnable.GetCommand(shell_dir), timeout=60)
          print ">>> Stdout (#%d):" % (i + 1)
          print output.stdout
          if output.stderr:  # pragma: no cover
            # Print stderr for debugging.
            print ">>> Stderr (#%d):" % (i + 1)
            print output.stderr
          yield output.stdout

      # Let runnable iterate over all runs and handle output.
      results += runnable.Run(Runner)

  if options.json_test_results:
    results.WriteToFile(options.json_test_results)
  else:  # pragma: no cover
    print results

  return min(1, len(results.errors))
# Script entry point: exit status is 1 if any suite reported errors.
if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for analytics on course dashboard pages."""
__author__ = ['Michael Gainer (mgainer@google.com)']
import collections
import logging
import zlib
from mapreduce import context
from common import schema_fields
from common import utils as common_utils
from controllers import sites
from models import courses
from models import data_sources
from models import entities
from models import jobs
from models import models
from models import transforms
from google.appengine.api import datastore_types
from google.appengine.ext import db
class AbstractStudentAggregationComponent(object):
    """Interface for modules contributing to the per-Student event aggregate.

    Extension modules that emit student-related events can expose that data
    to BigQuery (via the data pump) either through their own data source or
    by plugging into the Student aggregate.  Contributing here is mildly
    preferred: admins then push one data source instead of several, no
    BigQuery joins are needed, and the EventEntity table is scanned once
    rather than once per module.

    Any method below may be implemented either as an instance method or as a
    @classmethod — but if @classmethod is used, all of them must be
    overridden to keep Python happy.
    """

    def get_name(self):
        """Return a short, unique name for this component.

        __name__ could serve as a unique identifier, but keeping get_name()
        in the interface allows implementations to be plain instances.
        """
        raise NotImplementedError()

    def get_event_sources_wanted(self):
        """Return the EventEntity "source" values this component handles.

        E.g, "enter-page", "attempt-lesson" and so on.

        Returns:
            list of strings for event types we can handle.
        """
        return []

    # pylint: disable=unused-argument
    def build_static_params(self, app_context):
        """Precompute course-level facts once, before the map/reduce starts.

        Called a single time at job start so implementations can cache
        anything too expensive to recompute in every process_event() call.
        Return None when nothing needs caching; any object type is allowed.

        Args:
            app_context: A standard CB application context object.
        Returns:
            Any.
        """
        return None

    # pylint: disable=unused-argument
    def process_event(self, event, static_params):
        """Handle one EventEntity during the map phase.

        Invoked once per Event whose "source" matches one of the values from
        get_event_sources_wanted().  Everything returned here for a given
        Student is later handed, as a list, to produce_aggregate().

        Args:
            event: an EventEntity.
            static_params: the value from build_static_params(), if any.
        Returns:
            Any object that can be converted to a string via
            transforms.dumps(), or None.
        """
        return None

    def produce_aggregate(self, course, student, static_params, event_items):
        """Combine per-event outputs for one Student (reduce phase).

        Called once per Student that generated any events — and since simply
        registering generates events, that is every registered student.  It
        runs even when process_event() produced no output for the student.
        Must return a dict matching the schema from get_schema(), or None.

        Args:
            course: The Course in which the student and the events are found.
            student: the Student for which the events occurred.
            static_params: the value from build_static_params(), if any.
            event_items: a list of all the items produced by process_event()
                for the given Student.
        Returns:
            A dict corresponding to the declared schema.
        """
        raise NotImplementedError()

    def get_schema(self):
        """Return this component's slice of the aggregate schema.

        May be a SchemaField, FieldArray or FieldRegistry; it becomes a
        top-level element of the master schema in the aggregate data source.
        """
        raise NotImplementedError()
class StudentAggregateEntity(entities.BaseEntity):
    """Holds data aggregated from Event entities for a single Student.

    As we run the registered sub-aggregators for the various event types,
    the reduce step of our master map/reduce job will be presented with
    summarized data for all events pertaining to a single Student.  Rather
    than write this large volume of data out to, say, BlobStore, we instead
    prefer to write each Student's aggregated data to one record in the DB.
    Doing this permits us to use existing paginated-rest-data-source logic
    to provide the aggregated student data as a feed to the data pump."""

    # zlib-compressed JSON dict of all component aggregates; written by
    # StudentAggregateGenerator.reduce().  Entity key_name is the user ID.
    data = db.BlobProperty()

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # The key name is the user ID; transform_fn obfuscates it so PII
        # does not leak out of exported keys.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class StudentAggregateGenerator(jobs.MapReduceJob):
    """M/R job to aggregate data by student using registered plug-ins.

    This class coordinates the work of plugin classes registered with
    StudentAggregateComponentRegistry and combines their work into a single
    StudentAggregateEntity record in the datastore.  Plugin classes are
    insulated from one another, and are permitted to fail individually
    without compromising the results contributed for a Student by other
    plugins.
    """

    @staticmethod
    def get_description():
        return 'student_aggregate'

    @staticmethod
    def entity_class():
        # The map phase iterates over all EventEntity records.
        return models.EventEntity

    def build_additional_mapper_params(self, app_context):
        """Collects per-component schemas and static params for the M/R job.

        Args:
            app_context: A standard CB application context object.
        Returns:
            A dict with the course namespace, each component's JSON schema
            and top-level schema name (keyed by component name), and any
            expensive-to-compute static value a component provides.
        """
        schemas = {}
        schema_names = {}
        ret = {
            'course_namespace': app_context.get_namespace_name(),
            'schemas': schemas,
            'schema_names': schema_names,
        }
        for component in StudentAggregateComponentRegistry.get_components():
            component_name = component.get_name()
            static_value = component.build_static_params(app_context)
            if static_value:
                ret[component_name] = static_value
            schema = component.get_schema()
            # FieldRegistry exposes 'title'; SchemaField/FieldArray expose
            # 'name'.
            if hasattr(schema, 'title'):
                schema_name = schema.title
            else:
                schema_name = schema.name
            schema_names[component_name] = schema_name
            schemas[component_name] = schema.get_json_schema_dict()
        return ret

    @staticmethod
    def map(event):
        """Fans one EventEntity out to all components wanting its source."""
        for component in (StudentAggregateComponentRegistry.
                          get_components_for_event_source(event.source)):
            component_name = component.get_name()
            params = context.get().mapreduce_spec.mapper.params
            static_data = params.get(component_name)
            value = None
            try:
                value = component.process_event(event, static_data)
            # pylint: disable=broad-except
            except Exception as ex:
                # Isolate component failures; log and keep going so one bad
                # plugin cannot poison the whole job.
                common_utils.log_exception_origin()
                logging.critical('Student aggregation map function '
                                 'component handler %s failed: %s',
                                 component_name, str(ex))
            if value:
                # Prefix with the component name so reduce() can route the
                # item back to the component that produced it.
                value_str = '%s:%s' % (component_name, transforms.dumps(value))
                yield event.user_id, value_str

    @staticmethod
    def reduce(user_id, values):
        """Combines all per-component map outputs for one Student."""
        # Convenience for collections: Pre-load Student and Course objects.
        student = None
        try:
            student = models.Student.get_by_user_id(user_id)
        # pylint: disable=broad-except
        except Exception:
            common_utils.log_exception_origin()
        if not student:
            logging.warning(
                'Student for student aggregation with user ID %s '
                'was not loaded.  Ignoring records for this student.', user_id)
            return

        params = context.get().mapreduce_spec.mapper.params
        ns = params['course_namespace']
        app_context = sites.get_course_index().get_app_context_for_namespace(ns)
        course = courses.Course(None, app_context=app_context)

        # Bundle items together into lists by collection name
        event_items = collections.defaultdict(list)
        for value in values:
            component_name, payload = value.split(':', 1)
            event_items[component_name].append(transforms.loads(payload))

        # Build up per-Student aggregate by calling each component.  Note
        # that we call each component whether or not its mapper produced any
        # output.
        aggregate = {}
        for component in StudentAggregateComponentRegistry.get_components():
            component_name = component.get_name()
            static_value = params.get(component_name)
            value = {}
            try:
                value = component.produce_aggregate(
                    course, student, static_value,
                    event_items.get(component_name, []))
                if not value:
                    continue
            # pylint: disable=broad-except
            except Exception as ex:
                common_utils.log_exception_origin()
                logging.critical('Student aggregation reduce function '
                                 'component handler %s failed: %s',
                                 component_name, str(ex))
                continue

            # Validate the component's output against its declared schema
            # before merging; drop (and log) nonconforming contributions.
            schema_name = params['schema_names'][component_name]
            if schema_name not in value:
                logging.critical(
                    'Student aggregation reduce handler %s produced '
                    'a dict which does not contain the top-level '
                    'name (%s) from its registered schema.',
                    component_name, schema_name)
                continue
            variances = transforms.validate_object_matches_json_schema(
                value[schema_name], params['schemas'][component_name])
            if variances:
                logging.critical(
                    'Student aggregation reduce handler %s produced '
                    'a value which does not match its schema: %s',
                    component_name, ' '.join(variances))
                continue
            aggregate.update(value)

        # Overwrite any previous value.
        # TODO(mgainer): Consider putting records into blobstore.  Some
        # light activity manually producing test data is about 10K unzipped
        # and 1K zipped.  Unlikely that we'd see 1000x this amount of
        # activity, but possible eventually.
        data = zlib.compress(transforms.dumps(aggregate))
        # pylint: disable=protected-access
        if len(data) > datastore_types._MAX_RAW_PROPERTY_BYTES:
            # TODO(mgainer): Add injection and collection of counters to
            # map/reduce job.  Have overridable method to verify no issues
            # occurred when job completes.  If critical issues, mark job
            # as failed, even though M/R completed.
            # Bug fix: the '%d' placeholder previously had no argument, so
            # this log call itself failed instead of emitting the message.
            logging.critical(
                'Aggregated compressed student data is over %d bytes; '
                'cannot store this in one field; ignoring this record!',
                datastore_types._MAX_RAW_PROPERTY_BYTES)
        else:
            StudentAggregateEntity(key_name=user_id, data=data).put()
class StudentAggregateComponentRegistry(
        data_sources.AbstractDbTableRestDataSource):
    """Data source over StudentAggregateEntity, plus the component registry.

    Components implementing AbstractStudentAggregationComponent register
    here; the class also serves the aggregated records as a paginated REST
    data source for the data pump.
    """

    # Registration indices, kept in sync by register_component().
    _components = []
    _components_by_name = {}
    _components_by_schema = {}
    _components_for_event_source = collections.defaultdict(list)

    @classmethod
    def get_name(cls):
        return 'student_aggregate'

    @classmethod
    def get_title(cls):
        return 'Student Aggregate'

    @classmethod
    def get_entity_class(cls):
        return StudentAggregateEntity

    @classmethod
    def required_generators(cls):
        # The aggregate records must be (re)built by this M/R job before
        # this source can serve them.
        return [StudentAggregateGenerator]

    @classmethod
    def exportable(cls):
        return True

    @classmethod
    def get_default_chunk_size(cls):
        # Page size for the paginated REST feed.
        return 100

    @classmethod
    def get_schema(cls, app_context, log, data_source_context):
        """Builds the master schema from all registered components' schemas."""
        ret = schema_fields.FieldRegistry('student_aggregation')
        for component in cls._components:
            ret.add_property(component.get_schema())
        if data_source_context.send_uncensored_pii_data:
            obfuscation = 'Un-Obfuscated'
        else:
            obfuscation = 'Obfuscated'
        description = (obfuscation + ' version of user ID.  Usable to join '
                       'to other tables also keyed on obfuscated user ID.')
        ret.add_property(schema_fields.SchemaField(
            'user_id', 'User ID', 'string', description=description))
        return ret.get_json_schema_dict()['properties']

    @classmethod
    def _postprocess_rows(cls, app_context, data_source_context, schema,
                          log, page_number, rows):
        # Decompress each stored aggregate and attach the (possibly
        # obfuscated) user ID before serving a page of results.
        if data_source_context.send_uncensored_pii_data:
            transform_fn = lambda x: x
        else:
            transform_fn = cls._build_transform_fn(data_source_context)

        ret = []
        for row in rows:
            item = transforms.loads(zlib.decompress(row.data))
            item['user_id'] = transform_fn(row.key().id_or_name())
            ret.append(item)
        return ret

    @classmethod
    def get_schema_name(cls, component):
        # SchemaField/FieldArray expose 'name'; FieldRegistry uses 'title'.
        schema = component.get_schema()
        if hasattr(schema, 'name'):
            return schema.name
        return schema.title

    @classmethod
    def register_component(cls, component):
        """Registers a component, validating name/schema uniqueness first.

        Raises:
            ValueError: if the component name contains a colon (reserved as
                the map-output separator) or if the name or schema member
                collides with an already-registered component.
        """
        component_name = component.get_name()
        # Colons separate component name from payload in map() output.
        if ':' in component_name:
            raise ValueError('Component names may not contain colons.')
        if component_name in cls._components_by_name:
            raise ValueError(
                'There is already a student aggregation component '
                'named "%s" registered. ' % component_name)
        schema_name = cls.get_schema_name(component)
        if schema_name in cls._components_by_schema:
            raise ValueError(
                'There is already a student aggregation component schema '
                'member named "%s" registered by %s.' % (
                    schema_name,
                    cls._components_by_schema[schema_name].get_name()))
        # All validation passed; now mutate the registry indices.
        cls._components.append(component)
        cls._components_by_name[component_name] = component
        cls._components_by_schema[schema_name] = component
        for event_source in component.get_event_sources_wanted():
            cls._components_for_event_source[event_source].append(component)

    @classmethod
    def get_components_for_event_source(cls, source):
        return cls._components_for_event_source.get(source, [])

    @classmethod
    def get_components(cls):
        return cls._components
|
|
# -*- coding: utf-8 -*-
"""
Dimension reduction with optimal transport
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
from scipy import linalg
import autograd.numpy as np
from pymanopt.manifolds import Stiefel
from pymanopt import Problem
from pymanopt.solvers import SteepestDescent, TrustRegions
def dist(x1, x2):
    """Compute the matrix of pairwise squared euclidean distances (autograd).
    """
    sq1 = np.sum(np.square(x1), 1)
    sq2 = np.sum(np.square(x2), 1)
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 <a, b>
    return sq1.reshape((-1, 1)) + sq2.reshape((1, -1)) - 2 * np.dot(x1, x2.T)
def sinkhorn(w1, w2, M, reg, k):
    """Sinkhorn algorithm with a fixed number of iterations (autograd).
    """
    # Gibbs kernel of the cost matrix.
    K = np.exp(-M / reg)
    u = np.ones((M.shape[0],))
    v = np.ones((M.shape[1],))
    # Alternate the two marginal-scaling updates k times.
    for _ in range(k):
        v = w2 / np.dot(K.T, u)
        u = w1 / np.dot(K, v)
    return u.reshape((-1, 1)) * K * v.reshape((1, -1))
def split_classes(X, y):
    """Split the samples in X into one float32 array per class label in y.
    """
    # np.unique returns labels sorted, so the output order is deterministic.
    return [X[y == label, :].astype(np.float32) for label in np.unique(y)]
def fda(X, y, p=2, reg=1e-16):
    """
    Fisher Discriminant Analysis

    Parameters
    ----------
    X : numpy.ndarray (n,d)
        Training samples
    y : np.ndarray (n,)
        labels for training samples
    p : int, optional
        size of dimensionality reduction
    reg : float, optional
        Regularization term >0 (ridge regularization)

    Returns
    -------
    P : (d x p) ndarray
        Optimal transportation matrix for the given parameters
    proj : fun
        projection function including mean centering
    """
    # Bug fix: use per-feature means (axis=0).  np.mean(X) collapsed over
    # ALL entries to a scalar, so centering and the class means were wrong.
    mx = np.mean(X, 0)
    X -= mx.reshape((1, -1))

    # data split between classes
    d = X.shape[1]
    xc = split_classes(X, y)
    nc = len(xc)

    # at most nc - 1 discriminant directions exist
    p = min(nc - 1, p)

    # within-class scatter: average of the per-class covariances
    Cw = 0
    for x in xc:
        Cw += np.cov(x, rowvar=False)
    Cw /= nc

    # per-feature mean of each class (columns of mxc)
    mxc = np.zeros((d, nc))
    for i in range(nc):
        mxc[:, i] = np.mean(xc[i], 0)
    mx0 = np.mean(mxc, 1)

    # between-class scatter: outer products of centered class means
    Cb = 0
    for i in range(nc):
        Cb += (mxc[:, i] - mx0).reshape((-1, 1)) * \
            (mxc[:, i] - mx0).reshape((1, -1))

    # generalized eigenproblem Cb v = w (Cw + reg I) v; keep top-p directions
    w, V = linalg.eig(Cb, Cw + reg * np.eye(d))

    idx = np.argsort(w.real)

    Popt = V[:, idx[-p:]]

    def proj(X):
        # apply the same centering used during the fit before projecting
        return (X - mx.reshape((1, -1))).dot(Popt)

    return Popt, proj
def wda(X, y, p=2, reg=1, k=10, solver=None, maxiter=100, verbose=0, P0=None):
    """
    Wasserstein Discriminant Analysis [11]_

    The function solves the following optimization problem:

    .. math::
        P = \\text{arg}\min_P \\frac{\\sum_i W(PX^i,PX^i)}{\\sum_{i,j\\neq i} W(PX^i,PX^j)}

    where :

    - :math:`P` is a linear projection operator in the Stiefel(p,d) manifold
    - :math:`W` is entropic regularized Wasserstein distances
    - :math:`X^i` are samples in the dataset corresponding to class i

    Parameters
    ----------
    X : numpy.ndarray (n,d)
        Training samples
    y : np.ndarray (n,)
        labels for training samples
    p : int, optional
        size of dimensionality reduction
    reg : float, optional
        Regularization term >0 (entropic regularization)
    solver : str, optional
        None for steepest descent or 'TrustRegions' for trust regions
        algorithm, else should be a pymanopt.solvers
    P0 : numpy.ndarray (d,p)
        Initial starting point for projection
    verbose : int, optional
        Print information along iterations

    Returns
    -------
    P : (d x p) ndarray
        Optimal transportation matrix for the given parameters
    proj : fun
        projection function including mean centering

    References
    ----------
    .. [11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016). Wasserstein Discriminant Analysis. arXiv preprint arXiv:1608.08063.
    """  # noqa
    # Bug fix: use the per-feature mean (axis=0) for centering; np.mean(X)
    # reduced over all entries to a single scalar.
    mx = np.mean(X, 0)
    X -= mx.reshape((1, -1))

    # data split between classes
    d = X.shape[1]
    xc = split_classes(X, y)
    # compute uniform weighs
    wc = [np.ones((x.shape[0]), dtype=np.float32) / x.shape[0] for x in xc]

    def cost(P):
        # wda loss: within-class transport cost over between-class cost
        loss_b = 0
        loss_w = 0
        for i, xi in enumerate(xc):
            xi = np.dot(xi, P)
            for j, xj in enumerate(xc[i:]):
                xj = np.dot(xj, P)
                M = dist(xi, xj)
                G = sinkhorn(wc[i], wc[j + i], M, reg, k)
                if j == 0:
                    # j == 0 pairs a class with itself: within-class term
                    loss_w += np.sum(G * M)
                else:
                    loss_b += np.sum(G * M)

        # loss inversed because minimization
        return loss_w / loss_b

    # declare manifold and problem
    manifold = Stiefel(d, p)
    problem = Problem(manifold=manifold, cost=cost)

    # declare solver and solve
    if solver is None:
        solver = SteepestDescent(maxiter=maxiter, logverbosity=verbose)
    elif solver in ['tr', 'TrustRegions']:
        solver = TrustRegions(maxiter=maxiter, logverbosity=verbose)

    Popt = solver.solve(problem, x=P0)

    def proj(X):
        # apply the same centering used during the fit before projecting
        return (X - mx.reshape((1, -1))).dot(Popt)

    return Popt, proj
|
|
"""Tests for ``tinymemory``."""
import itertools as it
from multiprocessing.pool import ThreadPool
import random
import pytest
from tinymr import MapReduce
from tinymr.errors import KeyCountError, ClosedTaskError
from tinymr.tools import single_key_output
class _WordCount(MapReduce):

    """Defined at module level so other tests can subclass it to exercise
    concurrency and parallelism.
    """

    def mapper(self, item):
        # Emit a (word, 1) pair for every whitespace-delimited token.
        words = item.lower().split()
        return zip(words, it.repeat(1))

    def reducer(self, key, values):
        yield key, sum(values)

    def output(self, items):
        return single_key_output(items)
@pytest.fixture(scope='class')
def wordcount():
    # Yields the class itself (not an instance) so tests can subclass it.
    return _WordCount
@pytest.mark.parametrize('method_name', ['init_map', 'init_reduce'])
def test_init_phases(tiny_text, wordcount, method_name):
    """Test ``init_map()`` and ``init_reduce()``."""

    class WordCount(wordcount):

        def __init__(self):
            self.initialized = False

        def initializer(self):
            self.initialized = True

    wc = WordCount()
    # Patch the phase-init hook under test onto the instance so running the
    # task flips the flag.
    setattr(wc, method_name, wc.initializer)

    assert not wc.initialized
    # tuple() forces the lazy pipeline to actually execute.
    tuple(wc(tiny_text.splitlines()))
    assert wc.initialized
def test_serial_sort():
    """Make sure enabling sorting actually sorts."""

    # Rows are '<partition key> <sort key> <data>', deliberately out of
    # order on the sort key within each partition.
    text = [
        'key2 sort2 data2',
        'key2 sort1 data1',
        'key3 sort2 data2',
        'key3 sort1 data1',
        'key1 sort2 data2',
        'key1 sort1 data1'
    ]

    class GroupSort(MapReduce):

        # The last key column is used only for sorting, not partitioning.
        n_sort_keys = 1

        def mapper(self, item):
            yield item.split()

        def reducer(self, key, values):
            return zip(it.repeat(key), values)

    gs = GroupSort()
    results = {k: tuple(v) for k, v in gs(text)}

    assert len(results) == 3
    assert results == {
        'key1': ('data1', 'data2'),
        'key2': ('data1', 'data2'),
        'key3': ('data1', 'data2')}
def test_serial_no_sort():
    """Make sure that disabling sorting actually disables sorting."""

    text = [
        '1 6',
        '1 5',
        '1 4',
        '1 3',
        '1 2',
        '1 1']

    class Grouper(MapReduce):

        def mapper(self, item):
            yield item.split()

        def reducer(self, key, values):
            return zip(it.repeat(key), values)

    g = Grouper()
    results = {k: tuple(v) for k, v in g(text)}

    # With no sort keys, values keep their original input order.
    assert results == {'1': ('6', '5', '4', '3', '2', '1')}
class _WCParallelSort(MapReduce):

    """Define out here so we can pickle it in multiprocessing."""

    # Make sure everything gets sent to a single map + combine
    chunksize = 10
    jobs = 4
    # Last key column sorts values within each partition.
    n_sort_keys = 1

    def mapper(self, item):
        yield item.split()

    def reducer(self, key, values):
        return zip(it.repeat(key), values)
def test_parallel_sort():
    """Process in parallel with sorting."""

    # Same shape as the serial test: '<partition> <sort> <data>' rows.
    text = [
        'key2 sort2 data2',
        'key2 sort1 data1',
        'key3 sort2 data2',
        'key3 sort1 data1',
        'key1 sort2 data2',
        'key1 sort1 data1'
    ]

    wc = _WCParallelSort()
    # sorted() normalizes value order, which is not guaranteed across jobs.
    results = {k: sorted(v) for k, v in wc(text)}

    assert results == {
        'key1': ['data1', 'data2'],
        'key2': ['data1', 'data2'],
        'key3': ['data1', 'data2']}
def test_composite_partition_sort():
    """Composite key with sorting."""

    class GroupSort(MapReduce):

        # Two leading columns partition; the next two sort.
        n_partition_keys = 2
        n_sort_keys = 2

        def mapper(self, item):
            yield item

        def reducer(self, key, values):
            return zip(it.repeat(key), values)

    data = [
        ('p1', 'p2', 's1', 's2', 'd1'),
        ('p1', 'p2', 's3', 's4', 'd2'),
        ('p3', 'p4', 's1', 's2', 'd1'),
        ('p3', 'p4', 's3', 's4', 'd2'),
        ('p5', 'p6', 's1', 's2', 'd1'),
        ('p5', 'p6', 's3', 's4', 'd2')]

    # Shuffle so correct output can only come from actual sorting.
    random.shuffle(data)

    gs = GroupSort()
    results = {k: tuple(v) for k, v in gs(data)}

    assert results == {
        'p1': ('d1', 'd2'),
        'p3': ('d1', 'd2'),
        'p5': ('d1', 'd2'),
    }
def test_MemMapReduce_exceptions():
    """Mapper/reducer yielding more than (key, value) raises KeyCountError."""

    class TooManyMapperKeys(MapReduce):

        def mapper(self, item):
            # 3-tuple: one key too many for the default configuration.
            yield 1, 2, 3

    tmmk = TooManyMapperKeys()
    with pytest.raises(KeyCountError):
        tmmk([1])

    class TooManyReducerKeys(MapReduce):

        def mapper(self, item):
            yield 1, 2

        def reducer(self, key, values):
            yield 1, 2, 3

    tmrk = TooManyReducerKeys()
    with pytest.raises(KeyCountError):
        tmrk([1])
def test_run_map_method(wordcount):
    """``tinymr.MapReduce._run_map()`` isn't always called."""
    wc = wordcount()
    # _run_map on a single item should produce the mapper's (word, 1) pairs.
    expected = (
        ('key', 1),
        ('value', 1)
    )
    assert expected == wc._run_map('key value')
class _WCThreaded(_WordCount):
    # Word count configured to run its phases in a thread pool.
    threaded = True
    jobs = 2
def test_threaded(tiny_text, tiny_text_wc_output):
    """Threaded mode uses ThreadPool and matches serial output."""
    wc = _WCThreaded()
    assert isinstance(wc._map_job_pool, ThreadPool)
    assert isinstance(wc._reduce_job_pool, ThreadPool)
    assert dict(wc(tiny_text.splitlines())) == dict(tiny_text_wc_output)
def test_context_manager(wordcount, tiny_text, tiny_text_wc_output):
    """Test context manager and ensure default implementation exists."""

    class WordCount(wordcount):

        def __init__(self):
            self.closed = False

        def close(self):
            self.closed = True

    with WordCount() as wc:
        assert not wc.closed
        assert dict(tiny_text_wc_output) == dict(wc(tiny_text.splitlines()))
    # Leaving the 'with' block must have invoked close().
    assert wc.closed
def test_MemMapReduce_properties(wordcount):
    """Default property values and chunksize fan-out."""
    wc = wordcount()
    assert wc.chunksize == 1
    assert not wc.threaded
    assert wc.close() is None

    class WordCount(wordcount):
        chunksize = 2

    # map_chunksize and reduce_chunksize default to chunksize.
    wc = WordCount()
    assert wc.chunksize == 2
    assert wc.map_chunksize == 2
    assert wc.reduce_chunksize == 2
@pytest.mark.parametrize(
    'method_name', ['check_map_keys', 'check_reduce_keys'])
def test_MemMapReduce_check_keys(wordcount, tiny_text, method_name):
    """Tests both ``MR.check_map_keys()`` and ``MR.check_reduce_keys()``."""

    class WordCount(wordcount):

        def checker(self, keys):
            # Its possible something like a ValueError, KeyError, or subclass
            # of KeyError will be raised accidentally if something breaks in
            # the code around where this check actually happens, but a
            # NameError is much less likely, and even less likely to go
            # unnoticed.
            raise NameError(keys)

    wc = WordCount()
    # Install the failing checker as the hook under test.
    setattr(wc, method_name, wc.checker)
    with pytest.raises(NameError):
        wc(tiny_text)
def test_closed(wordcount):
    """An instance of a task that has been closed should raise an exception
    if it is used twice.
    """
    # Explicit close().
    wc = wordcount()
    assert not wc.closed
    wc.close()
    assert wc.closed
    with pytest.raises(ClosedTaskError):
        wc('')

    # Implicit close() via the context manager.
    with wordcount() as wc:
        assert not wc.closed
    assert wc.closed
    with pytest.raises(ClosedTaskError):
        wc('')
def test_not_implemented_methods():
    """Base class leaves mapper()/reducer() abstract."""
    mr = MapReduce()
    with pytest.raises(NotImplementedError):
        mr.mapper(None)
    with pytest.raises(NotImplementedError):
        mr.reducer(None, None)
def test_default_methods():
    """Default output() passes items through unchanged."""
    mr = MapReduce()
    expected = [(i, tuple(range(i))) for i in range(1, 10)]
    assert list(mr.output(expected)) == expected
    assert mr._sort_key_idx is None
    # Running the task still requires a mapper implementation.
    with pytest.raises(NotImplementedError):
        mr([None])
def test_set_properties():
    """All of the MapReduce level configuration happens in properties, but
    since the entire ``MapReduce.__init__()`` is handed over to the user,
    each of these properties needs to have a getter _and_ setter.
    """
    # Collect all the public properties
    props = []
    for attr in dir(MapReduce):
        obj = getattr(MapReduce, attr)
        if not attr.startswith('_') and isinstance(obj, property):
            props.append(attr)

    mr = MapReduce()
    for p in props:
        # Make sure the attribute isn't already set for some reason but
        # cache the value so it can be replaced once this property is tested
        original = getattr(mr, p)
        assert original != 'whatever', \
            "Property '{}' has already been set?".format(p)

        # This calls the setter
        try:
            setattr(mr, p, 'whatever')
        # The default error message isn't very helpful for debugging tests.
        except AttributeError:
            raise AttributeError(
                "Can't set property '{}' on '{}'".format(
                    p, mr.__class__.__name__))

        assert getattr(mr, p) == 'whatever'

        # Some properties default to other properties, for instance 'map_jobs'
        # and 'reduce_jobs' both default to 'jobs'.  If 'jobs' is tested
        # first then 'map_jobs' and 'reduce_jobs' will inherit its new
        # value.  By explicitly resetting the property's value to the
        # original state this test is a little more sensitive to human
        # errors, like if 'jobs.setter' actually points to 'chunksize'.
        setattr(mr, p, original)
|
|
# Import flask dependencies
from flask import (Blueprint, request, render_template,
flash, g, session, redirect, url_for)
# Import password / encryption helper tools
from werkzeug import check_password_hash, generate_password_hash
# Import the database object from the main app module
from app import db
# Define the blueprint: 'auth', set its url prefix: app.url/auth
products = Blueprint('products', __name__, url_prefix='/backend/products')
# Import helpers
from ..helpers.helpers import read_setting
# Import the forms
from .forms import AddProduct, AddCategory
# Import the models
from .models import Product, Category, StockHistory
from ..suppliers.models import Supplier
from ..settings.models import VAT
# Import helpers
from ..helpers.helpers import (to_int, to_dec, to_dec_string, add_vat,
calc_margin)
# Import Babel
from app import babel
from config import LANGUAGES
# Set the route and accepted methods
@products.route('/')
def products_list():
    """Render the product list with stock and price formatted for display.

    Prices and stock are stored as integers (see helpers.to_dec_string);
    the display fields are converted in place before rendering.
    """
    # Renamed local: 'products' would shadow the module-level Blueprint of
    # the same name used by the route decorator.
    product_list = Product.query.order_by(Product.reference)
    for product in product_list:
        product.stock = to_dec_string(product.stock)
        product.selling_price = to_dec_string(product.selling_price)
    return render_template('products/list.html',
                           title='Products',
                           products=product_list)
@products.route('/add', methods=['GET', 'POST'])
def product_add():
    """Display the add-product form and create the product on POST.

    Stores monetary amounts and stock as integers (see helpers.to_int),
    records the opening stock in StockHistory, and links the new product to
    the selected suppliers and category.
    """
    form = AddProduct()

    # Build the choice lists for the form's select fields.
    vat = [(v.id, to_dec(v.amount)) for v in VAT.query.order_by(VAT.name)]
    suppliers = [(s.id, s.name) for s in Supplier.query.order_by(Supplier.name)]
    categories = [(c.id, c.name) for c in Category.query.order_by(Category.code)]
    units = [(k, v) for k, v in read_setting("units").iteritems()]
    conditioning_units = [(k, v)
                          for k, v in read_setting("conditioning").iteritems()]
    form.vat.choices = vat
    form.suppliers.choices = suppliers
    form.categories.choices = categories
    form.unit.choices = units
    form.conditioning_unit.choices = conditioning_units

    if form.validate_on_submit():
        # Storing the buying price as an integer
        buying_price_int = to_int(form.buying_price.data)
        stock_int = to_int(form.stock.data)
        selling_price_no_tax_int = to_int(form.selling_price_no_tax.data)
        # Reuse the already-converted net price instead of re-parsing the
        # form field a second time.
        selling_price = int(add_vat(selling_price_no_tax_int, form.vat.data))
        product = Product(name=form.name.data, reference=form.reference.data,
                          unit=form.unit.data, packing=to_int(form.packing.data),
                          conditioning=to_int(form.conditioning.data),
                          conditioning_unit=form.conditioning_unit.data,
                          supplier_reference=form.supplier_reference.data,
                          ean=form.ean.data, description=form.description.data,
                          buying_price=buying_price_int,
                          selling_price_no_tax=selling_price_no_tax_int,
                          selling_price=selling_price,
                          stock=stock_int,
                          vat_id=form.vat.data)
        db.session.add(product)
        db.session.commit()

        # Add stock to History
        stock_history = StockHistory(amount=stock_int, product=product)
        db.session.add(stock_history)
        db.session.commit()

        # Link the product to every selected supplier.
        for s in form.suppliers.data:
            supp = Supplier.query.filter_by(id=s).first()
            supp.products.append(product)
            db.session.add(supp)
            db.session.commit()

        # Link the product to its (single) category.
        c = form.categories.data
        cat = Category.query.filter_by(id=c).first()
        cat.products.append(product)
        db.session.add(cat)
        db.session.commit()

        flash('Product %s added!' % form.name.data, 'success')
        return redirect(url_for('products.products_list'))

    return render_template('products/add.html',
                           title='Add a Product',
                           action='add',
                           form=form)
@products.route('/edit/<int:id>', methods=['GET', 'POST'])
def product_edit(id):
    """Edit an existing product.

    GET pre-populates the AddProduct form from the stored product; a valid
    POST updates the product, records the new stock level in StockHistory,
    and re-links the suppliers and the category.
    """
    # Initialize form
    product = Product.query.get_or_404(id)
    form = AddProduct()
    # Create choice lists for the select fields.
    vat_query = VAT.query.order_by(VAT.name)
    for v in vat_query:
        # NOTE(review): mutates the mapped VAT objects in-place purely for
        # display -- confirm the session is never flushed with these values.
        v.amount = to_dec(v.amount)
    form.vat.choices = [(v.id, v.amount) for v in vat_query]
    form.suppliers.choices = [(s.id, s.name) for s in Supplier.query.order_by(Supplier.name)]
    form.categories.choices = [(c.id, c.name) for c in Category.query.order_by(Category.code)]
    # BUG FIX: unlike the add view, the unit and conditioning_unit select
    # fields were never given choices here, so validate_on_submit() could
    # never accept them ("Not a valid choice"). Populate them exactly as
    # the add view does.
    form.unit.choices = [(k, v) for k, v in read_setting("units").iteritems()]
    form.conditioning_unit.choices = [(k, v) for k, v in read_setting("conditioning").iteritems()]
    if form.validate_on_submit():
        product.name = form.name.data
        product.reference = form.reference.data
        product.unit = form.unit.data
        product.packing = to_int(form.packing.data)
        product.conditioning = to_int(form.conditioning.data)
        product.conditioning_unit = form.conditioning_unit.data
        product.supplier_reference = form.supplier_reference.data
        product.ean = form.ean.data
        product.description = form.description.data
        # Monetary amounts and stock are stored as integers; the gross
        # selling price is the net price with VAT added on top.
        product.buying_price = to_int(form.buying_price.data)
        product.selling_price_no_tax = to_int(form.selling_price_no_tax.data)
        product.selling_price = int(add_vat(to_int(form.selling_price_no_tax.data), form.vat.data))
        product.vat_id = form.vat.data
        product.stock = to_int(form.stock.data)
        # Drop the existing supplier links; they are re-created below from
        # the submitted selection.
        product.supplier = []
        # NOTE(review): the category link is not cleared the same way --
        # confirm that appending to the new category reassigns (rather than
        # duplicates) the relationship.
        #product.category = []
        db.session.add(product)
        db.session.commit()
        # Record the (absolute) new stock level in the history.
        stock_history = StockHistory(amount=to_int(form.stock.data), product=product)
        db.session.add(stock_history)
        db.session.commit()
        for s in form.suppliers.data:
            supp = Supplier.query.filter_by(id=s).first()
            supp.products.append(product)
            db.session.add(supp)
            db.session.commit()
        c = form.categories.data
        cat = Category.query.filter_by(id=c).first()
        cat.products.append(product)
        db.session.add(cat)
        db.session.commit()
        flash('Product %s (Reference %s) modified!' % (product.name, product.reference), 'success')
        return redirect(url_for('products.products_list'))
    # Populate the fields for the GET view.
    form.suppliers.data = [s.id for s in product.supplier]
    # NOTE(review): a list with a leading 0 is an odd prefill for a field
    # read back as a scalar on submit -- confirm the widget expects this.
    form.categories.data = [0, product.category.id]
    form.name.data = product.name
    form.reference.data = product.reference
    form.unit.data = product.unit
    form.packing.data = to_dec(product.packing)
    form.conditioning.data = to_dec(product.conditioning)
    form.conditioning_unit.data = product.conditioning_unit
    form.supplier_reference.data = product.supplier_reference
    form.ean.data = product.ean
    form.description.data = product.description
    form.buying_price.data = to_dec(product.buying_price)
    form.selling_price_no_tax.data = to_dec(product.selling_price_no_tax)
    form.vat.data = product.vat_id
    form.stock.data = to_dec(product.stock)
    return render_template('products/add.html',
                           title='Edit product',
                           action='edit',
                           form=form)
@products.route('/delete/<int:id>', methods=['GET', 'POST'])
def product_delete(id):
    """Delete a product, then return to the product list."""
    doomed = Product.query.get_or_404(id)
    db.session.delete(doomed)
    db.session.commit()
    # The in-memory object still carries its name after deletion.
    flash('Product ' + doomed.name + ' deleted!', 'success')
    return redirect(url_for('products.products_list'))
@products.route('/view/<int:id>', methods=['GET', 'POST'])
def product_view(id):
    """Render a read-only product page with display-friendly amounts."""
    product = Product.query.get_or_404(id)
    # Integer-stored amounts are converted in-memory for display only.
    for attr in ('packing', 'conditioning', 'buying_price', 'stock'):
        setattr(product, attr, to_dec(getattr(product, attr)))
    product.vat.amount = to_dec_string(product.vat.amount)
    for attr in ('selling_price_no_tax', 'selling_price'):
        setattr(product, attr, to_dec_string(getattr(product, attr)))
    # Margin is computed from the already-converted display values.
    margin = calc_margin(product.buying_price, product.selling_price_no_tax)
    return render_template('products/view.html', title='Product',
                           product=product, margin=margin)
@products.route('/categories', methods=['GET', 'POST'])
def categories():
    """List all categories and handle creation of a new one."""
    form = AddCategory()
    category_listing = Category.query.order_by(Category.code)
    if form.validate_on_submit():
        new_category = Category(name=form.name.data, code=form.code.data)
        db.session.add(new_category)
        db.session.commit()
        flash('Category %s added!' % form.name.data, 'success')
        return redirect(url_for('products.categories'))
    return render_template('products/categories.html',
                           title='Categories',
                           categories=category_listing,
                           form=form)
@products.route('/categories/delete/<int:id>', methods=['GET', 'POST'])
def category_delete(id):
    """Remove a category, then return to the categories page."""
    doomed = Category.query.get_or_404(id)
    db.session.delete(doomed)
    db.session.commit()
    flash('Category %s deleted!' % doomed.name, 'success')
    return redirect(url_for('products.categories'))
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.logging_v2.types import logging
from google.protobuf import empty_pb2 # type: ignore
from .base import LoggingServiceV2Transport, DEFAULT_CLIENT_INFO
from .grpc import LoggingServiceV2GrpcTransport
class LoggingServiceV2GrpcAsyncIOTransport(LoggingServiceV2Transport):
    """gRPC AsyncIO backend transport for LoggingServiceV2.

    Service for ingesting and querying logs.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Channel shared by every stub created on this transport.
    _grpc_channel: aio.Channel
    # Cache of RPC name -> stub callable, populated lazily by the
    # method properties below.
    _stubs: Dict[str, Callable] = {}

    @classmethod
    def create_channel(cls,
                       host: str = 'logging.googleapis.com',
                       credentials: Optional[ga_credentials.Credentials] = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )

    def __init__(self, *,
            host: str = 'logging.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: Optional[aio.Channel] = None,
            api_mtls_endpoint: Optional[str] = None,
            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel

    @property
    def delete_log(self) -> Callable[
            [logging.DeleteLogRequest],
            Awaitable[empty_pb2.Empty]]:
        r"""Return a callable for the delete log method over gRPC.

        Deletes all the log entries in a log. The log
        reappears if it receives new entries. Log entries
        written shortly before the delete operation might not be
        deleted. Entries received after the delete operation
        with a timestamp before the operation will be deleted.

        Returns:
            Callable[[~.DeleteLogRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'delete_log' not in self._stubs:
            self._stubs['delete_log'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.LoggingServiceV2/DeleteLog',
                request_serializer=logging.DeleteLogRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs['delete_log']

    @property
    def write_log_entries(self) -> Callable[
            [logging.WriteLogEntriesRequest],
            Awaitable[logging.WriteLogEntriesResponse]]:
        r"""Return a callable for the write log entries method over gRPC.

        Writes log entries to Logging. This API method is the
        only way to send log entries to Logging. This method is
        used, directly or indirectly, by the Logging agent
        (fluentd) and all logging libraries configured to use
        Logging. A single request may contain log entries for a
        maximum of 1000 different resources (projects,
        organizations, billing accounts or folders)

        Returns:
            Callable[[~.WriteLogEntriesRequest],
                    Awaitable[~.WriteLogEntriesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'write_log_entries' not in self._stubs:
            self._stubs['write_log_entries'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.LoggingServiceV2/WriteLogEntries',
                request_serializer=logging.WriteLogEntriesRequest.serialize,
                response_deserializer=logging.WriteLogEntriesResponse.deserialize,
            )
        return self._stubs['write_log_entries']

    @property
    def list_log_entries(self) -> Callable[
            [logging.ListLogEntriesRequest],
            Awaitable[logging.ListLogEntriesResponse]]:
        r"""Return a callable for the list log entries method over gRPC.

        Lists log entries. Use this method to retrieve log entries that
        originated from a project/folder/organization/billing account.
        For ways to export log entries, see `Exporting
        Logs <https://cloud.google.com/logging/docs/export>`__.

        Returns:
            Callable[[~.ListLogEntriesRequest],
                    Awaitable[~.ListLogEntriesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_log_entries' not in self._stubs:
            self._stubs['list_log_entries'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.LoggingServiceV2/ListLogEntries',
                request_serializer=logging.ListLogEntriesRequest.serialize,
                response_deserializer=logging.ListLogEntriesResponse.deserialize,
            )
        return self._stubs['list_log_entries']

    @property
    def list_monitored_resource_descriptors(self) -> Callable[
            [logging.ListMonitoredResourceDescriptorsRequest],
            Awaitable[logging.ListMonitoredResourceDescriptorsResponse]]:
        r"""Return a callable for the list monitored resource
        descriptors method over gRPC.

        Lists the descriptors for monitored resource types
        used by Logging.

        Returns:
            Callable[[~.ListMonitoredResourceDescriptorsRequest],
                    Awaitable[~.ListMonitoredResourceDescriptorsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_monitored_resource_descriptors' not in self._stubs:
            self._stubs['list_monitored_resource_descriptors'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors',
                request_serializer=logging.ListMonitoredResourceDescriptorsRequest.serialize,
                response_deserializer=logging.ListMonitoredResourceDescriptorsResponse.deserialize,
            )
        return self._stubs['list_monitored_resource_descriptors']

    @property
    def list_logs(self) -> Callable[
            [logging.ListLogsRequest],
            Awaitable[logging.ListLogsResponse]]:
        r"""Return a callable for the list logs method over gRPC.

        Lists the logs in projects, organizations, folders,
        or billing accounts. Only logs that have entries are
        listed.

        Returns:
            Callable[[~.ListLogsRequest],
                    Awaitable[~.ListLogsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_logs' not in self._stubs:
            self._stubs['list_logs'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.LoggingServiceV2/ListLogs',
                request_serializer=logging.ListLogsRequest.serialize,
                response_deserializer=logging.ListLogsResponse.deserialize,
            )
        return self._stubs['list_logs']

    @property
    def tail_log_entries(self) -> Callable[
            [logging.TailLogEntriesRequest],
            Awaitable[logging.TailLogEntriesResponse]]:
        r"""Return a callable for the tail log entries method over gRPC.

        Streaming read of log entries as they are ingested.
        Until the stream is terminated, it will continue reading
        logs.

        Returns:
            Callable[[~.TailLogEntriesRequest],
                    Awaitable[~.TailLogEntriesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        # NOTE: this is the only bidirectional streaming RPC on this
        # service, hence stream_stream instead of unary_unary.
        if 'tail_log_entries' not in self._stubs:
            self._stubs['tail_log_entries'] = self.grpc_channel.stream_stream(
                '/google.logging.v2.LoggingServiceV2/TailLogEntries',
                request_serializer=logging.TailLogEntriesRequest.serialize,
                response_deserializer=logging.TailLogEntriesResponse.deserialize,
            )
        return self._stubs['tail_log_entries']

    def close(self):
        # NOTE(review): aio.Channel.close() is asynchronous -- callers are
        # presumably expected to await the returned value; confirm against
        # the grpc.aio documentation.
        return self.grpc_channel.close()
# Public API of this module.
__all__ = (
    'LoggingServiceV2GrpcAsyncIOTransport',
)
|
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.codegen.protobuf.python import additional_fields
from pants.backend.codegen.protobuf.python.python_protobuf_subsystem import PythonProtobufMypyPlugin
from pants.backend.codegen.protobuf.python.python_protobuf_subsystem import (
rules as protobuf_subsystem_rules,
)
from pants.backend.codegen.protobuf.python.rules import GeneratePythonFromProtobufRequest
from pants.backend.codegen.protobuf.python.rules import rules as protobuf_rules
from pants.backend.codegen.protobuf.target_types import (
ProtobufSourceField,
ProtobufSourcesGeneratorTarget,
)
from pants.backend.codegen.protobuf.target_types import rules as target_types_rules
from pants.backend.python.dependency_inference import module_mapper
from pants.core.util_rules import stripped_source_files
from pants.engine.addresses import Address
from pants.engine.target import GeneratedSources, HydratedSources, HydrateSourcesRequest
from pants.source.source_root import NoSourceRootError
from pants.testutil.python_interpreter_selection import all_major_minor_python_versions
from pants.testutil.rule_runner import QueryRule, RuleRunner, engine_error
# Proto source shared by the grpc=True test cases below; it is passed
# through textwrap.dedent() before being written to disk.
GRPC_PROTO_STANZA = """
syntax = "proto3";
package dir1;
// The greeter service definition.
service Greeter {
// Sends a greeting
rpc SayHello (HelloRequest) returns (HelloReply) {}
}
// The request message containing the user's name.
message HelloRequest {
string name = 1;
}
// The response message containing the greetings
message HelloReply {
string message = 1;
}
"""
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Rule runner wired with everything Python protobuf codegen needs."""
    return RuleRunner(
        rules=[
            *protobuf_rules(),
            *protobuf_subsystem_rules(),
            *additional_fields.rules(),
            *stripped_source_files.rules(),
            *target_types_rules(),
            *module_mapper.rules(),
            QueryRule(HydratedSources, [HydrateSourcesRequest]),
            QueryRule(GeneratedSources, [GeneratePythonFromProtobufRequest]),
        ],
        target_types=[ProtobufSourcesGeneratorTarget],
    )
def assert_files_generated(
    rule_runner: RuleRunner,
    address: Address,
    *,
    expected_files: list[str],
    source_roots: list[str],
    mypy: bool = False,
    extra_args: list[str] | None = None,
) -> None:
    """Run protobuf codegen for ``address`` and compare the produced file set.

    ``source_roots`` feeds --source-root-patterns, ``mypy`` toggles the
    mypy-protobuf plugin, and ``extra_args`` is appended verbatim.
    """
    options = [
        f"--source-root-patterns={repr(source_roots)}",
        "--no-python-protobuf-infer-runtime-dependency",
    ]
    options.extend(extra_args or ())
    if mypy:
        options.append("--python-protobuf-mypy-plugin")
    rule_runner.set_options(options, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    target = rule_runner.get_target(address)
    hydrated = rule_runner.request(
        HydratedSources, [HydrateSourcesRequest(target[ProtobufSourceField])]
    )
    generated = rule_runner.request(
        GeneratedSources,
        [GeneratePythonFromProtobufRequest(hydrated.snapshot, target)],
    )
    assert set(generated.snapshot.files) == set(expected_files)
def test_generates_python(rule_runner: RuleRunner) -> None:
    # This tests a few things:
    # * We generate the correct file names.
    # * Protobuf files can import other protobuf files, and those can import others
    # (transitive dependencies). We'll only generate the requested target, though.
    # * We can handle multiple source roots, which need to be preserved in the final output.
    rule_runner.write_files(
        {
            "src/protobuf/dir1/f.proto": dedent(
                """\
                syntax = "proto3";
                package dir1;
                message Person {
                string name = 1;
                int32 id = 2;
                string email = 3;
                }
                """
            ),
            "src/protobuf/dir1/f2.proto": dedent(
                """\
                syntax = "proto3";
                package dir1;
                """
            ),
            "src/protobuf/dir1/BUILD": "protobuf_sources()",
            "src/protobuf/dir2/f.proto": dedent(
                """\
                syntax = "proto3";
                package dir2;
                import "dir1/f.proto";
                """
            ),
            "src/protobuf/dir2/BUILD": (
                "protobuf_sources(dependencies=['src/protobuf/dir1'], "
                "python_source_root='src/python')"
            ),
            # Test another source root.
            "tests/protobuf/test_protos/f.proto": dedent(
                """\
                syntax = "proto3";
                package test_protos;
                import "dir2/f.proto";
                """
            ),
            "tests/protobuf/test_protos/BUILD": (
                "protobuf_sources(dependencies=['src/protobuf/dir2'])"
            ),
        }
    )

    def assert_gen(addr: Address, expected: str) -> None:
        # Helper: each requested proto must yield exactly one generated file.
        assert_files_generated(
            rule_runner,
            addr,
            source_roots=["src/python", "/src/protobuf", "/tests/protobuf"],
            expected_files=[expected],
        )

    assert_gen(
        Address("src/protobuf/dir1", relative_file_path="f.proto"), "src/protobuf/dir1/f_pb2.py"
    )
    assert_gen(
        Address("src/protobuf/dir1", relative_file_path="f2.proto"), "src/protobuf/dir1/f2_pb2.py"
    )
    assert_gen(
        Address("src/protobuf/dir2", relative_file_path="f.proto"), "src/python/dir2/f_pb2.py"
    )
    assert_gen(
        Address("tests/protobuf/test_protos", relative_file_path="f.proto"),
        "tests/protobuf/test_protos/f_pb2.py",
    )
def test_top_level_proto_root(rule_runner: RuleRunner) -> None:
    """The repo root itself can serve as the proto source root."""
    files = {
        "protos/f.proto": dedent(
            """\
            syntax = "proto3";
            package protos;
            """
        ),
        "protos/BUILD": "protobuf_sources()",
    }
    rule_runner.write_files(files)
    assert_files_generated(
        rule_runner,
        Address("protos", relative_file_path="f.proto"),
        source_roots=["/"],
        expected_files=["protos/f_pb2.py"],
    )
def test_top_level_python_source_root(rule_runner: RuleRunner) -> None:
    """python_source_root='.' maps generated code to the repo root."""
    files = {
        "src/proto/protos/f.proto": dedent(
            """\
            syntax = "proto3";
            package protos;
            """
        ),
        "src/proto/protos/BUILD": "protobuf_sources(python_source_root='.')",
    }
    rule_runner.write_files(files)
    assert_files_generated(
        rule_runner,
        Address("src/proto/protos", relative_file_path="f.proto"),
        source_roots=["/", "src/proto"],
        expected_files=["protos/f_pb2.py"],
    )
def test_bad_python_source_root(rule_runner: RuleRunner) -> None:
    """An unrecognized python_source_root surfaces NoSourceRootError."""
    files = {
        "src/protobuf/dir1/f.proto": dedent(
            """\
            syntax = "proto3";
            package dir1;
            """
        ),
        "src/protobuf/dir1/BUILD": "protobuf_sources(python_source_root='notasourceroot')",
    }
    rule_runner.write_files(files)
    with engine_error(NoSourceRootError):
        assert_files_generated(
            rule_runner,
            Address("src/protobuf/dir1", relative_file_path="f.proto"),
            source_roots=["src/protobuf"],
            expected_files=[],
        )
@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(PythonProtobufMypyPlugin.default_interpreter_constraints),
)
def test_mypy_plugin(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """The mypy plugin emits a .pyi stub for every supported interpreter."""
    files = {
        "src/protobuf/dir1/f.proto": dedent(
            """\
            syntax = "proto3";
            package dir1;
            message Person {
            string name = 1;
            int32 id = 2;
            string email = 3;
            }
            """
        ),
        "src/protobuf/dir1/BUILD": "protobuf_sources()",
    }
    rule_runner.write_files(files)
    assert_files_generated(
        rule_runner,
        Address("src/protobuf/dir1", relative_file_path="f.proto"),
        source_roots=["src/protobuf"],
        extra_args=[
            "--python-protobuf-mypy-plugin",
            f"--mypy-protobuf-interpreter-constraints=['=={major_minor_interpreter}.*']",
        ],
        expected_files=["src/protobuf/dir1/f_pb2.py", "src/protobuf/dir1/f_pb2.pyi"],
    )
def test_grpc(rule_runner: RuleRunner) -> None:
    """grpc=True additionally generates the *_pb2_grpc.py module."""
    files = {
        "src/protobuf/dir1/f.proto": dedent(GRPC_PROTO_STANZA),
        "src/protobuf/dir1/BUILD": "protobuf_sources(grpc=True)",
    }
    rule_runner.write_files(files)
    assert_files_generated(
        rule_runner,
        Address("src/protobuf/dir1", relative_file_path="f.proto"),
        source_roots=["src/protobuf"],
        expected_files=["src/protobuf/dir1/f_pb2.py", "src/protobuf/dir1/f_pb2_grpc.py"],
    )
def test_grpc_mypy_plugin(rule_runner: RuleRunner) -> None:
    """With grpc and mypy both on, .pyi stubs accompany both modules."""
    files = {
        "src/protobuf/dir1/f.proto": dedent(GRPC_PROTO_STANZA),
        "src/protobuf/dir1/BUILD": "protobuf_sources(grpc=True)",
    }
    rule_runner.write_files(files)
    assert_files_generated(
        rule_runner,
        Address("src/protobuf/dir1", relative_file_path="f.proto"),
        source_roots=["src/protobuf"],
        mypy=True,
        expected_files=[
            "src/protobuf/dir1/f_pb2.py",
            "src/protobuf/dir1/f_pb2.pyi",
            "src/protobuf/dir1/f_pb2_grpc.py",
            "src/protobuf/dir1/f_pb2_grpc.pyi",
        ],
    )
def test_grpc_pre_v2_mypy_plugin(rule_runner: RuleRunner) -> None:
    """mypy-protobuf 1.x emits no .pyi stub for the grpc module."""
    files = {
        "src/protobuf/dir1/f.proto": dedent(GRPC_PROTO_STANZA),
        "src/protobuf/dir1/BUILD": "protobuf_sources(grpc=True)",
    }
    rule_runner.write_files(files)
    assert_files_generated(
        rule_runner,
        Address("src/protobuf/dir1", relative_file_path="f.proto"),
        source_roots=["src/protobuf"],
        extra_args=[
            "--python-protobuf-mypy-plugin",
            "--mypy-protobuf-version=mypy-protobuf==1.24",
            "--mypy-protobuf-extra-requirements=six==1.16.0",
            "--mypy-protobuf-lockfile=<none>",
        ],
        expected_files=[
            "src/protobuf/dir1/f_pb2.py",
            "src/protobuf/dir1/f_pb2.pyi",
            "src/protobuf/dir1/f_pb2_grpc.py",
        ],
    )
|
|
"""Validation schemas for API requests."""
import werkzeug.datastructures
from flask_restplus import inputs
import api.reqparse as reqparse
# Upper bound on password length -- presumably enforced by the password
# validators elsewhere in this package; confirm at the call sites.
MAX_PASSWORD_LENGTH = 1024
def object_type(value):
    """Identity passthrough; exists so Swagger docs show an object type."""
    return value


# Advertise "object" to the OpenAPI schema generator.
object_type.__schema__ = {"type": "object"}  # noqa
def length_restricted(min_length, max_length, base_type):
    """Build a validator enforcing an inclusive string-length range.

    ``base_type`` is accepted for signature compatibility but is not
    consulted by the validator itself.
    """

    def validate(candidate):
        size = len(candidate)
        if size < min_length:
            raise ValueError("Must be at least %i characters long" % min_length)
        if size > max_length:
            raise ValueError("Must be at most %i characters long" % max_length)
        return candidate

    return validate
# Achievement request schema
# (Used for creating achievements; a PATCH variant with all-optional fields
# is derived at the bottom of this section.)
achievement_req = reqparse.RequestParser()
achievement_req.add_argument(
    "name",
    required=True,
    type=str,
    location="json",
    help="Name of the achievement.",
    error="Achievement name is required",
)
achievement_req.add_argument(
    "score",
    required=True,
    type=inputs.natural,
    location="json",
    help="Point value of the achievement (positive integer).",
    error="The achievement's score must be a positive integer",
)
achievement_req.add_argument(
    "description",
    required=True,
    type=str,
    location="json",
    help="Description of the achievement.",
    error="Achievement description is required",
)
achievement_req.add_argument(
    "processor",
    required=True,
    type=str,
    location="json",
    help="Path to the achievement processor.",
    error="Achievement processor path is required",
)
achievement_req.add_argument(
    "hidden",
    required=True,
    type=inputs.boolean,
    location="json",
    help="Hide this achievement?",
    error="Specify whether this achievement should be hidden (true/false)",
)
achievement_req.add_argument(
    "image",
    required=True,
    type=str,
    location="json",
    help="Path to achievement image.",
    error="Achievement image path is required",
)
achievement_req.add_argument(
    "smallimage",
    required=True,
    type=str,
    location="json",
    help="Path to achievement thumbnail.",
    error="Achievement thumbnail path is required",
)
achievement_req.add_argument(
    "disabled",
    required=True,
    type=inputs.boolean,
    location="json",
    help="Disable this achievement?",
    error="Specify whether this achievement should be disabled (true/false)",
)
achievement_req.add_argument(
    "multiple",
    required=True,
    type=inputs.boolean,
    location="json",
    help="Allow earning multiple instances of this achievement?",
    error="Specify whether this achievement can be earned multiple times "
    + "(true/false)",
)
# PATCH variant: identical fields, but none mandatory, so partial updates
# may supply any subset.
achievement_patch_req = achievement_req.copy()
for arg in achievement_patch_req.args:
    arg.required = False
# Shell server output schema
# (This is too complex for reqparse to really handle, so we'll trust it.
# If we move to another validation engine e.g. marshmallow, we can revisit.)
shell_server_out = reqparse.RequestParser()
shell_server_out.add_argument(
    "sid",
    required=True,
    type=str,
    location="args",  # server ID arrives as a query-string parameter
    help="Shell server ID.",
    error="Shell server ID is required",
)
# Only the top-level list types are checked; the nested problem/bundle
# dicts are deliberately left unvalidated (see note above).
shell_server_out.add_argument(
    "problems",
    required=False,
    type=list,
    location="json",
    error="Problems array is invalid",
)
shell_server_out.add_argument(
    "bundles",
    required=False,
    type=list,
    location="json",
    error="Bundles array is invalid",
)
# Problem PATCH request schema
# ("disabled" is the only mutable field as the others are managed by the
# shell manager.)
problem_patch_req = reqparse.RequestParser()
problem_patch_req.add_argument(
    "disabled",
    required=True,
    type=inputs.boolean,
    location="json",
    help="Whether the problem is disabled.",
    error="Specify whether the problem is disabled",
)
# Shell server list request schema
shell_server_list_req = reqparse.RequestParser()
shell_server_list_req.add_argument(
    "assigned_only",
    required=False,
    type=inputs.boolean,
    default=True,
    location="args",
    help="Whether to include only shell servers assigned to the"
    + " current user. Must be admin to disable.",
    error="Specify a boolean value for assigned_only",
)
# Shell server request schema (creation; PATCH variant derived below)
shell_server_req = reqparse.RequestParser()
shell_server_req.add_argument(
    "name",
    required=True,
    type=str,
    location="json",
    help="Shell server display name.",
    error="Shell server display name is required",
)
shell_server_req.add_argument(
    "host",
    required=True,
    type=str,
    location="json",
    help="Shell server hostname.",
    error="Shell server hostname is required",
)
shell_server_req.add_argument(
    "port",
    required=True,
    type=inputs.int_range(1, 65535),
    location="json",
    help="Shell server port.",
    error="Shell server port is required (1-65535)",
)
shell_server_req.add_argument(
    "username",
    required=True,
    type=str,
    location="json",
    help="Username.",
    error="Shell server username is required",
)
shell_server_req.add_argument(
    "password",
    required=True,
    type=str,
    location="json",
    help="Password.",
    error="Shell server password is required",
)
shell_server_req.add_argument(
    "protocol",
    required=True,
    type=str,
    choices=["HTTP", "HTTPS"],
    location="json",
    help="Protocol used to serve web resources.",
    error="Shell server protocol is required (HTTP/HTTPS)",
)
shell_server_req.add_argument(
    "server_number",
    required=False,
    type=inputs.positive,
    location="json",
    help="Server number (will be automatically assigned if not provided).",
    error="Shell server number must be a positive integer",
)
# PATCH variant: same fields as creation, all optional.
shell_server_patch_req = shell_server_req.copy()
for arg in shell_server_patch_req.args:
    arg.required = False
# Shell server reassignment schema
shell_server_reassignment_req = reqparse.RequestParser()
shell_server_reassignment_req.add_argument(
    "include_assigned",
    required=False,
    type=inputs.boolean,
    location="json",
    help="Whether to update the assignments of teams that already have "
    + "an assigned shell server.",
    error="Specify a boolean value for include_assigned",
)
# Exception request schema
exception_req = reqparse.RequestParser()
exception_req.add_argument(
    "result_limit",
    required=False,
    type=inputs.positive,
    default=50,
    location="args",
    help="Maximum number of exceptions to return",
    error="result_limit must be a positive integer",
)
# Settings update schema
# @TODO: this is very basic - config.py:change_settings() does the brunt of
# the validation work for now because of RequestParser's limitations
# regarding nested fields. Revisit this when upgrading to a
# better validation library.
settings_patch_req = reqparse.RequestParser()
settings_patch_req.add_argument(
    "enable_feedback",
    required=False,
    type=inputs.boolean,
    location="json",
    error="enable_feedback must be a boolean",
)
settings_patch_req.add_argument(
    "start_time",
    required=False,
    type=inputs.datetime_from_rfc822,
    location="json",
    error="start_time must be an RFC 822 timestamp",
)
settings_patch_req.add_argument(
    "end_time",
    required=False,
    type=inputs.datetime_from_rfc822,
    location="json",
    error="end_time must be an RFC 822 timestamp",
)
settings_patch_req.add_argument(
    "competition_name",
    required=False,
    type=str,
    location="json",
    error="competition_name must be a string",
)
settings_patch_req.add_argument(
    "competition_url",
    required=False,
    type=str,
    location="json",
    error="competition_url must be a string",
)
settings_patch_req.add_argument(
    "email_filter", required=False, type=list, location="json"
)
# NOTE(review): type is inputs.natural (allows 0) but the error text says
# "positive" — confirm the intended lower bound.
settings_patch_req.add_argument(
    "max_team_size",
    required=False,
    type=inputs.natural,
    location="json",
    error="max_team_size must be a positive integer",
)
# The nested objects below use the permissive object_type; their contents
# are validated downstream (see the @TODO note above).
settings_patch_req.add_argument(
    "achievements", required=False, type=object_type, location="json"
)
settings_patch_req.add_argument(
    "username_blacklist", required=False, type=list, location="json"
)
settings_patch_req.add_argument(
    "email", required=False, type=object_type, location="json"
)
settings_patch_req.add_argument(
    "captcha", required=False, type=object_type, location="json"
)
settings_patch_req.add_argument(
    "logging", required=False, type=object_type, location="json"
)
settings_patch_req.add_argument(
    "shell_servers", required=False, type=object_type, location="json"
)
# Cap on batch-registration size; inputs.natural accepts 0 and up.
settings_patch_req.add_argument(
    "max_batch_registrations",
    required=False,
    type=inputs.natural,
    location="json",
    error="max_batch_registrations must be a nonnegative integer",
)
# Toggle for API rate limiting.
settings_patch_req.add_argument(
    "enable_rate_limiting", required=False, type=inputs.boolean, location="json"
)
# Per-account classroom (group) cap.
settings_patch_req.add_argument(
    "group_limit",
    required=False,
    type=inputs.natural,
    location="json",
    error="group_limit must be a nonnegative integer",
)
# Bundle PATCH request schema
# ("dependencies_enabled" is the only mutable field as the others are managed
# by the shell manager.)
bundle_patch_req = reqparse.RequestParser()
bundle_patch_req.add_argument(
    "dependencies_enabled",
    required=True,
    type=inputs.boolean,
    location="json",
    help="Whether to consider this bundle's dependencies when determining "
    + "unlocked problems.",
    error="Specify a boolean value for dependencies_enabled",
)
# Optional parameters for problems request
# (all read from the query string — location="args")
problems_req = reqparse.RequestParser()
problems_req.add_argument(
    "unlocked_only",
    required=False,
    location="args",
    default=True,
    help="Whether to display only problems unlocked for your team or "
    + "all matching problems. Must be teacher/admin to disable, unless "
    + "count_only=True. "
    + "If disabled as a teacher account, will only return name, "
    + "category, and score for each problem.",
    type=inputs.boolean,
    error="Specify a boolean value for unlocked_only",
)
problems_req.add_argument(
    "solved_only",
    required=False,
    location="args",
    default=False,
    help="Restrict results to problems solved by your team.",
    type=inputs.boolean,
    error="Specify a boolean value for solved_only",
)
problems_req.add_argument(
    "count_only",
    required=False,
    location="args",
    default=False,
    help="Whether to return only the count of matching problems.",
    type=inputs.boolean,
    error="Specify a boolean value for count_only",
)
problems_req.add_argument(
    "category",
    required=False,
    location="args",
    default=None,
    help="Restrict results to a specific category.",
    type=str,
    error="Category to filter on must be a string",
)
problems_req.add_argument(
    "include_disabled",
    required=False,
    location="args",
    default=False,
    help="Whether to include disabled problems.",
    type=inputs.boolean,
    error="Specify a boolean value for include_disabled",
)
# Submission request (flag submission for a problem)
submission_req = reqparse.RequestParser()
submission_req.add_argument(
    "pid",
    required=True,
    type=str,
    location="json",
    help="ID of the attempted problem",
    error="Problem ID is required",
)
submission_req.add_argument(
    "key",
    required=True,
    type=str,
    location="json",
    help="Flag for the problem",
    error="Flag is required",
)
submission_req.add_argument(
    "method",
    required=True,
    type=str,
    location="json",
    help='Submission method, e.g. "game"',
    error="Submission method is required",
)
# Feedback list request (all filters optional, query-string based)
feedback_list_req = reqparse.RequestParser()
feedback_list_req.add_argument(
    "pid",
    required=False,
    type=str,
    location="args",
    help="Filter feedback by this problem ID only",
    error="pid field must be a string",
)
feedback_list_req.add_argument(
    "uid",
    required=False,
    type=str,
    location="args",
    help="Filter feedback by this user ID only",
    error="uid field must be a string",
)
feedback_list_req.add_argument(
    "tid",
    required=False,
    type=str,
    location="args",
    help="Filter feedback by this team ID only",
    error="tid field must be a string",
)
# Feedback submission request
feedback_submission_req = reqparse.RequestParser()
feedback_submission_req.add_argument(
    "pid",
    required=True,
    type=str,
    help="Reviewed problem ID",
    location="json",
    error="Problem ID is required",
)
# @TODO validate this at request time - for now see problem_feedback.py
feedback_submission_req.add_argument(
    "feedback",
    required=True,
    type=object_type,
    help="Problem feedback",
    location="json",
    error="Feedback object required",
)
# New user request (registration payload)
user_req = reqparse.RequestParser()
user_req.add_argument(
    "email",
    required=True,
    type=inputs.regex(r".+@.+\..{2,}"),
    location="json",
    help="Email address",
    error="Email address is not valid",
)
# NOTE(review): first/last name default to "" but the validator requires a
# minimum length of 1 when a value IS supplied — confirm that empty strings
# are meant to bypass validation via the default.
user_req.add_argument(
    "firstname",
    required=False,
    type=length_restricted(1, 50, str),
    location="json",
    help="Given name",
    default="",
    error="First name is not valid (50 characters max)",
)
user_req.add_argument(
    "lastname",
    required=False,
    type=length_restricted(1, 50, str),
    location="json",
    help="Family name",
    default="",
    error="Last name is not valid (50 characters max)",
)
user_req.add_argument(
    "username",
    required=True,
    type=length_restricted(3, 20, str),
    location="json",
    help="Username",
    error="Username is not valid (must be 3-20 characters)",
)
user_req.add_argument(
    "password",
    required=True,
    type=length_restricted(3, MAX_PASSWORD_LENGTH, str),
    location="json",
    help="Password",
    error="Password is not valid (must be at least 3 characters)",
)
user_req.add_argument(
    "affiliation",
    required=True,
    type=length_restricted(3, 50, str),
    location="json",
    help="e.g. school or organization",
    error="School or organization name is not valid (must be 3-50 characters)",
)
user_req.add_argument(
    "usertype",
    required=True,
    type=str,
    choices=["student", "college", "teacher", "other"],
    location="json",
    help="User type",
    error="Invalid user type",
)
user_req.add_argument(
    "country",
    required=True,
    type=length_restricted(2, 2, str),
    location="json",
    help="2-letter country code",
    error="Country is invalid (must be 2-letter country code)",
)
# @TODO validate nested fields
user_req.add_argument(
    "demo",
    required=True,
    type=object_type,
    location="json",
    help="Demographic information (parentemail, age)",
    error="Demographics fields are required",
)
user_req.add_argument(
    "gid",
    required=False,
    type=str,
    location="json",
    help="Group ID (optional, to automatically enroll in group)",
    error="gid field must be a string",
)
user_req.add_argument(
    "rid",
    required=False,
    type=str,
    location="json",
    help="Registration ID (optional, to automatically enroll in group)",
    error="rid field must be a string",
)
# No type/error: the captcha response is verified server-side, not here.
user_req.add_argument(
    "g-recaptcha-response",
    required=False,
    location="json",
    help="reCAPTCHA response, required if reCAPTCHA enabled in settings",
)
# Login request
login_req = reqparse.RequestParser()
login_req.add_argument(
    "username",
    required=True,
    type=str,
    help="Username",
    location="json",
    error="Username is required",
)
login_req.add_argument(
    "password",
    required=True,
    type=str,
    help="Password",
    location="json",
    error="Password is required",
)
# User extdata update request
user_extdata_req = reqparse.RequestParser()
user_extdata_req.add_argument(
    "extdata",
    required=True,
    type=object_type,
    location="json",
    help="Arbitrary object to set as extdata",
    error="extdata must be a valid JSON object",
)
# Disable account request
disable_account_req = reqparse.RequestParser()
disable_account_req.add_argument(
    "password",
    required=True,
    type=str,
    location="json",
    help="Current password required for confirmation",
    error="Password is required",
)
# Update password request
# NOTE(review): only lengths are checked here; equality of new_password and
# its confirmation is presumably enforced in the handler — confirm.
update_password_req = reqparse.RequestParser()
update_password_req.add_argument(
    "current_password",
    required=True,
    type=str,
    location="json",
    help="Current password",
    error="Current password is required",
)
update_password_req.add_argument(
    "new_password",
    required=True,
    type=length_restricted(3, MAX_PASSWORD_LENGTH, str),
    location="json",
    help="New password",
    error="New password is required (3 character minimum)",
)
update_password_req.add_argument(
    "new_password_confirmation",
    required=True,
    type=length_restricted(3, MAX_PASSWORD_LENGTH, str),
    location="json",
    help="Must match new_password",
    error="New password entries must match",
)
# Reset password confirmation request (step 2: token + new password)
reset_password_confirmation_req = reqparse.RequestParser()
reset_password_confirmation_req.add_argument(
    "reset_token",
    required=True,
    type=str,
    location="json",
    help="Password reset token",
    error="Password reset token is required",
)
reset_password_confirmation_req.add_argument(
    "new_password",
    required=True,
    type=length_restricted(3, MAX_PASSWORD_LENGTH, str),
    location="json",
    help="New password",
    error="New password is required (3 character minimum)",
)
reset_password_confirmation_req.add_argument(
    "new_password_confirmation",
    required=True,
    type=length_restricted(3, MAX_PASSWORD_LENGTH, str),
    location="json",
    help="Must match new_password",
    error="New password entries must match",
)
# Reset password request (step 1: trigger the reset email)
reset_password_req = reqparse.RequestParser()
reset_password_req.add_argument(
    "username",
    required=True,
    type=str,
    location="json",
    help="Send a password reset email to this user.",
    error="Username is required",
)
# Email verification request
email_verification_req = reqparse.RequestParser()
# Fix: the help/error text previously said "Password reset token", which was
# a copy-paste from the reset-password schema above; this argument carries
# the email verification token.
email_verification_req.add_argument(
    "token",
    required=True,
    type=str,
    location="args",
    help="Email verification token",
    error="Email verification token is required",
)
email_verification_req.add_argument(
    "uid",
    required=True,
    type=str,
    location="args",
    help="User ID",
    error="User ID is required",
)
# Team password update request
update_team_password_req = reqparse.RequestParser()
update_team_password_req.add_argument(
    "new_password",
    required=True,
    type=length_restricted(3, MAX_PASSWORD_LENGTH, str),
    location="json",
    help="New password",
    error="New password is required (3 character minimum)",
)
update_team_password_req.add_argument(
    "new_password_confirmation",
    required=True,
    type=length_restricted(3, MAX_PASSWORD_LENGTH, str),
    location="json",
    help="Must match new_password",
    error="New password fields must match",
)
# Score progression request (single team; optional category filter)
score_progression_req = reqparse.RequestParser()
score_progression_req.add_argument(
    "category",
    required=False,
    type=str,
    location="args",
    help="Restrict score progression to this problem category",
    error="Category field must be a string",
)
# Team change request (join an existing team)
team_change_req = reqparse.RequestParser()
team_change_req.add_argument(
    "team_name",
    required=True,
    type=str,
    location="json",
    help="Name of the team to join.",
    error="Must specify the name of the team to join",
)
team_change_req.add_argument(
    "team_password",
    required=True,
    type=str,
    location="json",
    help="Password of the team to join.",
    error="Team password is required",
)
# Team request (create a new team)
team_req = reqparse.RequestParser()
team_req.add_argument(
    "team_name",
    required=True,
    type=length_restricted(3, 100, str),
    location="json",
    help="Name of the new team",
    error="A name for the new team is required",
)
team_req.add_argument(
    "team_password",
    required=True,
    type=length_restricted(3, MAX_PASSWORD_LENGTH, str),
    location="json",
    help="Password for the new team",
    error="A password for the new team is required (3 character minimum)",
)
# Team patch request
# store_missing=False: the key is omitted from the parsed result when absent,
# so PATCH won't clobber the stored value with a default.
team_patch_req = reqparse.RequestParser()
team_patch_req.add_argument(
    "allow_ineligible_members",
    required=False,
    type=inputs.boolean,
    location="json",
    store_missing=False,
    help="Whether to allow ineligible users to join the team",
)
# Scoreboard page request
# @TODO marshmallow: default page to 1 rather than None if search is specified
# remove 'or 1' in get_filtered_scoreboard_page calls
scoreboard_page_req = reqparse.RequestParser()
scoreboard_page_req.add_argument(
    "page",
    required=False,
    default=None,
    type=inputs.positive,
    location="args",
    help="Scoreboard page to return",
    error="page must be a positive integer",
)
scoreboard_page_req.add_argument(
    "search",
    required=False,
    default=None,
    type=str,
    location="args",
    help="Search filter pattern",
    error="Search pattern must be a string",
)
# Score progressions request (top-N teams)
score_progressions_req = reqparse.RequestParser()
score_progressions_req.add_argument(
    "limit",
    required=False,
    type=inputs.positive,
    location="args",
    help="The number of top teams' score progressions to return. "
    + "Must be an admin to use this argument.",
)
# Group request (create a classroom)
group_req = reqparse.RequestParser()
group_req.add_argument(
    "name",
    required=True,
    type=length_restricted(3, 100, str),
    location="json",
    help="Name for the new classroom.",
    error="Classroom name is required",
)
# Group patch request
# @TODO because of RequestParser's limitations with nested fields,
# voluptous handles actually checking the settings fields within group.py.
group_patch_req = reqparse.RequestParser()
group_patch_req.add_argument(
    "settings",
    required=False,
    type=object_type,
    location="json",
    help="Updated settings object.",
)
# Group team modification request
group_modify_team_req = reqparse.RequestParser()
group_modify_team_req.add_argument(
    "team_id",
    required=True,
    location="json",
    type=str,
    help="ID of the team to modify.",
    error="Team ID is required",
)
# Group invite request
group_invite_req = reqparse.RequestParser()
group_invite_req.add_argument(
    "email",
    required=True,
    type=inputs.email(),
    location="json",
    help="Email address to invite to the classroom.",
    error="Must be a valid email address",
)
# NOTE(review): required=True combined with default=False is contradictory —
# a required argument never falls back to its default; confirm which is
# intended.
group_invite_req.add_argument(
    "as_teacher",
    required=True,
    type=inputs.boolean,
    location="json",
    default=False,
    help="Invite this user to be a teacher in the classroom, "
    + "rather than a regular member.",
    error="as_teacher must be a boolean value",
)
# Join group request
join_group_req = reqparse.RequestParser()
join_group_req.add_argument(
    "group_name",
    required=True,
    type=length_restricted(3, 100, str),
    location="json",
    help="Name of the group to join.",
    error="Classroom name is required",
)
join_group_req.add_argument(
    "group_owner",
    required=True,
    type=length_restricted(3, 40, str),
    location="json",
    help="Name of the teacher who owns the group.",
    error="Classroom owner is required",
)
# Minigame submission request
minigame_submission_req = reqparse.RequestParser()
minigame_submission_req.add_argument(
    "minigame_id",
    required=True,
    type=str,
    location="json",
    help="ID of the completed minigame",
    error="Minigame ID is required",
)
minigame_submission_req.add_argument(
    "verification_key",
    required=True,
    type=str,
    location="json",
    help="Verification key for the minigame",
    error="Minigame verification key is required",
)
# Batch registration schema (multipart file upload, not JSON)
batch_registration_req = reqparse.RequestParser()
batch_registration_req.add_argument(
    "csv",
    type=werkzeug.datastructures.FileStorage,
    location="files",
    required=True,
    help="Modified copy of the provided batch import CSV",
    error="A valid CSV file is required",
)
# User search schema
user_search_req = reqparse.RequestParser()
user_search_req.add_argument(
    "field",
    required=True,
    type=str,
    choices=["Email", "Parent Email", "User Name"],
    location="json",
    help="The field to be searched",
    error='Field to search must be one of: "Email", "Parent Email", "User Name"',
)
user_search_req.add_argument(
    "query",
    required=True,
    location="json",
    type=str,
    help="Body of the query",
    error="Query field is empty!",
)
# Scoreboard schema
scoreboard_req = reqparse.RequestParser()
scoreboard_req.add_argument(
    "name",
    required=True,
    type=str,
    location="json",
    help="Name of the scoreboard",
    error="Scoreboard name must be a string",
)
# NOTE(review): default={} is a shared mutable default — safe only if
# downstream code never mutates the parsed value in place; confirm.
scoreboard_req.add_argument(
    "eligibility_conditions",
    required=False,
    type=object_type,
    location="json",
    default={},
    help="MongoDB query to find eligible users",
    error="Eligibility conditions must be a MongoDB query string",
)
scoreboard_req.add_argument(
    "priority",
    required=False,
    type=inputs.natural,
    location="json",
    default=0,
    help="Optional scoreboard priority. Scoreboards are listed "
    + "in order of descending priority on the scoreboard page",
)
scoreboard_req.add_argument(
    "sponsor",
    required=False,
    type=str,
    location="json",
    default=None,
    help="Sponsor of the scoreboard",
    error="Sponsor must be a string",
)
scoreboard_req.add_argument(
    "logo",
    required=False,
    type=str,
    location="json",
    default=None,
    help="URL of a logo for the scoreboard",
    error="Logo must be an image URL",
)
# User deletion schema
user_delete_req = reqparse.RequestParser()
user_delete_req.add_argument(
    "reason",
    required=False,
    location="json",
    type=str,
    help="Deletion reason",
    error="The reason must be a string!",
)
|
|
from __future__ import unicode_literals
from django.conf.urls import url
from django.test import TestCase, override_settings
from rest_framework import serializers
from rest_framework.test import APIRequestFactory
from tests.models import (
ForeignKeySource, ForeignKeyTarget, ManyToManySource, ManyToManyTarget,
NullableForeignKeySource, NullableOneToOneSource, OneToOneTarget
)
# Shared request used as serializer context so hyperlinks render as absolute
# http://testserver/... URLs (tests passing context={'request': None} get
# relative paths instead).
factory = APIRequestFactory()
request = factory.get('/') # Just to ensure we have a request in the serializer context
def dummy_view(request, pk):
    """Placeholder view target: these tests only reverse URLs, never dispatch them."""
    return None
# URLconf consumed via override_settings(ROOT_URLCONF=...); one named route
# per model so HyperlinkedModelSerializer can build *-detail hyperlinks.
urlpatterns = [
    url(r'^dummyurl/(?P<pk>[0-9]+)/$', dummy_view, name='dummy-url'),
    url(r'^manytomanysource/(?P<pk>[0-9]+)/$', dummy_view, name='manytomanysource-detail'),
    url(r'^manytomanytarget/(?P<pk>[0-9]+)/$', dummy_view, name='manytomanytarget-detail'),
    url(r'^foreignkeysource/(?P<pk>[0-9]+)/$', dummy_view, name='foreignkeysource-detail'),
    url(r'^foreignkeytarget/(?P<pk>[0-9]+)/$', dummy_view, name='foreignkeytarget-detail'),
    url(r'^nullableforeignkeysource/(?P<pk>[0-9]+)/$', dummy_view, name='nullableforeignkeysource-detail'),
    url(r'^onetoonetarget/(?P<pk>[0-9]+)/$', dummy_view, name='onetoonetarget-detail'),
    url(r'^nullableonetoonesource/(?P<pk>[0-9]+)/$', dummy_view, name='nullableonetoonesource-detail'),
]
# ManyToMany
class ManyToManyTargetSerializer(serializers.HyperlinkedModelSerializer):
    """Target side of the M2M; 'sources' is the reverse relation, as hyperlinks."""
    class Meta:
        model = ManyToManyTarget
        fields = ('url', 'name', 'sources')
class ManyToManySourceSerializer(serializers.HyperlinkedModelSerializer):
    """Source side of the M2M; 'targets' is the forward relation, as hyperlinks."""
    class Meta:
        model = ManyToManySource
        fields = ('url', 'name', 'targets')
# ForeignKey
class ForeignKeyTargetSerializer(serializers.HyperlinkedModelSerializer):
    """FK target; 'sources' is the reverse foreign-key relation, as hyperlinks."""
    class Meta:
        model = ForeignKeyTarget
        fields = ('url', 'name', 'sources')
class ForeignKeySourceSerializer(serializers.HyperlinkedModelSerializer):
    """FK source; 'target' is the forward foreign key, as a hyperlink."""
    class Meta:
        model = ForeignKeySource
        fields = ('url', 'name', 'target')
# Nullable ForeignKey
class NullableForeignKeySourceSerializer(serializers.HyperlinkedModelSerializer):
    """Like ForeignKeySourceSerializer, but the FK may be null."""
    class Meta:
        model = NullableForeignKeySource
        fields = ('url', 'name', 'target')
# Nullable OneToOne
class NullableOneToOneTargetSerializer(serializers.HyperlinkedModelSerializer):
    """OneToOneTarget with its reverse (nullable) one-to-one as a hyperlink."""
    class Meta:
        model = OneToOneTarget
        fields = ('url', 'name', 'nullable_source')
# TODO: Add test that .data cannot be accessed prior to .is_valid
@override_settings(ROOT_URLCONF='tests.test_relations_hyperlink')
class HyperlinkedManyToManyTests(TestCase):
def setUp(self):
for idx in range(1, 4):
target = ManyToManyTarget(name='target-%d' % idx)
target.save()
source = ManyToManySource(name='source-%d' % idx)
source.save()
for target in ManyToManyTarget.objects.all():
source.targets.add(target)
def test_relative_hyperlinks(self):
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True, context={'request': None})
expected = [
{'url': '/manytomanysource/1/', 'name': 'source-1', 'targets': ['/manytomanytarget/1/']},
{'url': '/manytomanysource/2/', 'name': 'source-2', 'targets': ['/manytomanytarget/1/', '/manytomanytarget/2/']},
{'url': '/manytomanysource/3/', 'name': 'source-3', 'targets': ['/manytomanytarget/1/', '/manytomanytarget/2/', '/manytomanytarget/3/']}
]
with self.assertNumQueries(4):
self.assertEqual(serializer.data, expected)
def test_many_to_many_retrieve(self):
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanysource/1/', 'name': 'source-1', 'targets': ['http://testserver/manytomanytarget/1/']},
{'url': 'http://testserver/manytomanysource/2/', 'name': 'source-2', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/']},
{'url': 'http://testserver/manytomanysource/3/', 'name': 'source-3', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']}
]
with self.assertNumQueries(4):
self.assertEqual(serializer.data, expected)
def test_many_to_many_retrieve_prefetch_related(self):
queryset = ManyToManySource.objects.all().prefetch_related('targets')
serializer = ManyToManySourceSerializer(queryset, many=True, context={'request': request})
with self.assertNumQueries(2):
serializer.data
def test_reverse_many_to_many_retrieve(self):
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/manytomanysource/1/', 'http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
{'url': 'http://testserver/manytomanytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
{'url': 'http://testserver/manytomanytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/manytomanysource/3/']}
]
with self.assertNumQueries(4):
self.assertEqual(serializer.data, expected)
def test_many_to_many_update(self):
data = {'url': 'http://testserver/manytomanysource/1/', 'name': 'source-1', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']}
instance = ManyToManySource.objects.get(pk=1)
serializer = ManyToManySourceSerializer(instance, data=data, context={'request': request})
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure source 1 is updated, and everything else is as expected
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanysource/1/', 'name': 'source-1', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']},
{'url': 'http://testserver/manytomanysource/2/', 'name': 'source-2', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/']},
{'url': 'http://testserver/manytomanysource/3/', 'name': 'source-3', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']}
]
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_update(self):
data = {'url': 'http://testserver/manytomanytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/manytomanysource/1/']}
instance = ManyToManyTarget.objects.get(pk=1)
serializer = ManyToManyTargetSerializer(instance, data=data, context={'request': request})
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure target 1 is updated, and everything else is as expected
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/manytomanysource/1/']},
{'url': 'http://testserver/manytomanytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
{'url': 'http://testserver/manytomanytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/manytomanysource/3/']}
]
self.assertEqual(serializer.data, expected)
def test_many_to_many_create(self):
data = {'url': 'http://testserver/manytomanysource/4/', 'name': 'source-4', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/3/']}
serializer = ManyToManySourceSerializer(data=data, context={'request': request})
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is added, and everything else is as expected
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanysource/1/', 'name': 'source-1', 'targets': ['http://testserver/manytomanytarget/1/']},
{'url': 'http://testserver/manytomanysource/2/', 'name': 'source-2', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/']},
{'url': 'http://testserver/manytomanysource/3/', 'name': 'source-3', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']},
{'url': 'http://testserver/manytomanysource/4/', 'name': 'source-4', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/3/']}
]
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_create(self):
    """Deserializing hyperlinked reverse-m2m sources creates the links too.

    Same as test_many_to_many_create but driven from the target side of the
    relation ('sources' is the reverse accessor).
    """
    data = {'url': 'http://testserver/manytomanytarget/4/', 'name': 'target-4', 'sources': ['http://testserver/manytomanysource/1/', 'http://testserver/manytomanysource/3/']}
    serializer = ManyToManyTargetSerializer(data=data, context={'request': request})
    self.assertTrue(serializer.is_valid())
    obj = serializer.save()
    # Round-trip: serialized output should match the input payload exactly.
    self.assertEqual(serializer.data, data)
    self.assertEqual(obj.name, 'target-4')
    # Ensure target 4 is added, and everything else is as expected
    queryset = ManyToManyTarget.objects.all()
    serializer = ManyToManyTargetSerializer(queryset, many=True, context={'request': request})
    expected = [
        {'url': 'http://testserver/manytomanytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/manytomanysource/1/', 'http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
        {'url': 'http://testserver/manytomanytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
        {'url': 'http://testserver/manytomanytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/manytomanysource/3/']},
        {'url': 'http://testserver/manytomanytarget/4/', 'name': 'target-4', 'sources': ['http://testserver/manytomanysource/1/', 'http://testserver/manytomanysource/3/']}
    ]
    self.assertEqual(serializer.data, expected)
@override_settings(ROOT_URLCONF='tests.test_relations_hyperlink')
class HyperlinkedForeignKeyTests(TestCase):
    """Serialization round-trips for a non-nullable FK exposed as hyperlinks.

    Covers retrieve/update/create from both the forward ('target') and
    reverse ('sources') sides, plus type and null validation errors.
    """

    def setUp(self):
        # target-1 owns all three sources; target-2 starts empty.
        target = ForeignKeyTarget(name='target-1')
        target.save()
        new_target = ForeignKeyTarget(name='target-2')
        new_target.save()
        for idx in range(1, 4):
            source = ForeignKeySource(name='source-%d' % idx, target=target)
            source.save()

    def test_foreign_key_retrieve(self):
        """Forward FK renders as the target's URL; list render is 1 query."""
        queryset = ForeignKeySource.objects.all()
        serializer = ForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/foreignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/foreignkeysource/3/', 'name': 'source-3', 'target': 'http://testserver/foreignkeytarget/1/'}
        ]
        with self.assertNumQueries(1):
            self.assertEqual(serializer.data, expected)

    def test_reverse_foreign_key_retrieve(self):
        """Reverse FK renders as a list of source URLs (one extra query per target)."""
        queryset = ForeignKeyTarget.objects.all()
        serializer = ForeignKeyTargetSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/foreignkeytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/2/', 'http://testserver/foreignkeysource/3/']},
            {'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': []},
        ]
        with self.assertNumQueries(3):
            self.assertEqual(serializer.data, expected)

    def test_foreign_key_update(self):
        """Re-pointing source-1 at target-2 via its URL persists correctly."""
        data = {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/2/'}
        instance = ForeignKeySource.objects.get(pk=1)
        serializer = ForeignKeySourceSerializer(instance, data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        serializer.save()
        self.assertEqual(serializer.data, data)
        # Ensure source 1 is updated, and everything else is as expected
        queryset = ForeignKeySource.objects.all()
        serializer = ForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/2/'},
            {'url': 'http://testserver/foreignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/foreignkeysource/3/', 'name': 'source-3', 'target': 'http://testserver/foreignkeytarget/1/'}
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_update_incorrect_type(self):
        """A non-string (int pk) for a hyperlinked field is a validation error."""
        data = {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 2}
        instance = ForeignKeySource.objects.get(pk=1)
        serializer = ForeignKeySourceSerializer(instance, data=data, context={'request': request})
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, {'target': ['Incorrect type. Expected URL string, received int.']})

    def test_reverse_foreign_key_update(self):
        """Updating the reverse list moves sources; nothing written before save()."""
        data = {'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/3/']}
        instance = ForeignKeyTarget.objects.get(pk=2)
        serializer = ForeignKeyTargetSerializer(instance, data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        # We shouldn't have saved anything to the db yet since save
        # hasn't been called.
        queryset = ForeignKeyTarget.objects.all()
        new_serializer = ForeignKeyTargetSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/foreignkeytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/2/', 'http://testserver/foreignkeysource/3/']},
            {'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': []},
        ]
        self.assertEqual(new_serializer.data, expected)
        serializer.save()
        self.assertEqual(serializer.data, data)
        # Ensure target 2 is update, and everything else is as expected
        queryset = ForeignKeyTarget.objects.all()
        serializer = ForeignKeyTargetSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/foreignkeytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/foreignkeysource/2/']},
            {'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/3/']},
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_create(self):
        """Creating a source with a target URL links it to that target."""
        data = {'url': 'http://testserver/foreignkeysource/4/', 'name': 'source-4', 'target': 'http://testserver/foreignkeytarget/2/'}
        serializer = ForeignKeySourceSerializer(data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'source-4')
        # Ensure source 1 is updated, and everything else is as expected
        queryset = ForeignKeySource.objects.all()
        serializer = ForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/foreignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/foreignkeysource/3/', 'name': 'source-3', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/foreignkeysource/4/', 'name': 'source-4', 'target': 'http://testserver/foreignkeytarget/2/'},
        ]
        self.assertEqual(serializer.data, expected)

    def test_reverse_foreign_key_create(self):
        """Creating a target with a 'sources' list steals those sources."""
        data = {'url': 'http://testserver/foreignkeytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/3/']}
        serializer = ForeignKeyTargetSerializer(data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'target-3')
        # Ensure target 4 is added, and everything else is as expected
        queryset = ForeignKeyTarget.objects.all()
        serializer = ForeignKeyTargetSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/foreignkeytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/foreignkeysource/2/']},
            {'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': []},
            {'url': 'http://testserver/foreignkeytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/3/']},
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_update_with_invalid_null(self):
        """null is rejected on a non-nullable FK."""
        data = {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': None}
        instance = ForeignKeySource.objects.get(pk=1)
        serializer = ForeignKeySourceSerializer(instance, data=data, context={'request': request})
        self.assertFalse(serializer.is_valid())
        self.assertEqual(serializer.errors, {'target': ['This field may not be null.']})
@override_settings(ROOT_URLCONF='tests.test_relations_hyperlink')
class HyperlinkedNullableForeignKeyTests(TestCase):
    """Hyperlinked serialization of a nullable FK.

    Verifies that None and the empty string both deserialize to a null
    relation, and that null targets render as None.
    """

    def setUp(self):
        # sources 1-2 point at target-1; source 3 has a null target.
        target = ForeignKeyTarget(name='target-1')
        target.save()
        for idx in range(1, 4):
            if idx == 3:
                target = None
            source = NullableForeignKeySource(name='source-%d' % idx, target=target)
            source.save()

    def test_foreign_key_retrieve_with_null(self):
        """A null FK serializes as None rather than a URL."""
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_create_with_valid_null(self):
        """Creating with target=None is accepted and round-trips as None."""
        data = {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': None}
        serializer = NullableForeignKeySourceSerializer(data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'source-4')
        # Ensure source 4 is created, and everything else is as expected
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
            {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': None}
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_create_with_valid_emptystring(self):
        """
        The emptystring should be interpreted as null in the context
        of relationships.
        """
        data = {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': ''}
        expected_data = {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': None}
        serializer = NullableForeignKeySourceSerializer(data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        # Output normalizes '' to None.
        self.assertEqual(serializer.data, expected_data)
        self.assertEqual(obj.name, 'source-4')
        # Ensure source 4 is created, and everything else is as expected
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
            {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': None}
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_update_with_valid_null(self):
        """Updating an existing source to target=None clears the relation."""
        data = {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': None}
        instance = NullableForeignKeySource.objects.get(pk=1)
        serializer = NullableForeignKeySourceSerializer(instance, data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        serializer.save()
        self.assertEqual(serializer.data, data)
        # Ensure source 1 is updated, and everything else is as expected
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': None},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_update_with_valid_emptystring(self):
        """
        The emptystring should be interpreted as null in the context
        of relationships.
        """
        data = {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': ''}
        expected_data = {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': None}
        instance = NullableForeignKeySource.objects.get(pk=1)
        serializer = NullableForeignKeySourceSerializer(instance, data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        serializer.save()
        # Output normalizes '' to None.
        self.assertEqual(serializer.data, expected_data)
        # Ensure source 1 is updated, and everything else is as expected
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': None},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
        ]
        self.assertEqual(serializer.data, expected)
@override_settings(ROOT_URLCONF='tests.test_relations_hyperlink')
class HyperlinkedNullableOneToOneTests(TestCase):
    """Hyperlinked rendering of a nullable reverse one-to-one relation."""

    def setUp(self):
        # One target with a linked source, one target left unlinked.
        linked_target = OneToOneTarget(name='target-1')
        linked_target.save()
        unlinked_target = OneToOneTarget(name='target-2')
        unlinked_target.save()
        NullableOneToOneSource(name='source-1', target=linked_target).save()

    def test_reverse_foreign_key_retrieve_with_null(self):
        """An absent reverse one-to-one serializes as None, not a URL."""
        targets = OneToOneTarget.objects.all()
        serializer = NullableOneToOneTargetSerializer(targets, many=True, context={'request': request})
        self.assertEqual(serializer.data, [
            {'url': 'http://testserver/onetoonetarget/1/', 'name': 'target-1', 'nullable_source': 'http://testserver/nullableonetoonesource/1/'},
            {'url': 'http://testserver/onetoonetarget/2/', 'name': 'target-2', 'nullable_source': None},
        ])
|
|
import re, util
from collections import Counter
def get_actions(cursor):
actions = [] #[(regex, function)]
def action(regex):
    """Build a decorator that registers a handler for a command pattern.

    '{name}' placeholders in the pattern are rewritten to lazy named
    wildcard groups r'(?P<name>.*?)', and an optional trailing full stop
    plus end-of-string anchor is appended. Matching is case-insensitive.
    The returned decorator appends (compiled_regex, function) to the
    enclosing `actions` list.
    """
    # Replace each {name} placeholder with a named, non-greedy capture group.
    regex = re.sub(r'\{(.*?)}', lambda m:r'(?P<{0}>.*?)'.format(m.group(1)), regex) + r'\.?$'
    compiled_regex = re.compile(regex, flags=re.IGNORECASE)
    return lambda function: actions.append((compiled_regex, function))
selector = util.Selector(cursor)
select = selector.select
select_all = selector.select_all
ActionError = util.ActionError
@action('(?:Join|/in)')
def join(queue, username):
    """Register a new player: starting coins plus one office in Roma.

    Rejects players who are already in the game.
    """
    if select('players', name=username):
        raise ActionError('You cannot join because you are already playing!')
    else:
        cursor.execute('INSERT INTO players (name, phase) VALUES (?, 0)', (username,))
        player_id = cursor.lastrowid
        util.give_coins(cursor, player_id, util.get_param(cursor, 'starting_coins'))
        roma_id = util.get_param(cursor, 'roma_id')
        # Every new player starts with a level-1 office in Roma.
        util.give_offices(cursor, player_id, roma_id, 1, 1)
        region_name = select('regions', id=roma_id)['name']
        # NOTE(review): success is reported by raising ActionError, unlike the
        # other handlers which return a string. If ActionError triggers a
        # transaction rollback elsewhere, the INSERT above would be undone --
        # confirm this is intentional.
        raise ActionError('You have successfully joined the game and have been given an office in {0}. Choose a region for your second office to begin.'.format(region_name))
@action('Take office in {region_name}')
def take_office(queue, username, region_name):
    """Let a phase-0 player place their second office, then advance to phase 1.

    The chosen region must currently be tied for the fewest offices.
    """
    acting_player = util.get_player(cursor, username, phase=0)
    chosen_region = util.get_region(cursor, region_name)
    counts = util.get_offices_per_region(cursor)
    fewest = min(counts.values())
    if counts[chosen_region['id']] != fewest:
        raise ActionError('You must select an office in a region with the least offices')
    util.give_offices(cursor, acting_player['id'], chosen_region['id'], 1, 1)
    cursor.execute('UPDATE players SET phase=1 WHERE id=?', (acting_player['id'],))
    return 'You now have an office in {0}'.format(chosen_region['name'])
@action('Buy {quantity} {item_name} (?:in|from) {region_name}')
def buy_item(queue, username, quantity, item_name, region_name):
    """Buy `quantity` of an item from a region where the player has an office.

    Validates buyability, office presence, quantity, funds and free carrying
    capacity before moving coins out and items in.
    """
    player = util.get_player(cursor, username, phase=1)
    item = util.get_item(cursor, item_name)
    if not item['buyable']:
        # BUG FIX: previously had a no-op '.format(item_name)' on a message
        # with no placeholder; the formatted output is unchanged.
        raise ActionError('You can\'t buy that item')
    region = util.get_region(cursor, region_name)
    if not select('offices', player_id=player['id'], region_id=region['id']):
        raise ActionError('You can only buy from regions where you have an office')
    try:
        quantity = int(quantity)
    except ValueError:
        raise ActionError('Invalid integer: {0}'.format(quantity))
    if quantity < 0:
        raise ActionError('You can\'t buy negative items')
    price = util.get_price(cursor, region['id'], item['id'], buying=True)*quantity
    if price > util.get_coins(cursor, player['id']):
        raise ActionError('You can\'t afford that')
    if quantity > util.get_free_capacity(cursor, player['id']):
        raise ActionError('You don\'t have enough free capacity to do that')
    util.give_coins(cursor, player['id'], -price)
    util.give_items(cursor, player['id'], item['id'], quantity)
    return 'You have successfully bought {0} {1} from {2}'.format(quantity, item['name'], region['name'])
@action('Sell {quantity} {item_name} (?:in|to) {region_name}')
def sell_item(queue, username, quantity, item_name, region_name):
    """Sell `quantity` of an item to a region where the player has an office.

    Mirror of buy_item for phase 2: validates the item, office presence,
    quantity and inventory before moving items out and coins in.
    """
    player = util.get_player(cursor, username, phase=2)
    item = util.get_item(cursor, item_name)
    # NOTE(review): 'buyable' gates selling as well as buying -- presumably it
    # means "tradeable"; confirm against the items schema.
    if not item['buyable']:
        # BUG FIX: previously had a no-op '.format(item_name)' on a message
        # with no placeholder; the formatted output is unchanged.
        raise ActionError('You can\'t sell that item')
    region = util.get_region(cursor, region_name)
    if not select('offices', player_id=player['id'], region_id=region['id']):
        raise ActionError('You can only sell to regions where you have an office')
    try:
        quantity = int(quantity)
    except ValueError:
        raise ActionError('Invalid integer: {0}'.format(quantity))
    if quantity < 0:
        raise ActionError('You can\'t sell negative items')
    price = util.get_price(cursor, region['id'], item['id'], buying=False)*quantity
    inv_quantity = util.get_inventory_quantity(cursor, player['id'], item['id'])
    if quantity > inv_quantity:
        raise ActionError('You don\'t have that many of that item')
    util.give_coins(cursor, player['id'], price)
    util.give_items(cursor, player['id'], item['id'], -quantity)
    return 'You have successfully sold {0} {1} to {2}'.format(quantity, item['name'], region['name'])
@action('Lend {quantity} coins to {borrower_name} with {interest} coins interest due turn {turn_number} as {loan_name}')
def lend(queue, username, quantity, borrower_name, interest, turn_number, loan_name):
    """Offer a named loan to another player; it stays pending until accepted.

    Coins only change hands when the borrower accepts (see accept_loan).
    """
    lender = util.get_player(cursor, username, phase=1)

    def _parse_amount(raw):
        # Shared parsing for the coin amounts; message matches the originals.
        try:
            return int(raw)
        except ValueError:
            raise ActionError('Invalid integer: {0}'.format(raw))

    amount = _parse_amount(quantity)
    fee = _parse_amount(interest)
    if amount < 0 or fee < 0:
        raise ActionError('Loan amount and interest can\'t be negative')
    borrower = select('players', name=borrower_name)
    if not borrower:
        raise ActionError('Unknown player: {0}'.format(borrower_name))
    if lender['id'] == borrower['id']:
        raise ActionError('You can\'t make a loan to yourself')
    try:
        due_turn = int(turn_number)
    except ValueError:
        raise ActionError('{0} is not an integer'.format(turn_number))
    if util.get_global(cursor, 'turn_number') >= due_turn:
        raise ActionError('Turn {0} has already started'.format(due_turn))
    if select('loans', name=loan_name):
        raise ActionError('There is already a loan with that name')
    cursor.execute('INSERT INTO loans (offerer, offeree, coins, interest, due_by, accepted, name) VALUES (?, ?, ?, ?, ?, ?, ?)',
                   (lender['id'], borrower['id'], amount, fee, due_turn, False, loan_name))
    return 'Loan successfully offered'
@action('Accept loan {loan_name}')
def accept_loan(queue, username, loan_name):
    """Accept a pending loan made to you: coins move lender -> borrower.

    The repayment obligation (principal + interest) is settled in repay_loan.
    """
    borrower = util.get_player(cursor, username, phase=1)
    loan = select('loans', name=loan_name)
    if not loan:
        raise ActionError('There is no loan with that name')
    if loan['accepted']:
        raise ActionError('That loan offer has already been accepted')
    if loan['offeree'] != borrower['id']:
        raise ActionError('That loan offer wasn\'t made to you')
    lender = select('players', id=loan['offerer'])
    principal = loan['coins']
    # The lender must still be able to fund the loan at acceptance time.
    if principal > util.get_coins(cursor, lender['id']):
        raise ActionError('{0} doesn\'t have enough coins'.format(lender['name']))
    util.give_coins(cursor, lender['id'], -principal)
    util.give_coins(cursor, borrower['id'], principal)
    cursor.execute('UPDATE loans SET accepted=1 WHERE id=?', (loan['id'],))
    return 'Loan successfully accepted'
@action('Refuse loan {loan_name}')
def refuse_loan(queue, username, loan_name):
    """Decline a pending loan offer made to you (deletes the offer row)."""
    refuser = util.get_player(cursor, username)
    loan = select('loans', name=loan_name)
    if not loan:
        raise ActionError('There is no loan with that name')
    if loan['accepted']:
        raise ActionError('That loan offer has already been accepted')
    if loan['offeree'] != refuser['id']:
        raise ActionError('That loan offer wasn\'t made to you')
    cursor.execute('DELETE FROM loans WHERE id=?', (loan['id'],))
    return 'Loan succesfully refused'
@action('Cancel loan {loan_name}')
def cancel_loan(queue, username, loan_name):
    """Withdraw a pending loan offer that you made (deletes the offer row)."""
    player = util.get_player(cursor, username)
    loan = select('loans', name=loan_name)
    if not loan:
        raise ActionError('There is no loan with that name')
    if loan['accepted']:
        raise ActionError('That loan offer has already been accepted')
    if loan['offerer'] != player['id']:
        raise ActionError('That loan offer wasn\'t made by you')
    cursor.execute('DELETE FROM loans WHERE id=?', (loan['id'],))
    # BUG FIX: the success message was raised as ActionError, unlike the
    # sibling refuse_loan/cancel_offer handlers which return it; raising on
    # the success path would also undo the DELETE if ActionError triggers a
    # rollback in the caller.
    return 'Loan succesfully cancelled'
#@action(r'Trade {items1} for {items2>(?:(?:[0-9]+|an?) .*?(?:, | and |, and )?)*) with {player} as {name}')
@action('Offer {items_1} for {items_2} to {offeree_name} as {offer_name}')
def make_offer(queue, username, items_1, items_2, offeree_name, offer_name):
    """Create a named item-for-item trade offer to another player.

    Stored as net quantities: positive rows flow offerer -> offeree,
    negative rows flow back. Items move only when the offer is accepted.
    """
    offerer = util.get_player(cursor, username, phase=1)
    other = select('players', name=offeree_name)
    if not other:
        raise ActionError('Unknown player: {0}'.format(offeree_name))
    if select('offers', name=offer_name):
        raise ActionError('There is already an offer with that name')
    if offerer['id'] == other['id']:
        raise ActionError('You can\'t make an offer to yourself')
    try:
        giving = util.count_items_str(cursor, items_1)
        wanting = util.count_items_str(cursor, items_2)
    except ValueError as e:
        raise ActionError(str(e))
    # Net the two sides into a single signed counter.
    giving.subtract(wanting)
    cursor.execute('INSERT INTO offers (offerer, offeree, name) VALUES (?, ?, ?)', (offerer['id'], other['id'], offer_name))
    new_offer_id = cursor.lastrowid
    for item_id, qty in giving.items():
        cursor.execute('INSERT INTO offer_items (offer_id, item_id, quantity) VALUES (?, ?, ?)', (new_offer_id, item_id, qty))
    return 'Offer successfully made'
@action('Accept (?:offer )?{offer_name}')
def accept_offer(queue, username, offer_name):
    """Accept a trade offer made to you and exchange the items.

    offer_items quantities are signed: positive flows offerer -> offeree
    (the accepting player), negative flows the other way. Both sides'
    inventories and the net receiver's capacity are checked first.
    """
    player = util.get_player(cursor, username, phase=1)
    offer = select('offers', name=offer_name)
    if not offer:
        raise ActionError('There is no offer with that name')
    if offer['offeree'] != player['id']:
        raise ActionError('That offer wasn\'t made to you')
    offerer = select('players', id=offer['offerer'])
    offer_items = select_all('offer_items', offer_id=offer['id'])
    for offer_item in offer_items:
        # Whoever gives this item must actually hold enough of it.
        itempayer = offerer if offer_item['quantity'] >= 0 else player
        inv_quantity = util.get_inventory_quantity(cursor, itempayer['id'], offer_item['item_id'])
        if abs(offer_item['quantity']) > inv_quantity:
            raise ActionError('{0} doesn\'t have enough {1}'.format(itempayer['name'], select('items', id=offer_item['item_id'])['name']))
    # Net capacity change for the accepting player; the other side gains -capacity.
    capacity = sum(select('items', id=item['item_id'])['capacity']*item['quantity'] for item in offer_items)
    capacityuser = player if capacity >= 0 else offerer
    # BUG FIX: compared the raw (possibly negative) net against free capacity,
    # so the offerer's capacity was never actually checked; use the magnitude,
    # matching the abs() used for the inventory check above.
    if abs(capacity) > util.get_free_capacity(cursor, capacityuser['id']):
        raise ActionError('{0} doesn\'t have enough free capacity'.format(capacityuser['name']))
    for offer_item in offer_items:
        util.give_items(cursor, offerer['id'], offer_item['item_id'], -offer_item['quantity'])
        util.give_items(cursor, player['id'], offer_item['item_id'], offer_item['quantity'])
    util.delete_offer(cursor, offer['id'])
    return 'Offer successfully completed'
@action('Refuse (?:offer )?{offer_name}')
def refuse_offer(queue, username, offer_name):
    """Decline a trade offer that was made to you."""
    refuser = util.get_player(cursor, username)
    offer = select('offers', name=offer_name)
    if not offer:
        raise ActionError('There is no offer with that name')
    if offer['offeree'] != refuser['id']:
        raise ActionError('That offer wasn\'t made to you')
    util.delete_offer(cursor, offer['id'])
    return 'Offer successfully refused'
@action('Cancel (?:offer )?{offer_name}')
def cancel_offer(queue, username, offer_name):
    """Withdraw a trade offer that you made."""
    canceller = util.get_player(cursor, username)
    offer = select('offers', name=offer_name)
    if not offer:
        raise ActionError('There is no offer with that name')
    if offer['offerer'] != canceller['id']:
        raise ActionError('You didn\'t make that offer')
    util.delete_offer(cursor, offer['id'])
    return 'Offer successfully cancelled'
@action('Dump {quantity} {item_name}')
def dump(queue, username, quantity, item_name):
    """Discard up to the held quantity of an item (frees carrying capacity)."""
    actor = util.get_player(cursor, username)
    target_item = util.get_item(cursor, item_name)
    try:
        count = int(quantity)
    except ValueError:
        raise ActionError('Invalid integer: {0}'.format(quantity))
    if count < 0:
        raise ActionError('You can\'t dump negative items')
    held = util.get_inventory_quantity(cursor, actor['id'], target_item['id'])
    if count > held:
        raise ActionError('You don\'t have that many of that item')
    util.give_items(cursor, actor['id'], target_item['id'], -count)
    return 'You have successfully dumped {0} {1}'.format(count, target_item['name'])
@action('(?:Move to|Enter) phase 2')
def phase_2(queue, username):
    """Advance a phase-1 player to phase 2, discarding their pending deals.

    Unaccepted offers and unaccepted loans made by the player are removed.
    """
    actor = util.get_player(cursor, username, phase=1)
    cursor.execute('UPDATE players SET phase=2 WHERE id=?', (actor['id'],))
    for pending in select_all('offers', offerer=actor['id']):
        util.delete_offer(cursor, pending['id'])
    cursor.execute('DELETE FROM loans WHERE offerer=? AND accepted=0', (actor['id'],))
    return 'You have moved to phase 2. Your pending offers and loans have been cancelled.'
@action('Repay loan {loan_name}')
def repay_loan(queue, username, loan_name):
    """Repay an accepted loan: principal plus interest goes back to the lender.

    Only the borrower (offeree) of an accepted loan may repay; the loan row
    is deleted on success.
    """
    player = util.get_player(cursor, username, phase=1)
    loan = select('loans', name=loan_name)
    if not loan:
        raise ActionError('There is no loan with that name')
    if not loan['accepted']:
        raise ActionError('That loan offer hasn\'t been accepted yet')
    if loan['offeree'] != player['id']:
        raise ActionError('That loan wasn\'t made to you')
    coins = loan['coins'] + loan['interest']
    if coins > util.get_coins(cursor, player['id']):
        raise ActionError('You don\'t have enough coins')
    # BUG FIX: removed an unreachable 'return False' that followed the raise.
    lender = select('players', id=loan['offerer'])
    util.give_coins(cursor, lender['id'], coins)
    util.give_coins(cursor, player['id'], -coins)
    cursor.execute('DELETE FROM loans WHERE id=?', (loan['id'],))
    return 'Loan succesfully repayed'
@action('Build office in {region_name}')
def build_office(queue, username, region_name):
    """Buy a new level-1 office in a region for its current office price."""
    builder = util.get_player(cursor, username, phase=2)
    region = util.get_region(cursor, region_name)
    cost = util.get_office_price(cursor, region['id'])
    if cost > util.get_coins(cursor, builder['id']):
        raise ActionError('You can\'t afford that')
    util.give_coins(cursor, builder['id'], -cost)
    util.give_offices(cursor, builder['id'], region['id'], 1, 1)
    return 'Office succesfully built'
@action('Upgrade(?: level {level})? office in {region_name}')
def upgrade_office(queue, username, level, region_name):
    """Raise one of the player's offices in a region by one level.

    If a level is given, that specific office is upgraded; otherwise the
    lowest-level office in the region is chosen. The upgrade consumes the
    items listed in upgrade_prices for the next level.
    """
    player = util.get_player(cursor, username, phase=2)
    region = util.get_region(cursor, region_name)
    if level:
        try:
            level = int(level)
        except ValueError:
            raise ActionError('{0} is not an integer'.format(level))
        office = select('offices', player_id=player['id'], region_id=region['id'], level=level)
        if not office:
            raise ActionError('You do not have any level {0} offices in {1}'.format(level, region['name']))
    else:
        # No level given: pick the player's lowest-level office in the region.
        cursor.execute('SELECT * FROM offices WHERE player_id=? AND region_id=? ORDER BY level', (player['id'], region['id']))
        office = cursor.fetchone()
        if not office:
            raise ActionError('You do not have any offices in {0}'.format(region['name']))
    old_level = office['level']
    if not select('office_levels', level=old_level+1):
        raise ActionError('That office is already at the maximum level')
    cursor.execute('SELECT item_id, quantity FROM upgrade_prices WHERE level=?', (office['level']+1,))
    upgrade_items = cursor.fetchall()
    # Verify the whole price is payable before consuming anything.
    for item_id, quantity in upgrade_items:
        inv_item = select('inventories', player_id=player['id'], item_id=item_id)
        if not inv_item or inv_item['quantity'] < quantity:
            raise ActionError('You don\'t have enough {0}'.format(select('items', id=item_id)['name']))
    for item_id, quantity in upgrade_items:
        util.give_items(cursor, player['id'], item_id, -quantity)
    util.give_offices(cursor, player['id'], region['id'], old_level, -1)
    util.give_offices(cursor, player['id'], region['id'], old_level+1, 1)
    # BUG FIX: success was reported via 'raise ActionError(...)' (which would
    # undo the upgrade if ActionError triggers a rollback) with the wrong
    # wording; return the message like every other success path.
    return 'Successfully upgraded office in {0} to level {1}'.format(region['name'], old_level+1)
@action('Offer {items} to {god_name}')
def offer_to_god(queue, username, items, god_name):
    """Sacrifice items to a god, possibly earning the god's current myth power.

    Items move from the player's inventory into their per-god myth tally; if
    the god's current power is unclaimed and the player's cumulative offerings
    cover its price, the power is granted for one turn via unused_myth.
    """
    powers_start = util.get_param(cursor, 'myth_powers_start')
    if util.get_global(cursor, 'turn_number') < powers_start:
        # BUG FIX: the message used placeholder '{1}' with one format argument,
        # which raised IndexError instead of rendering; also removed the
        # unreachable 'return False' that followed the raise.
        raise ActionError('Myth powers can\'t be used until turn {0}'.format(powers_start))
    player = util.get_player(cursor, username, phase=2)
    god = select('gods', name=god_name)
    if not god:
        raise ActionError('Unknown god: {0}'.format(god_name))
    try:
        offer_items = util.count_items_str(cursor, items)
    except ValueError as e:
        # BUG FIX: removed an unreachable 'return False' after this raise.
        raise ActionError(str(e))
    for item_id, quantity in offer_items.items():
        if util.get_inventory_quantity(cursor, player['id'], item_id) < quantity:
            raise ActionError('You don\'t have that many {0}'.format(select('items', id=item_id)['name']))
    for item_id, quantity in offer_items.items():
        util.give_items(cursor, player['id'], item_id, -quantity)
        util.give_myth_items(cursor, player['id'], item_id, god['id'], quantity)
    myth_power = util.get_current_power(cursor, god['id'])
    if myth_power:
        # NOTE(review): this bypasses the local `select` closure and calls
        # util.select directly -- confirm both resolve to the same lookup.
        available_myth = util.select(cursor, 'available_myth', myth_power_id=myth_power['id'])
        if not available_myth['purchased']:
            cursor.execute('UPDATE available_myth SET purchased=1 WHERE myth_power_id=?', (myth_power['id'],))
            # NOTE(review): the power is flagged purchased before the price
            # check below succeeds -- TODO confirm this ordering is intended.
            power_items = select_all('myth_power_prices', myth_power_id=myth_power['id'])
            myth_offered = util.get_myth_offered(cursor, player['id'], god['id'])
            have_items = True
            for power_item in power_items:
                if power_item['quantity'] > myth_offered[power_item['item_id']]:
                    have_items = False
                    break
            if have_items:
                cursor.execute('INSERT INTO unused_myth (player_id, myth_power_id) VALUES (?, ?)', (player['id'], myth_power['id']))
                return 'Successfully offered to {0}. You now have {1}. Make sure to use it before the next turn'.format(
                    god['name'], myth_power['name'])
    return 'Successfully offered to {0}. No myth power obtained'.format(god['name'])
@action('Use(?: power)? {myth_power_name}(?: on {target})?')
def use_power(queue, username, myth_power_name, target):
    """Consume one of the player's unused myth powers, queueing its event.

    The power's event is inserted into ``queued_events`` to fire after the
    power's delay, a human-readable message is attached for event types that
    have one, and the power is removed from ``unused_myth``.
    """
    player = util.get_player(cursor, username, phase=2)
    cursor.execute('SELECT * FROM myth_powers WHERE id=(SELECT myth_power_id FROM unused_myth WHERE player_id=?) '
                   'AND name=?', (player['id'], myth_power_name))
    myth_power = cursor.fetchone()
    if not myth_power:
        raise ActionError('You don\'t have that myth power to use')
    event = select('events', id=myth_power['event_id'])
    start_turn = myth_power['delay'] + util.get_global(cursor, 'turn_number') + 1
    cursor.execute('INSERT INTO queued_events (event_id, starts) VALUES (?, ?)', (event['id'], start_turn))
    queued_event_id = cursor.lastrowid
    # BUG FIX: `message` was never assigned for event type 3, which made the
    # later UPDATE raise NameError.  Initialise it and only attach a message
    # when one was built.
    message = None
    if event['type'] == 0:
        # Give items event
        cursor.execute('INSERT INTO give_items_event_players (queued_event_id, player_id) VALUES (?, ?)', (queued_event_id, player['id']))
        items_text = util.format_items(cursor, select_all('give_items_event_items', event_id=event['id']))
        message = '{0}: {1} gets {2}'.format(event['name'], username, items_text)
    elif event['type'] in (1, 2):
        # Price change event (1 = region-wide, 2 = per-item)
        price_change = select('price_change_events', event_id=event['id'])
        buy_change = price_change['buy_change']
        buy_change_text = '{0}% {1}'.format(abs(buy_change), 'increase' if buy_change > 0 else 'decrease')
        sell_change = price_change['sell_change']
        sell_change_text = '{0}% {1}'.format(abs(sell_change), 'increase' if sell_change > 0 else 'decrease')
        end_turn = start_turn + price_change['duration']
        if event['type'] == 1:
            region = select('regions', name=target)
            if not region:
                # BUG FIX: referenced undefined name `region_name`; the
                # user-supplied value is `target`.
                raise ActionError('Unknown region: {0}'.format(target))
            cursor.execute('INSERT INTO price_change_event_regions (queued_event_id, region_id) VALUES (?, ?)', (queued_event_id, region['id']))
            # `region` was just fetched, so re-selecting it by id was redundant.
            message = '{0} in {1}: {2} in buy prices and {3} in sell prices until turn {4}'.format(
                event['name'], region['name'], buy_change_text, sell_change_text, end_turn)
        else:
            items = select_all('price_change_event_items', event_id=event['id'])
            # NOTE(review): rows come from price_change_event_items; verify the
            # item id column really is 'id' and not 'item_id'.
            item_names = ', '.join(select('items', id=item['id'])['name'] for item in items)
            message = '{0}: {1} in buy prices and {2} in sell prices for {3} until turn {4}'.format(
                event['name'], buy_change_text, sell_change_text, item_names, end_turn)
    elif event['type'] == 3:
        # This event type carries no message.
        pass
    else:
        raise ActionError('Internal error: Unknown event type {0}'.format(event['type']))
    if message is not None:
        # BUG FIX: was 'UPDATE queued_events WHERE id=? SET message=?' — invalid
        # SQL (SET must precede WHERE) with the parameters in the wrong order.
        cursor.execute('UPDATE queued_events SET message=? WHERE id=?', (message, queued_event_id))
    cursor.execute('DELETE FROM unused_myth WHERE myth_power_id=? AND player_id=?', (myth_power['id'], player['id']))
    # BUG FIX: '{0} used {1}' plus a leading space in ' on ' produced a double
    # space (or a space before the period when there is no target).
    return '{0} used{1}. It will take effect turn {2}'.format(
        myth_power['name'], ' on ' + target if target else '', start_turn)
@action('Do nothing')
def do_nothing(queue, username):
    """No-op action.

    BUG FIX: this function was previously also named ``use_power``, silently
    shadowing the real ``use_power`` defined above.  The decorator still
    registered both patterns, but the duplicate name was a latent hazard, so
    the function is renamed to match what it does.
    """
    return 'Successfully did nothing'
return actions
|
|
from __future__ import print_function
import glob
import os
import platform
import subprocess
import sys
import tempfile
import textwrap
from timing import monotonic_time_nanos
from tracing import Tracing
from buck_tool import BuckTool, check_output, JAVA_MAX_HEAP_SIZE_MB
from buck_tool import BuckToolException, RestartBuck
from subprocutils import which
import buck_version
# Repo-relative classpath entries joined by BuckRepo._get_java_classpath()
# to run Buck straight out of an ant build of this checkout.
JAVA_CLASSPATHS = [
    "build/abi_processor/classes",
    "build/classes",
    "build/src-gen/classes",
    "build/aosp/classes",
    "build/dx_classes",
    "src",
    "src-gen",
    "third-party/java/android/sdklib.jar",
    "third-party/java/android/sdk-common-24.2.3.jar",
    "third-party/java/android/common-24.2.3.jar",
    "third-party/java/android/layoutlib-api-24.2.3.jar",
    "third-party/java/aopalliance/aopalliance.jar",
    "third-party/java/args4j/args4j-2.0.30.jar",
    "third-party/java/asm/asm-debug-all-5.0.3.jar",
    "third-party/java/closure-templates/soy-excluding-deps.jar",
    "third-party/java/commons-compress/commons-compress-1.8.1.jar",
    "third-party/java/commons-logging/commons-logging-1.2.jar",
    "third-party/java/concurrent-locks/concurrent-locks-1.0.0.jar",
    "third-party/java/dd-plist/dd-plist.jar",
    "third-party/java/ddmlib/ddmlib-22.5.3.jar",
    "third-party/java/eclipse/org.eclipse.core.contenttype_3.5.100.v20160418-1621.jar",
    "third-party/java/eclipse/org.eclipse.core.jobs_3.8.0.v20160319-0610.jar",
    "third-party/java/eclipse/org.eclipse.core.resources_3.11.0.v20160422-0304.jar",
    "third-party/java/eclipse/org.eclipse.core.runtime_3.12.0.v20160427-1901.jar",
    "third-party/java/eclipse/org.eclipse.equinox.common_3.8.0.v20160422-1942.jar",
    "third-party/java/eclipse/org.eclipse.equinox.preferences_3.6.0.v20160120-1756.jar",
    "third-party/java/eclipse/org.eclipse.jdt.core.prefs",
    "third-party/java/eclipse/org.eclipse.jdt.core_3.12.0.v20160426-1326.jar",
    "third-party/java/eclipse/org.eclipse.osgi_3.11.0.v20160427-2120.jar",
    "third-party/java/gson/gson-2.2.4.jar",
    "third-party/java/guava/guava-19.0.jar",
    "third-party/java/guice/guice-3.0.jar",
    "third-party/java/guice/guice-assistedinject-3.0.jar",
    "third-party/java/guice/guice-multibindings-3.0.jar",
    "third-party/java/httpcomponents/httpclient-4.4.1.jar",
    "third-party/java/httpcomponents/httpcore-4.4.1.jar",
    "third-party/java/icu4j/icu4j-54.1.1.jar",
    "third-party/java/infer-annotations/infer-annotations-1.5.jar",
    "third-party/java/ini4j/ini4j-0.5.2.jar",
    "third-party/java/jackson/jackson-annotations-2.7.8.jar",
    "third-party/java/jackson/jackson-core-2.7.8.jar",
    "third-party/java/jackson/jackson-databind-2.7.8.jar",
    "third-party/java/jackson/jackson-datatype-jdk8-2.7.8.jar",
    "third-party/java/jackson/jackson-datatype-guava-2.7.8.jar",
    "third-party/java/jetty/jetty-all-9.2.10.v20150310.jar",
    "third-party/java/jna/jna-4.2.0.jar",
    "third-party/java/jna/jna-platform-4.2.0.jar",
    "third-party/java/jsr/javax.inject-1.jar",
    "third-party/java/jsr/jsr305.jar",
    "third-party/java/kxml2/kxml2-2.3.0.jar",
    "third-party/java/nailgun/nailgun-server-0.9.2-SNAPSHOT.jar",
    "third-party/java/nuprocess/nuprocess-1.1.0.jar",
    "third-party/java/ObjCBridge/ObjCBridge.jar",
    "third-party/java/okhttp/okhttp-3.3.0.jar",
    "third-party/java/okio/okio-1.8.0.jar",
    "third-party/java/oshi/oshi-core-3.3-SNAPSHOT.jar",
    "third-party/java/servlet-api/javax.servlet-api-3.1.0.jar",
    "third-party/java/slf4j/slf4j-jdk14-1.7.5.jar",
    "third-party/java/stringtemplate/ST-4.0.8.jar",
    "third-party/java/thrift/libthrift-0.9.3.jar",
    "third-party/java/xz-java-1.5/xz-1.5.jar",
    # maven/aether libs
    "third-party/java/aether/aether-api-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-connector-basic-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-impl-1.0.0.v20140518.jar",
    "third-party/java/aether/aether-spi-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-transport-http-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-transport-file-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-util-1.0.2.v20150114.jar",
    "third-party/java/commons-codec/commons-codec-1.6.jar",
    "third-party/java/maven/maven-aether-provider-3.2.5.jar",
    "third-party/java/maven/maven-model-3.2.5.jar",
    "third-party/java/maven/maven-model-builder-3.2.5.jar",
    "third-party/java/slf4j/slf4j-api-1.7.5.jar",
    "third-party/java/plexus/plexus-utils-3.0.20.jar",
    "third-party/java/plexus/plexus-interpolation-1.21.jar",
    "third-party/java/eden/eden.jar",
    "third-party/java/eden/java-thrift-dependencies.jar",
]

# Resource name -> repo-relative path; consumed by BuckRepo._get_resource()
# to locate support files in a source checkout.
RESOURCES = {
    "abi_processor_classes": "build/abi_processor/classes",
    "android_agent_path": "assets/android/agent.apk",
    "buck_server": "bin/buck",
    "buck_build_type_info": "config/build_type/LOCAL_ANT/type.txt",
    "dx": "third-party/java/dx/etc/dx",
    "jacoco_agent_jar": "third-party/java/jacoco/jacocoagent.jar",
    "libjcocoa.dylib": "third-party/java/ObjCBridge/libjcocoa.dylib",
    "logging_config_file": "config/logging.properties.st",
    "native_exopackage_fake_path": "assets/android/native-exopackage-fakes.apk",
    "path_to_asm_jar": "third-party/java/asm/asm-debug-all-5.0.3.jar",
    "path_to_rawmanifest_py": "src/com/facebook/buck/util/versioncontrol/rawmanifest.py",
    "path_to_buck_py": "src/com/facebook/buck/parser/buck.py",
    "path_to_intellij_py": "src/com/facebook/buck/command/intellij.py",
    "path_to_pathlib_py": "third-party/py/pathlib/pathlib.py",
    "path_to_pex": "src/com/facebook/buck/python/make_pex.py",
    "path_to_pywatchman": "third-party/py/pywatchman",
    "path_to_scandir_py": "third-party/py/scandir/scandir.py",
    "path_to_sh_binary_template": "src/com/facebook/buck/shell/sh_binary_template",
    "path_to_static_content": "webserver/static",
    "report_generator_jar": "build/report-generator.jar",
    "testrunner_classes": "build/testrunner/classes",
}
def get_ant_env(max_heap_size_mb):
    """Return a copy of the environment suitable for running ant.

    Args:
        max_heap_size_mb: maximum JVM heap size (in MB) to request via -Xmx.

    Returns:
        A dict copy of ``os.environ``.  If ANT_OPTS does not already contain
        a ``-Xmx`` flag, one is appended; an existing ``-Xmx`` is left
        untouched so a user's override always wins.
    """
    ant_env = os.environ.copy()
    ant_opts = ant_env.get('ANT_OPTS', '')
    if '-Xmx' not in ant_opts:  # idiomatic replacement for `.find(...) == -1`
        # Adjust the max heap size only when it's not already specified.
        ant_max_heap_arg = '-Xmx{0}m'.format(max_heap_size_mb)
        if ant_opts:
            ant_opts += ' '
        ant_opts += ant_max_heap_arg
        ant_env['ANT_OPTS'] = ant_opts
    return ant_env
class BuckRepo(BuckTool):
    """A BuckTool implementation backed by a git checkout of the Buck source.

    On construction this may sync the checkout to the revision pinned by the
    project and (re)build Buck with ant when the build output is missing.
    """

    def __init__(self, buck_bin_dir, buck_project):
        super(BuckRepo, self).__init__(buck_project)
        self._buck_dir = self._platform_path(os.path.dirname(buck_bin_dir))
        # Marker file written by the ant build on success.
        self._build_success_file = os.path.join(
            self._buck_dir, "build", "successful-build")
        dot_git = os.path.join(self._buck_dir, '.git')
        # Treat the checkout as git-managed only when a usable git is on
        # PATH; cygwin is explicitly excluded.
        self._is_git = os.path.exists(dot_git) and os.path.isdir(dot_git) and which('git') and \
            sys.platform != 'cygwin'
        self._is_buck_repo_dirty_override = os.environ.get('BUCK_REPOSITORY_DIRTY')
        # NOTE: this local deliberately shadows the imported buck_version
        # module for the remainder of __init__.
        buck_version = buck_project.buck_version
        if self._is_git and not buck_project.has_no_buck_check and buck_version:
            revision = buck_version[0]
            branch = buck_version[1] if len(buck_version) > 1 else None
            self._checkout_and_clean(revision, branch)
        self._build()

    def _checkout_and_clean(self, revision, branch):
        """Sync the checkout to `revision`; clean and restart Buck if it moved."""
        with Tracing('BuckRepo._checkout_and_clean'):
            if not self._revision_exists(revision):
                print(textwrap.dedent("""\
                    Required revision {0} is not
                    available in the local repository.
                    Buck is fetching updates from git. You can disable this by creating
                    a '.nobuckcheck' file in your repository, but this might lead to
                    strange bugs or build failures.""".format(revision)),
                      file=sys.stderr)
                git_command = ['git', 'fetch']
                git_command.extend(['--all'] if not branch else ['origin', branch])
                try:
                    subprocess.check_call(
                        git_command,
                        stdout=sys.stderr,
                        cwd=self._buck_dir)
                except subprocess.CalledProcessError:
                    raise BuckToolException(textwrap.dedent("""\
                        Failed to fetch Buck updates from git."""))
            current_revision = self._get_git_revision()
            if current_revision != revision:
                print(textwrap.dedent("""\
                    Buck is at {0}, but should be {1}.
                    Buck is updating itself. To disable this, add a '.nobuckcheck'
                    file to your project root. In general, you should only disable
                    this if you are developing Buck.""".format(
                    current_revision, revision)),
                    file=sys.stderr)
                try:
                    subprocess.check_call(
                        ['git', 'checkout', '--quiet', revision],
                        cwd=self._buck_dir)
                except subprocess.CalledProcessError:
                    raise BuckToolException(textwrap.dedent("""\
                        Failed to update Buck to revision {0}.""".format(revision)))
                # The checkout changed, so any previous build is stale.
                if os.path.exists(self._build_success_file):
                    os.remove(self._build_success_file)
                ant = self._check_for_ant()
                self._run_ant_clean(ant)
                # Restart so the new revision's launcher logic takes over.
                raise RestartBuck()

    def _join_buck_dir(self, relative_path):
        """Join a '/'-separated repo-relative path onto the buck directory."""
        return os.path.join(self._buck_dir, *(relative_path.split('/')))

    def _has_local_changes(self):
        """Return True when git reports modified tracked files."""
        if not self._is_git:
            return False
        output = check_output(
            ['git', 'ls-files', '-m'],
            cwd=self._buck_dir)
        return bool(output.strip())

    def _get_git_revision(self):
        """Return the current git revision, or 'N/A' outside a git checkout."""
        if not self._is_git:
            return 'N/A'
        return buck_version.get_git_revision(self._buck_dir)

    def _get_git_commit_timestamp(self):
        # -1 signals "unknown" (dirty override set, or not a git checkout).
        if self._is_buck_repo_dirty_override or not self._is_git:
            return -1
        return buck_version.get_git_revision_timestamp(self._buck_dir)

    def _revision_exists(self, revision):
        """Return True when `revision` resolves to an object in the repo."""
        returncode = subprocess.call(
            ['git', 'cat-file', '-e', revision],
            cwd=self._buck_dir)
        return returncode == 0

    def _check_for_ant(self):
        """Return the path to ant, or raise with installation advice."""
        ant = which('ant')
        if not ant:
            message = "You do not have ant on your $PATH. Cannot build Buck."
            if sys.platform == "darwin":
                message += "\nTry running 'brew install ant'."
            raise BuckToolException(message)
        return ant

    def _print_ant_failure_and_exit(self, ant_log_path):
        """Report an ant failure with recovery advice, then raise."""
        print(textwrap.dedent("""\
            ::: 'ant' failed in the buck repo at '{0}',
            ::: and 'buck' is not properly built. It will be unusable
            ::: until the error is corrected. You can check the logs
            ::: at {1} to figure out what broke.""".format(
            self._buck_dir, ant_log_path)), file=sys.stderr)
        if self._is_git:
            raise BuckToolException(textwrap.dedent("""\
                ::: It is possible that running this command will fix it:
                ::: git -C "{0}" clean -xfd""".format(self._buck_dir)))
        else:
            raise BuckToolException(textwrap.dedent("""\
                ::: It is possible that running this command will fix it:
                ::: rm -rf "{0}"/build""".format(self._buck_dir)))

    def _run_ant_clean(self, ant):
        """Run `ant clean`, logging to buck-out; abort on failure."""
        clean_log_path = os.path.join(self._buck_project.get_buck_out_log_dir(), 'ant-clean.log')
        with open(clean_log_path, 'w') as clean_log:
            exitcode = subprocess.call([ant, 'clean'], stdout=clean_log,
                                       cwd=self._buck_dir, env=get_ant_env(JAVA_MAX_HEAP_SIZE_MB))
            # BUG FIX: was `exitcode is not 0` -- identity comparison with an
            # int literal only works via CPython's small-int cache and emits a
            # SyntaxWarning on modern Pythons.
            if exitcode != 0:
                self._print_ant_failure_and_exit(clean_log_path)

    def _run_ant(self, ant):
        """Run the default ant target, logging to buck-out; abort on failure."""
        ant_log_path = os.path.join(self._buck_project.get_buck_out_log_dir(), 'ant.log')
        with open(ant_log_path, 'w') as ant_log:
            exitcode = subprocess.call([ant], stdout=ant_log,
                                       cwd=self._buck_dir, env=get_ant_env(JAVA_MAX_HEAP_SIZE_MB))
            # BUG FIX: same `is not 0` -> `!= 0` correction as _run_ant_clean.
            if exitcode != 0:
                self._print_ant_failure_and_exit(ant_log_path)

    def _build(self):
        """Build Buck with ant unless a successful build is already present."""
        with Tracing('BuckRepo._build'):
            if not os.path.exists(self._build_success_file):
                print(
                    "Buck does not appear to have been built -- building Buck!",
                    file=sys.stderr)
                ant = self._check_for_ant()
                self._run_ant_clean(ant)
                self._run_ant(ant)
                print("All done, continuing with build.", file=sys.stderr)

    def _has_resource(self, resource):
        # Everything listed in RESOURCES exists in a source checkout.
        return True

    def _get_resource(self, resource, exe=False):
        return self._join_buck_dir(RESOURCES[resource.name])

    def _get_buck_version_uid(self):
        """Return the version uid identifying this checkout's build."""
        with Tracing('BuckRepo._get_buck_version_uid'):
            # Check if the developer has requested that we impersonate some other version.
            fake_buck_version_file_path = os.path.join(self._buck_dir, ".fakebuckversion")
            if os.path.exists(fake_buck_version_file_path):
                with open(fake_buck_version_file_path) as fake_buck_version_file:
                    fake_buck_version = fake_buck_version_file.read().strip()
                print(textwrap.dedent("""\
                    ::: Faking buck version %s, despite your buck directory not being that version."""
                                      % fake_buck_version),
                      file=sys.stderr)
                return fake_buck_version

            # First try to get the "clean" buck version. If it succeeds,
            # return it.
            clean_version = buck_version.get_clean_buck_version(
                self._buck_dir,
                allow_dirty=self._is_buck_repo_dirty_override == "1")
            if clean_version is not None:
                return clean_version

            # Otherwise, if there is a .nobuckcheck file, or if there isn't
            # a .buckversion file, fall back to a "dirty" version.
            if (self._buck_project.has_no_buck_check or
                    not self._buck_project.buck_version):
                return buck_version.get_dirty_buck_version(self._buck_dir)

            if self._has_local_changes():
                print(textwrap.dedent("""\
                    ::: Your buck directory has local modifications, and therefore
                    ::: builds will not be able to use a distributed cache.
                    ::: The following files must be either reverted or committed:"""),
                      file=sys.stderr)
                subprocess.call(
                    ['git', 'ls-files', '-m'],
                    stdout=sys.stderr,
                    cwd=self._buck_dir)
            elif os.environ.get('BUCK_CLEAN_REPO_IF_DIRTY') != 'NO':
                print(textwrap.dedent("""\
                    ::: Your local buck directory is dirty, and therefore builds will
                    ::: not be able to use a distributed cache."""), file=sys.stderr)
                if sys.stdout.isatty():
                    print(
                        "::: Do you want to clean your buck directory? [y/N]",
                        file=sys.stderr)
                    # raw_input: this launcher still targets Python 2.
                    choice = raw_input().lower()
                    if choice == "y":
                        subprocess.call(
                            ['git', 'clean', '-fd'],
                            stdout=sys.stderr,
                            cwd=self._buck_dir)
                        raise RestartBuck()

            return buck_version.get_dirty_buck_version(self._buck_dir)

    def _is_buck_production(self):
        # A source checkout is never a production distribution.
        return False

    def _get_extra_java_args(self):
        """Return -D flags describing the git state of this checkout."""
        with Tracing('BuckRepo._get_extra_java_args'):
            return [
                "-Dbuck.git_commit={0}".format(self._get_git_revision()),
                "-Dbuck.git_commit_timestamp={0}".format(
                    self._get_git_commit_timestamp()),
                "-Dbuck.git_dirty={0}".format(
                    int(self._is_buck_repo_dirty_override == "1" or
                        buck_version.is_dirty(self._buck_dir))),
            ]

    def _get_bootstrap_classpath(self):
        return self._join_buck_dir("build/bootstrapper/bootstrapper.jar")

    def _get_java_classpath(self):
        return self._pathsep.join([self._join_buck_dir(p) for p in JAVA_CLASSPATHS])
|
|
# coding: utf-8
#
# Copyright 2010-2014 Ning, Inc.
# Copyright 2014-2020 Groupon, Inc
# Copyright 2020-2021 Equinix, Inc
# Copyright 2014-2021 The Billing Project, LLC
#
# The Billing Project, LLC licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Kill Bill
Kill Bill is an open-source billing and payments platform # noqa: E501
OpenAPI spec version: 0.22.22-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AccountTimeline(object):
    """Swagger model describing an account's activity timeline.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'account': 'Account',
        'bundles': 'List[Bundle]',
        'invoices': 'List[Invoice]',
        'payments': 'List[InvoicePayment]'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'account': 'account',
        'bundles': 'bundles',
        'invoices': 'invoices',
        'payments': 'payments'
    }

    def __init__(self, account=None, bundles=None, invoices=None, payments=None):  # noqa: E501
        """AccountTimeline - a model defined in Swagger"""  # noqa: E501
        self._account = None
        self._bundles = None
        self._invoices = None
        self._payments = None
        self.discriminator = None
        # Route each supplied value through its property setter.
        for name, value in (('account', account), ('bundles', bundles),
                            ('invoices', invoices), ('payments', payments)):
            if value is not None:
                setattr(self, name, value)

    @property
    def account(self):
        """Gets the account of this AccountTimeline.  # noqa: E501

        :return: The account of this AccountTimeline.  # noqa: E501
        :rtype: Account
        """
        return self._account

    @account.setter
    def account(self, account):
        """Sets the account of this AccountTimeline.

        :param account: The account of this AccountTimeline.  # noqa: E501
        :type: Account
        """
        self._account = account

    @property
    def bundles(self):
        """Gets the bundles of this AccountTimeline.  # noqa: E501

        :return: The bundles of this AccountTimeline.  # noqa: E501
        :rtype: List[Bundle]
        """
        return self._bundles

    @bundles.setter
    def bundles(self, bundles):
        """Sets the bundles of this AccountTimeline.

        :param bundles: The bundles of this AccountTimeline.  # noqa: E501
        :type: List[Bundle]
        """
        self._bundles = bundles

    @property
    def invoices(self):
        """Gets the invoices of this AccountTimeline.  # noqa: E501

        :return: The invoices of this AccountTimeline.  # noqa: E501
        :rtype: List[Invoice]
        """
        return self._invoices

    @invoices.setter
    def invoices(self, invoices):
        """Sets the invoices of this AccountTimeline.

        :param invoices: The invoices of this AccountTimeline.  # noqa: E501
        :type: List[Invoice]
        """
        self._invoices = invoices

    @property
    def payments(self):
        """Gets the payments of this AccountTimeline.  # noqa: E501

        :return: The payments of this AccountTimeline.  # noqa: E501
        :rtype: List[InvoicePayment]
        """
        return self._payments

    @payments.setter
    def payments(self, payments):
        """Sets the payments of this AccountTimeline.

        :param payments: The payments of this AccountTimeline.  # noqa: E501
        :type: List[InvoicePayment]
        """
        self._payments = payments

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Recurse one level into lists/dicts of nested models, exactly
            # as the swagger generator's loop does.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, AccountTimeline) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
|
# coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a StarGAN model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
from tensorflow_gan.examples.stargan import data_provider
from tensorflow_gan.examples.stargan import network
# Hyperparameter container consumed by train() below.
HParams = collections.namedtuple('HParams', [
    'batch_size', 'patch_size', 'train_log_dir', 'generator_lr',
    'discriminator_lr', 'max_number_of_steps', 'adam_beta1', 'adam_beta2',
    'gen_disc_step_ratio', 'tf_master', 'ps_replicas', 'task'
])
def _define_model(images, labels):
  """Create the StarGAN Model.

  Wires the generator and discriminator networks from `network` to the
  given input batch.

  Args:
    images: `Tensor` or list of `Tensor` of shape (N, H, W, C).
    labels: `Tensor` or list of `Tensor` of shape (N, num_domains).

  Returns:
    `StarGANModel` namedtuple.
  """
  model = tfgan.stargan_model(
      generator_fn=network.generator,
      discriminator_fn=network.discriminator,
      input_data=images,
      input_data_domain_label=labels)
  return model
def _get_lr(base_lr, max_number_of_steps):
  """Returns a learning rate `Tensor`.

  Args:
    base_lr: A scalar float `Tensor` or a Python number. The base learning
      rate.
    max_number_of_steps: A Python number. The total number of steps to train.

  Returns:
    A scalar float `Tensor` of learning rate which equals `base_lr` when the
    global training step is less than max_number_of_steps / 2, afterwards
    it linearly decays to zero.
  """
  global_step = tf.train.get_or_create_global_step()
  lr_constant_steps = max_number_of_steps // 2

  def _lr_decay():
    # Polynomial (power=1, i.e. linear) decay from base_lr to 0 over the
    # second half of training.
    return tf.train.polynomial_decay(
        learning_rate=base_lr,
        global_step=(global_step - lr_constant_steps),
        decay_steps=(max_number_of_steps - lr_constant_steps),
        end_learning_rate=0.0)

  # Graph-mode conditional: constant LR for the first half, decaying after.
  return tf.cond(
      pred=global_step < lr_constant_steps,
      true_fn=lambda: base_lr,
      false_fn=_lr_decay)
def _get_optimizer(gen_lr, dis_lr, beta1, beta2):
  """Returns generator optimizer and discriminator optimizer.

  Both optimizers are Adam with identical beta parameters; only the
  learning rates differ.

  Args:
    gen_lr: A scalar float `Tensor` or a Python number. The Generator learning
      rate.
    dis_lr: A scalar float `Tensor` or a Python number. The Discriminator
      learning rate.
    beta1: A scalar float `Tensor` or a Python number. The beta1 parameter to
      the `AdamOptimizer`.
    beta2: A scalar float `Tensor` or a Python number. The beta2 parameter to
      the `AdamOptimizer`.

  Returns:
    A tuple of generator optimizer and discriminator optimizer.
  """
  def _adam(lr):
    return tf.train.AdamOptimizer(
        lr, beta1=beta1, beta2=beta2, use_locking=True)

  return _adam(gen_lr), _adam(dis_lr)
def _define_train_ops(model, loss, gen_lr, dis_lr, beta1, beta2,
                      max_number_of_steps):
  """Defines train ops that trains `stargan_model` with `stargan_loss`.

  Args:
    model: A `StarGANModel` namedtuple.
    loss: A `StarGANLoss` namedtuple containing all losses for `stargan_model`.
    gen_lr: A scalar float `Tensor` or a Python number. The Generator base
      learning rate.
    dis_lr: A scalar float `Tensor` or a Python number. The Discriminator base
      learning rate.
    beta1: A scalar float `Tensor` or a Python number. The beta1 parameter to
      the `AdamOptimizer`.
    beta2: A scalar float `Tensor` or a Python number. The beta2 parameter to
      the `AdamOptimizer`.
    max_number_of_steps: A Python number. The total number of steps to train.

  Returns:
    A `GANTrainOps` namedtuple.
  """
  # Both learning rates share the same half-constant, half-decaying schedule.
  generator_lr = _get_lr(gen_lr, max_number_of_steps)
  discriminator_lr = _get_lr(dis_lr, max_number_of_steps)
  generator_opt, discriminator_opt = _get_optimizer(
      generator_lr, discriminator_lr, beta1, beta2)
  train_ops = tfgan.gan_train_ops(
      model,
      loss,
      generator_optimizer=generator_opt,
      discriminator_optimizer=discriminator_opt,
      summarize_gradients=True,
      colocate_gradients_with_ops=True,
      aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
  tf.summary.scalar('generator_lr', generator_lr)
  tf.summary.scalar('discriminator_lr', discriminator_lr)
  return train_ops
def _define_train_step(gen_disc_step_ratio):
  """Get the training step for generator and discriminator for each GAN step.

  Args:
    gen_disc_step_ratio: A python number. The ratio of generator to
      discriminator training steps. Must be positive.

  Returns:
    GANTrainSteps namedtuple representing the training step configuration.

  Raises:
    ValueError: If `gen_disc_step_ratio` is not positive.  (Previously a
      ratio of 0 crashed with an opaque ZeroDivisionError and a negative
      ratio silently produced a zero discriminator step count.)
  """
  if gen_disc_step_ratio <= 0:
    raise ValueError(
        'gen_disc_step_ratio must be positive, got %s' % gen_disc_step_ratio)
  if gen_disc_step_ratio <= 1:
    discriminator_step = int(1 / gen_disc_step_ratio)
    return tfgan.GANTrainSteps(1, discriminator_step)
  else:
    generator_step = int(gen_disc_step_ratio)
    return tfgan.GANTrainSteps(generator_step, 1)
def train(hparams):
  """Trains a StarGAN.

  Builds the input pipeline, model, losses and train ops inside a single
  replica-device-setter scope, then runs `tfgan.gan_train` until
  `hparams.max_number_of_steps`.

  Args:
    hparams: An HParams instance containing the hyperparameters for training.
  """
  # Create the log_dir if not exist.
  if not tf.io.gfile.exists(hparams.train_log_dir):
    tf.io.gfile.makedirs(hparams.train_log_dir)

  # Shard the model to different parameter servers.
  with tf.device(tf.train.replica_device_setter(hparams.ps_replicas)):

    # Create the input dataset.  The data pipeline is pinned to the CPU.
    with tf.name_scope('inputs'), tf.device('/cpu:0'):
      images, labels = data_provider.provide_data('train', hparams.batch_size,
                                                  hparams.patch_size)

    # Define the model.
    with tf.name_scope('model'):
      model = _define_model(images, labels)

    # Add image summary.
    tfgan.eval.add_stargan_image_summaries(
        model, num_images=3 * hparams.batch_size, display_diffs=True)

    # Define the model loss.
    loss = tfgan.stargan_loss(model)

    # Define the train ops.
    with tf.name_scope('train_ops'):
      train_ops = _define_train_ops(model, loss, hparams.generator_lr,
                                    hparams.discriminator_lr,
                                    hparams.adam_beta1, hparams.adam_beta2,
                                    hparams.max_number_of_steps)

    # Define the train steps (how many generator/discriminator sub-steps
    # per GAN step).
    train_steps = _define_train_step(hparams.gen_disc_step_ratio)

    # Define a status message logged every 10 iterations below.
    status_message = tf.strings.join([
        'Starting train step: ',
        tf.as_string(tf.train.get_or_create_global_step())
    ],
                                     name='status_message')

    # Train the model.
    tfgan.gan_train(
        train_ops,
        hparams.train_log_dir,
        get_hooks_fn=tfgan.get_sequential_train_hooks(train_steps),
        hooks=[
            tf.estimator.StopAtStepHook(num_steps=hparams.max_number_of_steps),
            tf.estimator.LoggingTensorHook([status_message], every_n_iter=10)
        ],
        master=hparams.tf_master,
        is_chief=hparams.task == 0)
|
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
js_to_json,
parse_duration,
parse_iso8601,
)
class ViideaIE(InfoExtractor):
    # Lectures hosted on Viidea-powered sites.  A URL may address a single
    # lecture, one part of a multi-part lecture (the /video/<part> suffix),
    # or an event playlist; whitespace inside the pattern is ignored via (?x).
    _VALID_URL = r'''(?x)https?://(?:www\.)?(?:
            videolectures\.net|
            flexilearn\.viidea\.net|
            presentations\.ocwconsortium\.org|
            video\.travel-zoom\.si|
            video\.pomp-forum\.si|
            tv\.nil\.si|
            video\.hekovnik.com|
            video\.szko\.si|
            kpk\.viidea\.com|
            inside\.viidea\.net|
            video\.kiberpipa\.org|
            bvvideo\.si|
            kongres\.viidea\.net|
            edemokracija\.viidea\.com
        )(?:/lecture)?/(?P<id>[^/]+)(?:/video/(?P<part>\d+))?/*(?:[#?].*)?$'''

    _TESTS = [{
        'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/',
        'info_dict': {
            'id': '20171',
            'display_id': 'promogram_igor_mekjavic_eng',
            'ext': 'mp4',
            'title': 'Automatics, robotics and biocybernetics',
            'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
            'thumbnail': r're:http://.*\.jpg',
            'timestamp': 1372349289,
            'upload_date': '20130627',
            'duration': 565,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # video with invalid direct format links (HTTP 403)
        'url': 'http://videolectures.net/russir2010_filippova_nlp/',
        'info_dict': {
            'id': '14891',
            'display_id': 'russir2010_filippova_nlp',
            'ext': 'flv',
            'title': 'NLP at Google',
            'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3',
            'thumbnail': r're:http://.*\.jpg',
            'timestamp': 1284375600,
            'upload_date': '20100913',
            'duration': 5352,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        # event playlist
        'url': 'http://videolectures.net/deeplearning2015_montreal/',
        'info_dict': {
            'id': '23181',
            'title': 'Deep Learning Summer School, Montreal 2015',
            'description': 'md5:0533a85e4bd918df52a01f0e1ebe87b7',
            'thumbnail': r're:http://.*\.jpg',
            'timestamp': 1438560000,
        },
        'playlist_count': 30,
    }, {
        # multi part lecture
        'url': 'http://videolectures.net/mlss09uk_bishop_ibi/',
        'info_dict': {
            'id': '9737',
            'display_id': 'mlss09uk_bishop_ibi',
            'title': 'Introduction To Bayesian Inference',
            'thumbnail': r're:http://.*\.jpg',
            'timestamp': 1251622800,
        },
        'playlist': [{
            'info_dict': {
                'id': '9737_part1',
                'display_id': 'mlss09uk_bishop_ibi_part1',
                'ext': 'wmv',
                'title': 'Introduction To Bayesian Inference (Part 1)',
                'thumbnail': r're:http://.*\.jpg',
                'duration': 4622,
                'timestamp': 1251622800,
                'upload_date': '20090830',
            },
        }, {
            'info_dict': {
                'id': '9737_part2',
                'display_id': 'mlss09uk_bishop_ibi_part2',
                'ext': 'wmv',
                'title': 'Introduction To Bayesian Inference (Part 2)',
                'thumbnail': r're:http://.*\.jpg',
                'duration': 5641,
                'timestamp': 1251622800,
                'upload_date': '20090830',
            },
        }],
        'playlist_count': 2,
    }]

    def _real_extract(self, url):
        lecture_slug, explicit_part_id = re.match(self._VALID_URL, url).groups()

        webpage = self._download_webpage(url, lecture_slug)

        # The page embeds a JS config object carrying the numeric lecture id
        # ('obj_id') and the API base url ('livepipe').
        cfg = self._parse_json(self._search_regex(
            [r'cfg\s*:\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*:\s*\(?\s*function',
             r'cfg\s*:\s*({[^}]+})'],
            webpage, 'cfg'), lecture_slug, js_to_json)

        lecture_id = compat_str(cfg['obj_id'])

        base_url = self._proto_relative_url(cfg['livepipe'], 'http:')

        try:
            lecture_data = self._download_json(
                '%s/site/api/lecture/%s?format=json' % (base_url, lecture_id),
                lecture_id)['lecture'][0]
        except ExtractorError as e:
            # The API reports access restrictions as HTTP 403 with a JSON
            # body whose 'detail' field is the user-readable reason.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                msg = self._parse_json(
                    e.cause.read().decode('utf-8'), lecture_id)
                raise ExtractorError(msg['detail'], expected=True)
            raise

        # Metadata shared by the lecture itself and every part/playlist entry.
        lecture_info = {
            'id': lecture_id,
            'display_id': lecture_slug,
            'title': lecture_data['title'],
            'timestamp': parse_iso8601(lecture_data.get('time')),
            'description': lecture_data.get('description_wiki'),
            'thumbnail': lecture_data.get('thumb'),
        }

        playlist_entries = []
        lecture_type = lecture_data.get('type')
        parts = [compat_str(video) for video in cfg.get('videos', [])]
        if parts:
            multipart = len(parts) > 1

            def extract_part(part_id):
                # Each part exposes its formats via its own SMIL manifest.
                smil_url = '%s/%s/video/%s/smil.xml' % (base_url, lecture_slug, part_id)
                smil = self._download_smil(smil_url, lecture_id)
                info = self._parse_smil(smil, smil_url, lecture_id)
                self._sort_formats(info['formats'])
                # Multi-part lectures get a _partN suffix on ids and titles.
                info['id'] = lecture_id if not multipart else '%s_part%s' % (lecture_id, part_id)
                info['display_id'] = lecture_slug if not multipart else '%s_part%s' % (lecture_slug, part_id)
                if multipart:
                    info['title'] += ' (Part %s)' % part_id
                switch = smil.find('.//switch')
                if switch is not None:
                    info['duration'] = parse_duration(switch.attrib.get('dur'))
                item_info = lecture_info.copy()
                item_info.update(info)
                return item_info

            if explicit_part_id or not multipart:
                result = extract_part(explicit_part_id or parts[0])
            else:
                result = {
                    '_type': 'multi_video',
                    'entries': [extract_part(part) for part in parts],
                }
            result.update(lecture_info)

            # Immediately return explicitly requested part or non event item
            if explicit_part_id or lecture_type != 'evt':
                return result

            playlist_entries.append(result)

        # It's probably a playlist
        if not parts or lecture_type == 'evt':
            playlist_webpage = self._download_webpage(
                '%s/site/ajax/drilldown/?id=%s' % (base_url, lecture_id), lecture_id)
            entries = [
                self.url_result(compat_urlparse.urljoin(url, video_url), 'Viidea')
                for _, video_url in re.findall(
                    r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)]
            playlist_entries.extend(entries)

        playlist = self.playlist_result(playlist_entries, lecture_id)
        playlist.update(lecture_info)
        return playlist
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
from nova.compute import power_state
from nova import flags
from nova.openstack.common import log as logging
# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
# Global nova configuration flags object.
FLAGS = flags.FLAGS
def block_device_info_get_root(block_device_info):
    """Return the root device name from *block_device_info*, or None.

    Treats a missing/None *block_device_info* as an empty mapping.
    """
    info = block_device_info or {}
    return info.get('root_device_name')
def block_device_info_get_swap(block_device_info):
    """Return the swap mapping from *block_device_info*.

    Falls back to an empty placeholder ({'device_name': None,
    'swap_size': 0}) when no swap entry is present.
    """
    swap = (block_device_info or {}).get('swap')
    if swap:
        return swap
    return {'device_name': None, 'swap_size': 0}
def swap_is_usable(swap):
    """Return True if *swap* describes an attachable, non-empty swap device.

    *swap* is a mapping as returned by block_device_info_get_swap, with
    'device_name' and 'swap_size' keys.

    Wraps the chained condition in bool() so callers always get a real
    boolean; the bare ``and`` chain would otherwise leak whatever falsy
    value short-circuited it (None, '', 0, ...), which is surprising for
    a predicate named ``*_is_usable``.
    """
    return bool(swap and swap['device_name'] and swap['swap_size'] > 0)
def block_device_info_get_ephemerals(block_device_info):
    """Return the list of ephemeral disk mappings (possibly empty)."""
    return (block_device_info or {}).get('ephemerals') or []
def block_device_info_get_mapping(block_device_info):
    """Return the block device mapping list (possibly empty)."""
    info = block_device_info or {}
    mapping = info.get('block_device_mapping')
    return mapping if mapping else []
class ComputeDriver(object):
    """Base class for compute drivers.
    The interface to this class talks in terms of 'instances' (Amazon EC2 and
    internal Nova terminology), by which we mean 'running virtual machine'
    (XenAPI terminology) or domain (Xen or libvirt terminology).
    An instance has an ID, which is the identifier chosen by Nova to represent
    the instance further up the stack. This is unfortunately also called a
    'name' elsewhere. As far as this layer is concerned, 'instance ID' and
    'instance name' are synonyms.
    Note that the instance ID or name is not human-readable or
    customer-controlled -- it's an internal ID chosen by Nova. At the
    nova.virt layer, instances do not have human-readable names at all -- such
    things are only known higher up the stack.
    Most virtualization platforms will also have their own identity schemes,
    to uniquely identify a VM or domain. These IDs must stay internal to the
    platform-specific layer, and never escape the connection interface. The
    platform-specific layer is responsible for keeping track of which instance
    ID maps to which platform-specific ID, and vice versa.
    Some methods here take an instance of nova.compute.service.Instance. This
    is the data structure used by nova.compute to store details regarding an
    instance, and pass them into this layer. This layer is responsible for
    translating that generic data structure into terms that are specific to the
    virtualization platform.
    """
    def init_host(self, host):
        """Initialize anything that is necessary for the driver to function,
        including catching up with currently running VM's on the given host."""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_info(self, instance):
        """Get the current status of an instance, by name (not ID!)
        Returns a dict containing:
        :state: the running state, one of the power_state codes
        :max_mem: (int) the maximum memory in KBytes allowed
        :mem: (int) the memory in KBytes used by the domain
        :num_cpu: (int) the number of virtual CPUs for the domain
        :cpu_time: (int) the CPU time used in nanoseconds
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_num_instances(self):
        """Return the total number of virtual machines.
        Return the number of virtual machines that the hypervisor knows
        about.
        .. note::
            This implementation works for all drivers, but it is
            not particularly efficient. Maintainers of the virt drivers are
            encouraged to override this method with something more
            efficient.
        """
        return len(self.list_instances())
    def instance_exists(self, instance_id):
        """Checks existence of an instance on the host.
        :param instance_id: The ID / name of the instance to lookup
        Returns True if an instance with the supplied ID exists on
        the host, False otherwise.
        .. note::
            This implementation works for all drivers, but it is
            not particularly efficient. Maintainers of the virt drivers are
            encouraged to override this method with something more
            efficient.
        """
        return instance_id in self.list_instances()
    def list_instances(self):
        """
        Return the names of all the instances known to the virtualization
        layer, as a list.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """
        Create a new instance/VM/domain on the virtualization platform.
        Once this successfully completes, the instance should be
        running (power_state.RUNNING).
        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.
        :param context: security context
        :param instance: Instance object as returned by DB layer.
                         This function should use the data there to guide
                         the creation of the new instance.
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which to boot this instance
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        raise NotImplementedError()
    def destroy(self, instance, network_info, block_device_info=None):
        """Destroy (shutdown and delete) the specified instance.
        If the instance is not found (for example if networking failed), this
        function should still succeed. It's probably a good idea to log a
        warning in that case.
        :param instance: Instance object as returned by DB layer.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: Information about block devices that should
                                  be detached from the instance.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def reboot(self, instance, network_info, reboot_type):
        """Reboot the specified instance.
        :param instance: Instance object as returned by DB layer.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param reboot_type: Either a HARD or SOFT reboot
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_console_pool_info(self, console_type):
        """Return connection information for the console proxy pool."""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_console_output(self, instance):
        """Return the console output for the given instance."""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_vnc_console(self, instance):
        """Return VNC console connection details for the given instance."""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_all_bw_usage(self, instances, start_time, stop_time=None):
        """Return bandwidth usage info for each interface on each
           running VM"""
        raise NotImplementedError()
    def get_host_ip_addr(self):
        """
        Retrieves the IP address of the dom0
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def attach_volume(self, connection_info, instance_name, mountpoint):
        """Attach the disk to the instance at mountpoint using info"""
        raise NotImplementedError()
    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach the disk attached to the instance"""
        raise NotImplementedError()
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   instance_type, network_info):
        """
        Transfers the disk of a running instance in multiple phases, turning
        off the instance before the end.
        """
        raise NotImplementedError()
    def snapshot(self, context, instance, image_id):
        """
        Snapshots the specified instance.
        :param context: security context
        :param instance: Instance object as returned by DB layer.
        :param image_id: Reference to a pre-created image that will
                         hold the snapshot.
        """
        raise NotImplementedError()
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance):
        """Completes a resize, turning on the migrated instance
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which this instance
                           was created
        """
        raise NotImplementedError()
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def finish_revert_migration(self, instance, network_info):
        """Finish reverting a resize, powering back on the instance"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def pause(self, instance):
        """Pause the specified instance."""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def unpause(self, instance):
        """Unpause paused VM instance"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def suspend(self, instance):
        """suspend the specified instance"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def resume(self, instance):
        """resume the specified instance"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def resume_state_on_host_boot(self, context, instance, network_info):
        """resume guest state when a host is booted"""
        raise NotImplementedError()
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance"""
        raise NotImplementedError()
    def unrescue(self, instance, network_info):
        """Unrescue the specified instance"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def power_off(self, instance):
        """Power off the specified instance."""
        raise NotImplementedError()
    def power_on(self, instance):
        """Power on the specified instance"""
        raise NotImplementedError()
    def update_available_resource(self, ctxt, host):
        """Updates compute manager resource info on ComputeNode table.
        This method is called when nova-compute launches, and
        whenever admin executes "nova-manage service update_resource".
        :param ctxt: security context
        :param host: hostname that compute manager is currently running
        """
        raise NotImplementedError()
    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method, block_migration=False):
        """Live migration of an instance to another host.
        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params block_migration: if true, migrate VM disk.
        """
        raise NotImplementedError()
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Check if it is possible to execute live migration.
        This runs checks on the destination host, and then calls
        back to the source host to check the results.
        :param ctxt: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance
        :param dest: destination host
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        """
        raise NotImplementedError()
    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        """Do required cleanup on dest host after check_can_live_migrate calls
        :param ctxt: security context
        :param dest_check_data: result of check_can_live_migrate_destination
        """
        raise NotImplementedError()
    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        """Check if it is possible to execute live migration.
        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.
        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance
        :param dest_check_data: result of check_can_live_migrate_destination
        """
        raise NotImplementedError()
    def refresh_security_group_rules(self, security_group_id):
        """This method is called after a change to security groups.
        All security groups and their associated rules live in the datastore,
        and calling this method should apply the updated rules to instances
        running the specified security group.
        An error should be raised if the operation cannot complete.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def refresh_security_group_members(self, security_group_id):
        """This method is called when a security group is added to an instance.
        This message is sent to the virtualization drivers on hosts that are
        running an instance that belongs to a security group that has a rule
        that references the security group identified by `security_group_id`.
        It is the responsibility of this method to make sure any rules
        that authorize traffic flow with members of the security group are
        updated and any new members can communicate, and any removed members
        cannot.
        Scenario:
            * we are running on host 'H0' and we have an instance 'i-0'.
            * instance 'i-0' is a member of security group 'speaks-b'
            * group 'speaks-b' has an ingress rule that authorizes group 'b'
            * another host 'H1' runs an instance 'i-1'
            * instance 'i-1' is a member of security group 'b'
        When 'i-1' launches or terminates we will receive the message
        to update members of group 'b', at which time we will make
        any changes needed to the rules for instance 'i-0' to allow
        or deny traffic coming from 'i-1', depending on if it is being
        added or removed from the group.
        In this scenario, 'i-1' could just as easily have been running on our
        host 'H0' and this method would still have been called. The point was
        that this method isn't called on the host where instances of that
        group are running (as is the case with
        :py:meth:`refresh_security_group_rules`) but is called where references
        are made to authorizing those instances.
        An error should be raised if the operation cannot complete.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def refresh_provider_fw_rules(self):
        """This triggers a firewall update based on database changes.
        When this is called, rules have either been added or removed from the
        datastore. You can retrieve rules with
        :py:meth:`nova.db.provider_fw_rule_get_all`.
        Provider rules take precedence over security group rules. If an IP
        would be allowed by a security group ingress rule, but blocked by
        a provider rule, then packets from the IP are dropped. This includes
        intra-project traffic in the case of the allow_project_net_traffic
        flag for the libvirt-derived classes.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def reset_network(self, instance):
        """reset networking for specified instance"""
        # TODO(Vek): Need to pass context in for access to auth_token
        pass
    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        """Setting up filtering rules and waiting for its completion.
        To migrate an instance, filtering rules to hypervisors
        and firewalls are inevitable on destination host.
        ( Waiting only for filtering rules to hypervisor,
        since filtering rules to firewall rules can be set faster).
        Concretely, the below method must be called.
        - setup_basic_filtering (for nova-basic, etc.)
        - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
        to_xml may have to be called since it defines PROJNET, PROJMASK.
        but libvirt migrates those value through migrateToURI(),
        so , no need to be called.
        Don't use thread for this method since migration should
        not be started when setting-up filtering rules operations
        are not completed.
        :params instance_ref: nova.db.sqlalchemy.models.Instance object
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def filter_defer_apply_on(self):
        """Defer application of IPTables rules"""
        pass
    def filter_defer_apply_off(self):
        """Turn off deferral of IPTables rules and apply the rules now"""
        pass
    def unfilter_instance(self, instance, network_info):
        """Stop filtering instance"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def set_admin_password(self, context, instance_id, new_pass=None):
        """
        Set the root password on the specified instance.
        The first parameter is an instance of nova.compute.service.Instance,
        and so the instance is being specified as instance.name. The second
        parameter is the value of the new password.
        """
        raise NotImplementedError()
    def inject_file(self, instance, b64_path, b64_contents):
        """
        Writes a file on the specified instance.
        The first parameter is an instance of nova.compute.service.Instance,
        and so the instance is being specified as instance.name. The second
        parameter is the base64-encoded path to which the file is to be
        written on the instance; the third is the contents of the file, also
        base64-encoded.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def change_instance_metadata(self, context, instance, diff):
        """
        Applies a diff to the instance metadata.
        This is an optional driver method which is used to publish
        changes to the instance's metadata to the hypervisor. If the
        hypervisor has no means of publishing the instance metadata to
        the instance, then this method should not be implemented.
        """
        pass
    def inject_network_info(self, instance, nw_info):
        """inject network info for specified instance"""
        # TODO(Vek): Need to pass context in for access to auth_token
        pass
    def poll_rebooting_instances(self, timeout):
        """Poll for rebooting instances"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def poll_rescued_instances(self, timeout):
        """Poll for rescued instances"""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        raise NotImplementedError()
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation."""
        raise NotImplementedError()
    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def get_host_uptime(self, host):
        """Returns the result of calling "uptime" on the target host."""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        raise NotImplementedError()
    def update_host_status(self):
        """Refresh host stats"""
        raise NotImplementedError()
    def get_host_stats(self, refresh=False):
        """Return currently known host stats"""
        raise NotImplementedError()
    def block_stats(self, instance_name, disk_id):
        """
        Return performance counters associated with the given disk_id on the
        given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
        wr_bytes, errs], where rd indicates read, wr indicates write, req is
        the total number of I/O requests made, bytes is the total number of
        bytes transferred, and errs is the number of requests held up due to a
        full pipeline.
        All counters are long integers.
        This method is optional. On some platforms (e.g. XenAPI) performance
        statistics can be retrieved directly in aggregate form, without Nova
        having to do the aggregation. On those platforms, this method is
        unused.
        Note that this function takes an instance ID.
        """
        raise NotImplementedError()
    def interface_stats(self, instance_name, iface_id):
        """
        Return performance counters associated with the given iface_id on the
        given instance_id. These are returned as [rx_bytes, rx_packets,
        rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
        indicates receive, tx indicates transmit, bytes and packets indicate
        the total number of bytes or packets transferred, and errs and dropped
        is the total number of packets failed / dropped.
        All counters are long integers.
        This method is optional. On some platforms (e.g. XenAPI) performance
        statistics can be retrieved directly in aggregate form, without Nova
        having to do the aggregation. On those platforms, this method is
        unused.
        Note that this function takes an instance ID.
        """
        raise NotImplementedError()
    def legacy_nwinfo(self):
        """
        Indicate if the driver requires the legacy network_info format.
        """
        # TODO(tr3buchet): update all subclasses and remove this
        return True
    def manage_image_cache(self, context):
        """
        Manage the driver's local image cache.
        Some drivers chose to cache images for instances on disk. This method
        is an opportunity to do management of that cache which isn't directly
        related to other calls into the driver. The prime example is to clean
        the cache and remove images which are no longer of interest.
        """
    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate."""
        #NOTE(jogo) Currently only used for XenAPI-Pool
        raise NotImplementedError()
    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate."""
        raise NotImplementedError()
    def undo_aggregate_operation(self, context, op, aggregate_id,
                                 host, set_error=True):
        """Undo for Resource Pools"""
        raise NotImplementedError()
    def get_volume_connector(self, instance):
        """Get connector information for the instance for attaching to volumes.
        Connector information is a dictionary representing the ip of the
        machine that will be making the connection, the name of the iscsi
        initiator and the hostname of the machine as follows::
            {
                'ip': ip,
                'initiator': initiator,
                'host': hostname
            }
        """
        raise NotImplementedError()
|
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import re
# 3p
from nose.plugins.attrib import attr
from mock import Mock
# project
from tests.checks.common import AgentCheckTest
from checks import AgentCheck
from tests.core.test_wmi import TestCommonWMI
# Check the local machine ('.') only; no explicit sites -> '_Total' counters.
MINIMAL_INSTANCE = {
    'host': '.',
}
# Local machine with an explicit site list, including one that does not
# exist so the critical service-check path is exercised.
INSTANCE = {
    'host': '.',
    'sites': ['Default Web Site', 'Test-Website-1', 'Non Existing Website'],
}
# Unreachable host: the check should report a critical 'iis.site_up'.
INVALID_HOST_INSTANCE = {
    'host': 'nonexistinghost'
}
@attr('windows')
@attr(requires='iis')
class IISTest(AgentCheckTest):
    """Integration tests for the IIS check, run against a live local IIS."""
    CHECK_NAME = 'iis'
    # Every metric the check is expected to emit per site.
    IIS_METRICS = (
        'iis.uptime',
        # Network
        'iis.net.bytes_sent',
        'iis.net.bytes_rcvd',
        'iis.net.bytes_total',
        'iis.net.num_connections',
        'iis.net.files_sent',
        'iis.net.files_rcvd',
        'iis.net.connection_attempts',
        # HTTP Methods
        'iis.httpd_request_method.get',
        'iis.httpd_request_method.post',
        'iis.httpd_request_method.head',
        'iis.httpd_request_method.put',
        'iis.httpd_request_method.delete',
        'iis.httpd_request_method.options',
        'iis.httpd_request_method.trace',
        # Errors
        'iis.errors.not_found',
        'iis.errors.locked',
        # Users
        'iis.users.anon',
        'iis.users.nonanon',
        # Requests
        'iis.requests.cgi',
        'iis.requests.isapi',
    )
    def test_basic_check(self):
        """Without a site list, all metrics and an OK '_Total' check emit."""
        self.run_check_twice({'instances': [MINIMAL_INSTANCE]})
        for metric in self.IIS_METRICS:
            self.assertMetric(metric, tags=[], count=1)
        self.assertServiceCheckOK('iis.site_up', tags=["site:{0}".format('Total')], count=1)
        self.coverage_report()
    def test_check_on_specific_websites(self):
        """Existing sites report OK with site tags; missing sites critical."""
        self.run_check_twice({'instances': [INSTANCE]})
        # Tags use the normalized site names ('-'/' ' replaced with '_').
        site_tags = ['Default_Web_Site', 'Test_Website_1']
        for metric in self.IIS_METRICS:
            for site_tag in site_tags:
                self.assertMetric(metric, tags=["site:{0}".format(site_tag)], count=1)
        self.assertServiceCheckOK('iis.site_up',
                                  tags=["site:{0}".format('Default_Web_Site')], count=1)
        self.assertServiceCheckOK('iis.site_up',
                                  tags=["site:{0}".format('Test_Website_1')], count=1)
        self.assertServiceCheckCritical('iis.site_up',
                                        tags=["site:{0}".format('Non_Existing_Website')], count=1)
        self.coverage_report()
    def test_service_check_with_invalid_host(self):
        """An unreachable host yields a critical 'iis.site_up' for Total."""
        self.run_check({'instances': [INVALID_HOST_INSTANCE]})
        self.assertServiceCheckCritical('iis.site_up', tags=["site:{0}".format('Total')])
        self.coverage_report()
@attr('windows')
@attr(requires='windows')
class IISTestCase(AgentCheckTest, TestCommonWMI):
    """Unit tests for the IIS check, backed by the mocked WMI layer."""

    CHECK_NAME = 'iis'

    WIN_SERVICES_MINIMAL_CONFIG = {
        'host': ".",
        'tags': ["mytag1", "mytag2"]
    }

    WIN_SERVICES_CONFIG = {
        'host': ".",
        'tags': ["mytag1", "mytag2"],
        'sites': ["Default Web Site", "Working site", "Failing site"]
    }

    # Every metric the check is expected to emit per site.
    IIS_METRICS = [
        'iis.uptime',
        # Network
        'iis.net.bytes_sent',
        'iis.net.bytes_rcvd',
        'iis.net.bytes_total',
        'iis.net.num_connections',
        'iis.net.files_sent',
        'iis.net.files_rcvd',
        'iis.net.connection_attempts',
        # HTTP Methods
        'iis.httpd_request_method.get',
        'iis.httpd_request_method.post',
        'iis.httpd_request_method.head',
        'iis.httpd_request_method.put',
        'iis.httpd_request_method.delete',
        'iis.httpd_request_method.options',
        'iis.httpd_request_method.trace',
        # Errors
        'iis.errors.not_found',
        'iis.errors.locked',
        # Users
        'iis.users.anon',
        'iis.users.nonanon',
        # Requests
        'iis.requests.cgi',
        'iis.requests.isapi',
    ]

    @staticmethod
    def _normalize_site_name(site):
        """Replicate the check's site-name normalization for tag assertions.

        The check replaces WMI-unfriendly characters (commas, math symbols,
        brackets and whitespace) with underscores before tagging metrics.
        Previously this regex was copy-pasted six times across the tests.
        """
        return re.sub(r"[,\+\*\-/()\[\]{}\s]", "_", site)

    def test_check(self):
        """
        Returns the right metrics and service checks
        """
        # Set up & run the check
        config = {
            'instances': [self.WIN_SERVICES_CONFIG]
        }
        logger = Mock()
        self.run_check_twice(config, mocks={'log': logger})
        # Test metrics
        # ... normalize site-names the same way the check does
        sites = config['instances'][0]['sites']
        default_site_name = self._normalize_site_name(sites[0])
        ok_site_name = self._normalize_site_name(sites[1])
        fail_site_name = self._normalize_site_name(sites[2])
        for site_name in [default_site_name, ok_site_name]:
            for mname in self.IIS_METRICS:
                self.assertMetric(mname, tags=["mytag1", "mytag2", "site:{0}".format(site_name)], count=1)
            self.assertServiceCheck('iis.site_up', status=AgentCheck.OK,
                                    tags=["site:{0}".format(site_name)], count=1)
        self.assertServiceCheck('iis.site_up', status=AgentCheck.CRITICAL,
                                tags=["site:{0}".format(fail_site_name)], count=1)
        # Check completed with no warnings
        self.assertFalse(logger.warning.called)
        self.coverage_report()

    def test_check_2008(self):
        """
        Returns the right metrics and service checks for 2008 IIS
        """
        # Run check
        config = {
            'instances': [self.WIN_SERVICES_CONFIG]
        }
        config['instances'][0]['is_2008'] = True
        self.run_check_twice(config)
        # On 2008, 'TotalBytesTransfered' (sic) replaces 'TotalBytesTransferred'
        # in the WMI query; assert the exact query string is issued.
        query = ("Select ServiceUptime,TotalBytesSent,TotalBytesReceived,TotalBytesTransfered,"
                 "CurrentConnections,TotalFilesSent,TotalFilesReceived,TotalConnectionAttemptsAllInstances,"
                 "TotalGetRequests,TotalPostRequests,TotalHeadRequests,TotalPutRequests,TotalDeleteRequests,"
                 "TotalOptionsRequests,TotalTraceRequests,TotalNotFoundErrors,TotalLockedErrors,TotalAnonymousUsers,"
                 "TotalNonAnonymousUsers,TotalCGIRequests,TotalISAPIExtensionRequests"
                 " from Win32_PerfFormattedData_W3SVC_WebService WHERE "
                 "( Name = 'Failing site' ) OR ( Name = 'Working site' ) OR ( Name = 'Default Web Site' )")
        self.assertWMIQuery(query)
        # Normalize site-names
        sites = config['instances'][0]['sites']
        default_site_name = self._normalize_site_name(sites[0])
        ok_site_name = self._normalize_site_name(sites[1])
        fail_site_name = self._normalize_site_name(sites[2])
        for site_name in [default_site_name, ok_site_name]:
            for mname in self.IIS_METRICS:
                self.assertMetric(mname, tags=["mytag1", "mytag2", "site:{0}".format(site_name)], count=1)
            self.assertServiceCheck('iis.site_up', status=AgentCheck.OK,
                                    tags=["site:{0}".format(site_name)], count=1)
        self.assertServiceCheck('iis.site_up', status=AgentCheck.CRITICAL,
                                tags=["site:{0}".format(fail_site_name)], count=1)
        self.coverage_report()

    def test_check_without_sites_specified(self):
        """
        Returns the right metrics and service checks for the `_Total` site
        """
        # Run check
        config = {
            'instances': [self.WIN_SERVICES_MINIMAL_CONFIG]
        }
        self.run_check_twice(config)
        for mname in self.IIS_METRICS:
            self.assertMetric(mname, tags=["mytag1", "mytag2"], count=1)
        self.assertServiceCheck('iis.site_up', status=AgentCheck.OK,
                                tags=["site:{0}".format('Total')], count=1)
        self.coverage_report()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
# Perf suite root: two directory levels above this test file.
PERF_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from telemetry.unittest_util import system_stub
from unittest_data import system_stub_test_module
class CloudStorageTest(unittest.TestCase):
    """Unit tests for ``system_stub.CloudStorageModuleStub``.

    Covers existence checks, inserts, downloads (``Get`` / ``GetIfChanged``),
    listing, simulated permission/credentials failures, and restoration of
    the stubbed ``open`` builtin.
    """

    # 40-character stand-ins for real SHA-1 content hashes.
    SUCCESS_FILE_HASH = 'success'.zfill(40)
    PUBLIC_FILE_HASH = 'public'.zfill(40)
    PARTNER_FILE_HASH = 'partner'.zfill(40)
    INTERNAL_FILE_HASH = 'internal'.zfill(40)
    UPDATED_HASH = 'updated'.zfill(40)

    def setUp(self):
        self.cloud_storage = system_stub.CloudStorageModuleStub()

        # Files in Cloud Storage.
        self.remote_files = ['preset_public_file.wpr',
                             'preset_partner_file.wpr',
                             'preset_internal_file.wpr']
        self.remote_paths = {
            self.cloud_storage.PUBLIC_BUCKET:
                {'preset_public_file.wpr': CloudStorageTest.PUBLIC_FILE_HASH},
            self.cloud_storage.PARTNER_BUCKET:
                {'preset_partner_file.wpr': CloudStorageTest.PARTNER_FILE_HASH},
            self.cloud_storage.INTERNAL_BUCKET:
                {'preset_internal_file.wpr':
                 CloudStorageTest.INTERNAL_FILE_HASH}}

        # Local data files and hashes.
        self.data_files = ['/path/to/success.wpr',
                           '/path/to/wrong_hash.wpr',
                           '/path/to/preset_public_file.wpr',
                           '/path/to/preset_partner_file.wpr',
                           '/path/to/preset_internal_file.wpr']
        # The *calculated* hash of wrong_hash.wpr is deliberately valid; its
        # mismatch lives in the .sha1 file contents below.
        self.local_file_hashes = {
            '/path/to/success.wpr': CloudStorageTest.SUCCESS_FILE_HASH,
            '/path/to/wrong_hash.wpr': CloudStorageTest.SUCCESS_FILE_HASH,
            '/path/to/preset_public_file.wpr':
                CloudStorageTest.PUBLIC_FILE_HASH,
            '/path/to/preset_partner_file.wpr':
                CloudStorageTest.PARTNER_FILE_HASH,
            '/path/to/preset_internal_file.wpr':
                CloudStorageTest.INTERNAL_FILE_HASH,
        }
        self.cloud_storage.SetCalculatedHashesForTesting(self.local_file_hashes)

        # Local hash files and their contents.
        local_hash_files = {
            '/path/to/success.wpr.sha1': CloudStorageTest.SUCCESS_FILE_HASH,
            '/path/to/wrong_hash.wpr.sha1': 'wronghash'.zfill(40),
            '/path/to/preset_public_file.wpr.sha1':
                CloudStorageTest.PUBLIC_FILE_HASH,
            '/path/to/preset_partner_file.wpr.sha1':
                CloudStorageTest.PARTNER_FILE_HASH,
            '/path/to/preset_internal_file.wpr.sha1':
                CloudStorageTest.INTERNAL_FILE_HASH,
        }
        self.cloud_storage.SetHashFileContentsForTesting(local_hash_files)

    def testSetup(self):
        """Sanity-checks the fixtures installed by setUp."""
        self.assertEqual(self.local_file_hashes,
                         self.cloud_storage.local_file_hashes)
        self.assertEqual(set(self.data_files),
                         set(self.cloud_storage.GetLocalDataFiles()))
        self.assertEqual(self.cloud_storage.default_remote_paths,
                         self.cloud_storage.GetRemotePathsForTesting())
        self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
        self.assertEqual(self.remote_paths,
                         self.cloud_storage.GetRemotePathsForTesting())

    def testExistsEmptyCloudStorage(self):
        # Test empty remote files dictionary.
        self.assertFalse(self.cloud_storage.Exists(
            self.cloud_storage.PUBLIC_BUCKET, 'preset_public_file.wpr'))
        self.assertFalse(self.cloud_storage.Exists(
            self.cloud_storage.PARTNER_BUCKET, 'preset_partner_file.wpr'))
        self.assertFalse(self.cloud_storage.Exists(
            self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr'))

    def testExistsNonEmptyCloudStorage(self):
        # Test non-empty remote files dictionary.
        self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
        self.assertTrue(self.cloud_storage.Exists(
            self.cloud_storage.PUBLIC_BUCKET, 'preset_public_file.wpr'))
        self.assertTrue(self.cloud_storage.Exists(
            self.cloud_storage.PARTNER_BUCKET, 'preset_partner_file.wpr'))
        self.assertTrue(self.cloud_storage.Exists(
            self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr'))
        self.assertFalse(self.cloud_storage.Exists(
            self.cloud_storage.PUBLIC_BUCKET, 'fake_file'))
        self.assertFalse(self.cloud_storage.Exists(
            self.cloud_storage.PARTNER_BUCKET, 'fake_file'))
        self.assertFalse(self.cloud_storage.Exists(
            self.cloud_storage.INTERNAL_BUCKET, 'fake_file'))
        # Reset state.
        self.cloud_storage.SetRemotePathsForTesting()

    def testNonEmptyInsertAndExistsPublic(self):
        # Test non-empty remote files dictionary.
        self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
        self.assertFalse(self.cloud_storage.Exists(
            self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
        self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET,
                                  'success.wpr', '/path/to/success.wpr')
        self.assertTrue(self.cloud_storage.Exists(
            self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
        # Reset state.
        self.cloud_storage.SetRemotePathsForTesting()

    def testEmptyInsertAndExistsPublic(self):
        # Test empty remote files dictionary.
        self.assertFalse(self.cloud_storage.Exists(
            self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
        self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET,
                                  'success.wpr', '/path/to/success.wpr')
        self.assertTrue(self.cloud_storage.Exists(
            self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))

    def testEmptyInsertAndGet(self):
        self.assertRaises(self.cloud_storage.NotFoundError,
                          self.cloud_storage.Get,
                          self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
                          '/path/to/success.wpr')
        self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET,
                                  'success.wpr', '/path/to/success.wpr')
        self.assertTrue(self.cloud_storage.Exists(
            self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
        self.assertEqual(CloudStorageTest.SUCCESS_FILE_HASH,
                         self.cloud_storage.Get(
                             self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
                             '/path/to/success.wpr'))

    def testNonEmptyInsertAndGet(self):
        self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
        self.assertRaises(self.cloud_storage.NotFoundError,
                          self.cloud_storage.Get,
                          self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
                          '/path/to/success.wpr')
        self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET,
                                  'success.wpr', '/path/to/success.wpr')
        self.assertTrue(self.cloud_storage.Exists(
            self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
        self.assertEqual(CloudStorageTest.SUCCESS_FILE_HASH,
                         self.cloud_storage.Get(
                             self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
                             '/path/to/success.wpr'))
        # Reset state.
        self.cloud_storage.SetRemotePathsForTesting()

    def testGetIfChanged(self):
        self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
        self.assertRaises(self.cloud_storage.NotFoundError,
                          self.cloud_storage.Get,
                          self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
                          '/path/to/success.wpr')
        # Local copy is already up to date: no download happens.
        self.assertFalse(self.cloud_storage.GetIfChanged(
            '/path/to/preset_public_file.wpr', self.cloud_storage.PUBLIC_BUCKET))
        # After the remote hash changes, exactly one download happens.
        self.cloud_storage.ChangeRemoteHashForTesting(
            self.cloud_storage.PUBLIC_BUCKET, 'preset_public_file.wpr',
            CloudStorageTest.UPDATED_HASH)
        self.assertTrue(self.cloud_storage.GetIfChanged(
            '/path/to/preset_public_file.wpr', self.cloud_storage.PUBLIC_BUCKET))
        self.assertFalse(self.cloud_storage.GetIfChanged(
            '/path/to/preset_public_file.wpr', self.cloud_storage.PUBLIC_BUCKET))
        # Reset state.
        self.cloud_storage.SetRemotePathsForTesting()

    def testList(self):
        self.assertEqual([],
                         self.cloud_storage.List(
                             self.cloud_storage.PUBLIC_BUCKET))
        self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
        self.assertEqual(['preset_public_file.wpr'],
                         self.cloud_storage.List(
                             self.cloud_storage.PUBLIC_BUCKET))
        # Reset state.
        self.cloud_storage.SetRemotePathsForTesting()

    def testPermissionError(self):
        """All operations on the internal bucket fail for public-only users."""
        self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
        self.cloud_storage.SetPermissionLevelForTesting(
            self.cloud_storage.PUBLIC_PERMISSION)
        self.assertRaises(
            self.cloud_storage.PermissionError, self.cloud_storage.Get,
            self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr',
            '/path/to/preset_internal_file.wpr')
        self.assertRaises(
            self.cloud_storage.PermissionError, self.cloud_storage.GetIfChanged,
            '/path/to/preset_internal_file.wpr',
            self.cloud_storage.INTERNAL_BUCKET)
        self.assertRaises(
            self.cloud_storage.PermissionError, self.cloud_storage.List,
            self.cloud_storage.INTERNAL_BUCKET)
        self.assertRaises(
            self.cloud_storage.PermissionError, self.cloud_storage.Exists,
            self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr')
        self.assertRaises(
            self.cloud_storage.PermissionError, self.cloud_storage.Insert,
            self.cloud_storage.INTERNAL_BUCKET, 'success.wpr',
            '/path/to/success.wpr')
        # Reset state.
        self.cloud_storage.SetRemotePathsForTesting()

    def testCredentialsError(self):
        """All operations fail when credentials are missing/invalid."""
        self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
        self.cloud_storage.SetPermissionLevelForTesting(
            self.cloud_storage.CREDENTIALS_ERROR_PERMISSION)
        self.assertRaises(
            self.cloud_storage.CredentialsError, self.cloud_storage.Get,
            self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr',
            '/path/to/preset_internal_file.wpr')
        # Fixed: GetIfChanged takes (file_path, bucket) — the arguments were
        # previously swapped relative to every other call site in this class.
        self.assertRaises(
            self.cloud_storage.CredentialsError, self.cloud_storage.GetIfChanged,
            '/path/to/preset_internal_file.wpr',
            self.cloud_storage.INTERNAL_BUCKET)
        self.assertRaises(
            self.cloud_storage.CredentialsError, self.cloud_storage.List,
            self.cloud_storage.INTERNAL_BUCKET)
        self.assertRaises(
            self.cloud_storage.CredentialsError, self.cloud_storage.Exists,
            self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr')
        self.assertRaises(
            self.cloud_storage.CredentialsError, self.cloud_storage.Insert,
            self.cloud_storage.INTERNAL_BUCKET, 'success.wpr',
            '/path/to/success.wpr')
        # Reset state.
        self.cloud_storage.SetRemotePathsForTesting()

    def testOpenRestoresCorrectly(self):
        file_path = os.path.realpath(__file__)
        stubs = system_stub.Override(system_stub_test_module, ['open'])
        stubs.open.files = {file_path: 'contents'}
        f = system_stub_test_module.SystemStubTest.TestOpen(file_path)
        self.assertEqual(type(f), system_stub.OpenFunctionStub.FileStub)
        stubs.open.files = {}
        stubs.Restore()
        # This will throw an error if the open stub wasn't restored correctly.
        f = system_stub_test_module.SystemStubTest.TestOpen(file_path)
        self.assertEqual(type(f), file)
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
from collections.abc import MappingView
from types import MappingProxyType
import numpy as np
from astropy import units as u
from astropy.utils.state import ScienceState
from astropy.utils.decorators import format_doc, classproperty, deprecated
from astropy.coordinates.angles import Angle
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
frame_transform_graph,
base_doc)
from astropy.coordinates.attributes import (CoordinateAttribute,
QuantityAttribute,
DifferentialAttribute)
from astropy.coordinates.transformations import AffineTransform
from astropy.coordinates.errors import ConvertError
from .icrs import ICRS
# Only the frame class itself is part of this module's public API.
__all__ = ['Galactocentric']


# Measured by minimizing the difference between a plane of coordinates along
# l=0, b=[-90,90] and the Galactocentric x-z plane
# This is not used directly, but accessed via `get_roll0`. We define it here to
# prevent having to create new Angle objects every time `get_roll0` is called.
_ROLL0 = Angle(58.5986320306*u.degree)
class _StateProxy(MappingView):
    """A read-only view of a mapping.

    Combines `~collections.abc.MappingView` with a
    `~types.MappingProxyType` wrapper so items can be read but never
    assigned through the proxy.
    """

    def __init__(self, mapping):
        super().__init__(mapping)
        # MappingProxyType rejects all writes, making item access read-only.
        self._proxy = MappingProxyType(self._mapping)

    def __getitem__(self, key):
        """Look up ``key`` through the read-only proxy."""
        return self._proxy[key]

    def __deepcopy__(self, memo):
        # Deep-copying "escapes" the proxy: the result is an ordinary,
        # mutable copy of the wrapped mapping itself.
        return copy.deepcopy(self._mapping, memo=memo)
class galactocentric_frame_defaults(ScienceState):
    """This class controls the global setting of default values for the frame
    attributes in the `~astropy.coordinates.Galactocentric` frame, which may be
    updated in future versions of ``astropy``. Note that when using
    `~astropy.coordinates.Galactocentric`, changing values here will not affect
    any attributes that are set explicitly by passing values in to the
    `~astropy.coordinates.Galactocentric` initializer. Modifying these defaults
    will only affect the frame attribute values when using the frame as, e.g.,
    ``Galactocentric`` or ``Galactocentric()`` with no explicit arguments.

    This class controls the parameter settings by specifying a string name,
    with the following pre-specified options:

    - 'pre-v4.0': The original (pre-astropy-v4.0) frame attribute values.
    - 'v4.0': The attribute values as updated in Astropy version 4.0.
    - 'latest': An alias of the most recent parameter set (currently: 'v4.0');
      this is what an unset (`None`) state resolves to.

    Alternatively, user-defined parameter settings may be registered, with
    :meth:`~astropy.coordinates.galactocentric_frame_defaults.register`,
    and used identically as pre-specified parameter sets. At minimum,
    registrations must have unique names and a dictionary of parameters
    with keys "galcen_coord", "galcen_distance", "galcen_v_sun", "z_sun",
    "roll". See examples below.

    This class also tracks the references for all parameter values in the
    attribute ``references``, as well as any further information the registry.
    The pre-specified options can be extended to include similar
    state information as user-defined parameter settings -- for example, to add
    parameter uncertainties.

    The preferred method for getting a parameter set and metadata, by name, is
    :meth:`~galactocentric_frame_defaults.get_from_registry` since
    it ensures the immutability of the registry.

    See :ref:`astropy:astropy-coordinates-galactocentric-defaults` for more
    information.

    Examples
    --------
    The default `~astropy.coordinates.Galactocentric` frame parameters can be
    modified globally::

        >>> from astropy.coordinates import galactocentric_frame_defaults
        >>> _ = galactocentric_frame_defaults.set('v4.0') # doctest: +SKIP
        >>> Galactocentric() # doctest: +SKIP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg)>
        >>> _ = galactocentric_frame_defaults.set('pre-v4.0') # doctest: +SKIP
        >>> Galactocentric() # doctest: +SKIP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>

    The default parameters can also be updated by using this class as a context
    manager::

        >>> with galactocentric_frame_defaults.set('pre-v4.0'):
        ...     print(Galactocentric()) # doctest: +FLOAT_CMP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>

    Again, changing the default parameter values will not affect frame
    attributes that are explicitly specified::

        >>> import astropy.units as u
        >>> with galactocentric_frame_defaults.set('pre-v4.0'):
        ...     print(Galactocentric(galcen_distance=8.0*u.kpc)) # doctest: +FLOAT_CMP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.0 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>

    Additional parameter sets may be registered, for instance to use the
    Dehnen & Binney (1998) measurements of the solar motion. We can also
    add metadata, such as the 1-sigma errors. In this example we will modify
    the required key "parameters", change the recommended key "references" to
    match "parameters", and add the extra key "error" (any key can be added)::

        >>> state = galactocentric_frame_defaults.get_from_registry("v4.0")
        >>> state["parameters"]["galcen_v_sun"] = (10.00, 225.25, 7.17) * (u.km / u.s)
        >>> state["references"]["galcen_v_sun"] = "https://ui.adsabs.harvard.edu/full/1998MNRAS.298..387D"
        >>> state["error"] = {"galcen_v_sun": (0.36, 0.62, 0.38) * (u.km / u.s)}
        >>> galactocentric_frame_defaults.register(name="DB1998", **state)

    Just as in the previous examples, the new parameter set can be retrieved with::

        >>> state = galactocentric_frame_defaults.get_from_registry("DB1998")
        >>> print(state["error"]["galcen_v_sun"])  # doctest: +FLOAT_CMP
        [0.36 0.62 0.38] km / s

    """

    # Name the 'latest' alias resolves to; update when a newer set is added.
    _latest_value = 'v4.0'
    _value = None  # currently-active parameter dict (set through validate())
    _references = None  # references for the active parameter set
    _state = dict()  # all other data

    # Note: _StateProxy() produces read-only view of enclosed mapping.
    _registry = {
        "v4.0": {
            "parameters": _StateProxy(
                {
                    "galcen_coord": ICRS(
                        ra=266.4051 * u.degree, dec=-28.936175 * u.degree
                    ),
                    "galcen_distance": 8.122 * u.kpc,
                    "galcen_v_sun": r.CartesianDifferential(
                        [12.9, 245.6, 7.78] * (u.km / u.s)
                    ),
                    "z_sun": 20.8 * u.pc,
                    "roll": 0 * u.deg,
                }
            ),
            "references": _StateProxy(
                {
                    "galcen_coord": "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
                    "galcen_distance": "https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G",
                    "galcen_v_sun": [
                        "https://ui.adsabs.harvard.edu/abs/2018RNAAS...2..210D",
                        "https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G",
                        "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
                    ],
                    "z_sun": "https://ui.adsabs.harvard.edu/abs/2019MNRAS.482.1417B",
                    "roll": None,
                }
            ),
        },
        "pre-v4.0": {
            "parameters": _StateProxy(
                {
                    "galcen_coord": ICRS(
                        ra=266.4051 * u.degree, dec=-28.936175 * u.degree
                    ),
                    "galcen_distance": 8.3 * u.kpc,
                    "galcen_v_sun": r.CartesianDifferential(
                        [11.1, 220 + 12.24, 7.25] * (u.km / u.s)
                    ),
                    "z_sun": 27.0 * u.pc,
                    "roll": 0 * u.deg,
                }
            ),
            "references": _StateProxy(
                {
                    "galcen_coord": "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
                    "galcen_distance": "https://ui.adsabs.harvard.edu/#abs/2009ApJ...692.1075G",
                    "galcen_v_sun": [
                        "https://ui.adsabs.harvard.edu/#abs/2010MNRAS.403.1829S",
                        "https://ui.adsabs.harvard.edu/#abs/2015ApJS..216...29B",
                    ],
                    "z_sun": "https://ui.adsabs.harvard.edu/#abs/2001ApJ...553..184C",
                    "roll": None,
                }
            ),
        },
    }

    @classproperty  # read-only
    def parameters(cls):
        # The currently-active default parameter dict.
        return cls._value

    @classproperty  # read-only
    def references(cls):
        # References/metadata for the currently-active parameter set.
        return cls._references

    @classmethod
    def get_from_registry(cls, name: str):
        """
        Return Galactocentric solar parameters and metadata given string names
        for the parameter sets. This method ensures the returned state is a
        mutable copy, so any changes made do not affect the registry state.

        Returns
        -------
        state : dict
            Copy of the registry for the string name.
            Should contain, at minimum:

            - "parameters": dict
                Galactocentric solar parameters
            - "references" : Dict[str, Union[str, Sequence[str]]]
                References for "parameters".
                Fields are str or sequence of str.

        Raises
        ------
        KeyError
            If invalid string input to registry
            to retrieve solar parameters for Galactocentric frame.
        """
        # Resolve the meaning of 'latest': latest parameter set is from v4.0
        # - update this as newer parameter choices are added
        if name == 'latest':
            name = cls._latest_value

        # Get the state from the registry.
        # Copy to ensure registry is immutable to modifications of "_value".
        # Raises KeyError if `name` is invalid string input to registry
        # to retrieve solar parameters for Galactocentric frame.
        state = copy.deepcopy(cls._registry[name])  # ensure mutable

        return state

    @deprecated("v4.2", alternative="`get_from_registry`")
    @classmethod
    def get_solar_params_from_string(cls, arg):
        """
        Return Galactocentric solar parameters given string names
        for the parameter sets.

        Returns
        -------
        parameters : dict
            Copy of Galactocentric solar parameters from registry

        Raises
        ------
        KeyError
            If invalid string input to registry
            to retrieve solar parameters for Galactocentric frame.
        """
        return cls.get_from_registry(arg)["parameters"]

    @classmethod
    def validate(cls, value):
        # `None` means "use the latest registered parameter set".
        if value is None:
            value = cls._latest_value

        if isinstance(value, str):
            state = cls.get_from_registry(value)
            cls._references = state["references"]
            cls._state = state
            parameters = state["parameters"]

        elif isinstance(value, dict):
            # NOTE(review): a plain dict does not refresh `_references` /
            # `_state`; any previously-set values persist — confirm intended.
            parameters = value

        elif isinstance(value, Galactocentric):
            # turn the frame instance into a dict of frame attributes
            parameters = dict()
            for k in value.frame_attributes:
                parameters[k] = getattr(value, k)
            cls._references = value.frame_attribute_references.copy()
            cls._state = dict(parameters=parameters,
                              references=cls._references)

        else:
            raise ValueError("Invalid input to retrieve solar parameters for "
                             "Galactocentric frame: input must be a string, "
                             "dict, or Galactocentric instance")

        return parameters

    @classmethod
    def register(cls, name: str, parameters: dict, references=None,
                 **meta: dict):
        """Register a set of parameters.

        Parameters
        ----------
        name : str
            The registration name for the parameter and metadata set.
        parameters : dict
            The solar parameters for Galactocentric frame.
        references : dict or None, optional
            References for contents of `parameters`.
            None becomes empty dict.
        **meta : dict, optional
            Any other properties to register.

        """
        # check on contents of `parameters`
        must_have = {"galcen_coord", "galcen_distance", "galcen_v_sun",
                     "z_sun", "roll"}
        missing = must_have.difference(parameters)
        if missing:
            raise ValueError(f"Missing parameters: {missing}")

        references = references or {}  # None -> {}
        state = dict(parameters=parameters, references=references)
        state.update(meta)  # meta never has keys "parameters" or "references"

        cls._registry[name] = state
doc_components = """
x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`x` position component.
y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`y` position component.
z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`z` position component.
v_x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_x` velocity component.
v_y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_y` velocity component.
v_z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_z` velocity component.
"""
doc_footer = """
Other parameters
----------------
galcen_coord : `ICRS`, optional, keyword-only
The ICRS coordinates of the Galactic center.
galcen_distance : `~astropy.units.Quantity`, optional, keyword-only
The distance from the sun to the Galactic center.
galcen_v_sun : `~astropy.coordinates.representation.CartesianDifferential`, `~astropy.units.Quantity` ['speed'], optional, keyword-only
The velocity of the sun *in the Galactocentric frame* as Cartesian
velocity components.
z_sun : `~astropy.units.Quantity` ['length'], optional, keyword-only
The distance from the sun to the Galactic midplane.
roll : `~astropy.coordinates.Angle`, optional, keyword-only
The angle to rotate about the final x-axis, relative to the
orientation for Galactic. For example, if this roll angle is 0,
the final x-z plane will align with the Galactic coordinates x-z
plane. Unless you really know what this means, you probably should
not change this!
Examples
--------
To transform to the Galactocentric frame with the default
frame attributes, pass the uninstantiated class name to the
``transform_to()`` method of a `~astropy.coordinates.SkyCoord` object::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> c = coord.SkyCoord(ra=[158.3122, 24.5] * u.degree,
... dec=[-17.3, 81.52] * u.degree,
... distance=[11.5, 24.12] * u.kpc,
... frame='icrs')
>>> c.transform_to(coord.Galactocentric) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.43489286, -9.40062188, 6.51345359),
(-21.11044918, 18.76334013, 7.83175149)]>
To specify a custom set of parameters, you have to include extra keyword
arguments when initializing the Galactocentric frame object::
>>> c.transform_to(coord.Galactocentric(galcen_distance=8.1*u.kpc)) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.1 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.41284763, -9.40062188, 6.51346272),
(-21.08839478, 18.76334013, 7.83184184)]>
Similarly, transforming from the Galactocentric frame to another coordinate frame::
>>> c = coord.SkyCoord(x=[-8.3, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[0.027, 24.12] * u.kpc,
... frame=coord.Galactocentric)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 88.22423301, 29.88672864, 0.17813456),
(289.72864549, 49.9865043 , 85.93949064)]>
Or, with custom specification of the Galactic center::
>>> c = coord.SkyCoord(x=[-8.0, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[21.0, 24120.0] * u.pc,
... frame=coord.Galactocentric,
... z_sun=21 * u.pc, galcen_distance=8. * u.kpc)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 86.2585249 , 28.85773187, 2.75625475e-05),
(289.77285255, 50.06290457, 8.59216010e+01)]>
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class Galactocentric(BaseCoordinateFrame):
    r"""
    A coordinate or frame in the Galactocentric system.

    This frame allows specifying the Sun-Galactic center distance, the height of
    the Sun above the Galactic midplane, and the solar motion relative to the
    Galactic center. However, as there is no modern standard definition of a
    Galactocentric reference frame, it is important to pay attention to the
    default values used in this class if precision is important in your code.
    The default values of the parameters of this frame are taken from the
    original definition of the frame in 2014. As such, the defaults are somewhat
    out of date relative to recent measurements made possible by, e.g., Gaia.
    The defaults can, however, be changed at runtime by setting the parameter
    set name in `~astropy.coordinates.galactocentric_frame_defaults`.

    The default parameter set resolves to ``'latest'`` (currently an alias for
    the ``'v4.0'`` set); a fixed parameter set can instead be selected with,
    e.g., ``galactocentric_frame_defaults.set('pre-v4.0')``, and other
    parameter set names may be added in future versions. To find out the
    scientific papers that the current default parameters are derived from, use
    ``galcen.frame_attribute_references`` (where ``galcen`` is an instance of
    this frame), which will update even if the default parameter set is changed.

    The position of the Sun is assumed to be on the x axis of the final,
    right-handed system. That is, the x axis points from the position of
    the Sun projected to the Galactic midplane to the Galactic center --
    roughly towards :math:`(l,b) = (0^\circ,0^\circ)`. For the default
    transformation (:math:`{\rm roll}=0^\circ`), the y axis points roughly
    towards Galactic longitude :math:`l=90^\circ`, and the z axis points
    roughly towards the North Galactic Pole (:math:`b=90^\circ`).

    For a more detailed look at the math behind this transformation, see
    the document :ref:`astropy:coordinates-galactocentric`.

    The frame attributes are listed under **Other Parameters**.
    """

    default_representation = r.CartesianRepresentation
    default_differential = r.CartesianDifferential

    # frame attributes
    galcen_coord = CoordinateAttribute(frame=ICRS)
    galcen_distance = QuantityAttribute(unit=u.kpc)

    galcen_v_sun = DifferentialAttribute(
        allowed_classes=[r.CartesianDifferential])

    z_sun = QuantityAttribute(unit=u.pc)
    roll = QuantityAttribute(unit=u.deg)

    def __init__(self, *args, **kwargs):
        # Set default frame attribute values based on the ScienceState instance
        # for the solar parameters defined above
        default_params = galactocentric_frame_defaults.get()
        self.frame_attribute_references = \
            galactocentric_frame_defaults.references.copy()

        for k in default_params:
            if k in kwargs:
                # If a frame attribute is set by the user, remove its reference
                self.frame_attribute_references.pop(k, None)

            # Keep the frame attribute if it is set by the user, otherwise use
            # the default value
            kwargs[k] = kwargs.get(k, default_params[k])

        super().__init__(*args, **kwargs)

    @classmethod
    def get_roll0(cls):
        """
        The additional roll angle (about the final x axis) necessary to align
        the final z axis to match the Galactic yz-plane. Setting the ``roll``
        frame attribute to -this method's return value removes this rotation,
        allowing the use of the `Galactocentric` frame in more general contexts.
        """
        # note that the actual value is defined at the module level. We make at
        # a property here because this module isn't actually part of the public
        # API, so it's better for it to be accessible from Galactocentric
        return _ROLL0
# ICRS to/from Galactocentric ----------------------->
def get_matrix_vectors(galactocentric_frame, inverse=False):
    """
    Use the ``inverse`` argument to get the inverse transformation, matrix and
    offsets to go from Galactocentric to ICRS.
    """
    gcf = galactocentric_frame  # shorthand

    # Align x(ICRS) with the vector to the Galactic center: undo the center's
    # declination, rotate by its right ascension, then apply the extra roll
    # away from the Galactic x-z plane.
    rot = matrix_product(
        rotation_matrix(gcf.get_roll0() - gcf.roll, 'x'),
        rotation_matrix(-gcf.galcen_coord.dec, 'y'),
        rotation_matrix(gcf.galcen_coord.ra, 'z'))

    # Tilt about y' to account for the Sun's height above the Galactic
    # midplane.
    tilt = rotation_matrix(-np.arcsin(gcf.z_sun / gcf.galcen_distance), 'y')

    # Total rotation, plus the Sun->center translation re-aligned to the
    # tilted system.
    mat = matrix_product(tilt, rot)
    translation = r.CartesianRepresentation(gcf.galcen_distance * [1., 0., 0.])
    offset = -translation.transform(tilt)

    if inverse:
        # the inverse of a rotation matrix is a transpose, which is much faster
        # and more stable to compute
        mat = matrix_transpose(mat)
        offset = (-offset).transform(mat)
        offset_v = r.CartesianDifferential.from_cartesian(
            (-gcf.galcen_v_sun).to_cartesian().transform(mat))
        offset = offset.with_differentials(offset_v)
    else:
        offset = offset.with_differentials(gcf.galcen_v_sun)

    return mat, offset
def _check_coord_repr_diff_types(c):
    """Raise `ConvertError` unless ``c`` carries full 3D data.

    Galactocentric transforms need a 3D position (not unit-spherical) and,
    if velocities are attached, a 3D velocity as well.
    """
    if isinstance(c.data, r.UnitSphericalRepresentation):
        raise ConvertError("Transforming to/from a Galactocentric frame "
                           "requires a 3D coordinate, e.g. (angle, angle, "
                           "distance) or (x, y, z).")

    incomplete_velocity_types = (r.UnitSphericalDifferential,
                                 r.UnitSphericalCosLatDifferential,
                                 r.RadialDifferential)
    diffs = c.data.differentials
    if 's' in diffs and isinstance(diffs['s'], incomplete_velocity_types):
        raise ConvertError("Transforming to/from a Galactocentric frame "
                           "requires a 3D velocity, e.g., proper motion "
                           "components and radial velocity.")
@frame_transform_graph.transform(AffineTransform, ICRS, Galactocentric)
def icrs_to_galactocentric(icrs_coord, galactocentric_frame):
    """Affine transform (matrix + offset) from ICRS to Galactocentric."""
    # Reject unit-spherical (2D) positions/velocities up front; the affine
    # transform requires full 3D data.
    _check_coord_repr_diff_types(icrs_coord)
    return get_matrix_vectors(galactocentric_frame)
@frame_transform_graph.transform(AffineTransform, Galactocentric, ICRS)
def galactocentric_to_icrs(galactocentric_coord, icrs_frame):
    """Affine transform (matrix + offset) from Galactocentric back to ICRS."""
    _check_coord_repr_diff_types(galactocentric_coord)
    return get_matrix_vectors(galactocentric_coord, inverse=True)


# Create loopback transformation
# (Galactocentric -> Galactocentric, routed through ICRS, so frames with
# different attribute values can be transformed into one another).
frame_transform_graph._add_merged_transform(Galactocentric, ICRS, Galactocentric)
|
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import csv
import math
import pytz
from datetime import datetime
from time import time
from random import shuffle
from httplib import CannotSendRequest
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
from cgi import parse_qs
from cStringIO import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
from graphite.compat import HttpResponse
from graphite.util import getProfileByUsername, json, unpickle
from graphite.remote_storage import HTTPConnectionWithTimeout
from graphite.logger import log
from graphite.render.evaluator import evaluateTarget
from graphite.render.attime import parseATTime
from graphite.render.functions import PieFunctions
from graphite.render.hashing import hashRequest, hashData
from graphite.render.glyph import GraphTypes
from django.http import HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.cache import add_never_cache_headers, patch_response_headers
def renderView(request):
  """Main render endpoint.

  Parses options from the request, gathers (possibly cached) series data,
  and returns it either in a data format (csv/json/raw/pickle) or as a
  rendered graph image (png/svg).  Uses two cache layers: a whole-response
  cache keyed on the request, and a data cache keyed on targets+time range.
  """
  start = time()
  (graphOptions, requestOptions) = parseOptions(request)
  useCache = 'noCache' not in requestOptions
  cacheTimeout = requestOptions['cacheTimeout']
  requestContext = {
    'startTime' : requestOptions['startTime'],
    'endTime' : requestOptions['endTime'],
    'localOnly' : requestOptions['localOnly'],
    'data' : []
  }
  data = requestContext['data']
  # First we check the request cache
  if useCache:
    requestKey = hashRequest(request)
    cachedResponse = cache.get(requestKey)
    if cachedResponse:
      log.cache('Request-Cache hit [%s]' % requestKey)
      log.rendering('Returned cached response in %.6f' % (time() - start))
      return cachedResponse
    else:
      log.cache('Request-Cache miss [%s]' % requestKey)
  # Now we prepare the requested data
  if requestOptions['graphType'] == 'pie':
    for target in requestOptions['targets']:
      if target.find(':') >= 0:
        # Literal "name:value" slice -- no metric evaluation needed.
        try:
          name,value = target.split(':',1)
          value = float(value)
        except:
          raise ValueError("Invalid target '%s'" % target)
        data.append( (name,value) )
      else:
        # Evaluate the target and reduce each series to one value using
        # the configured pie mode (e.g. average).
        seriesList = evaluateTarget(requestContext, target)
        for series in seriesList:
          func = PieFunctions[requestOptions['pieMode']]
          data.append( (series.name, func(requestContext, series) or 0 ))
  elif requestOptions['graphType'] == 'line':
    # Let's see if at least our data is cached
    if useCache:
      targets = requestOptions['targets']
      startTime = requestOptions['startTime']
      endTime = requestOptions['endTime']
      dataKey = hashData(targets, startTime, endTime)
      cachedData = cache.get(dataKey)
      if cachedData:
        log.cache("Data-Cache hit [%s]" % dataKey)
      else:
        log.cache("Data-Cache miss [%s]" % dataKey)
    else:
      cachedData = None
    if cachedData is not None:
      requestContext['data'] = data = cachedData
    else: # Have to actually retrieve the data now
      for target in requestOptions['targets']:
        if not target.strip():
          continue
        t = time()
        seriesList = evaluateTarget(requestContext, target)
        log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
        data.extend(seriesList)
      if useCache:
        cache.add(dataKey, data, cacheTimeout)
  # If data is all we needed, we're done
  format = requestOptions.get('format')
  if format == 'csv':
    response = HttpResponse(content_type='text/csv')
    writer = csv.writer(response, dialect='excel')
    for series in data:
      for i, value in enumerate(series):
        # Timestamps are reconstructed from start+step in the request's
        # timezone.
        timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions['tzinfo'])
        writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))
    return response
  if format == 'json':
    series_data = []
    if 'maxDataPoints' in requestOptions and any(data):
      startTime = min([series.start for series in data])
      endTime = max([series.end for series in data])
      timeRange = endTime - startTime
      maxDataPoints = requestOptions['maxDataPoints']
      for series in data:
        numberOfDataPoints = timeRange/series.step
        if maxDataPoints < numberOfDataPoints:
          valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
          secondsPerPoint = int(valuesPerPoint * series.step)
          # Nudge start over a little bit so that the consolidation bands align with each call
          # removing 'jitter' seen when refreshing.
          nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
          series.start = series.start + nudge
          valuesToLose = int(nudge/series.step)
          # NOTE(review): range(1, valuesToLose) drops valuesToLose-1
          # points, not valuesToLose -- confirm this off-by-one is intended.
          for r in range(1, valuesToLose):
            del series[0]
          series.consolidate(valuesPerPoint)
          timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
        else:
          timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
        datapoints = zip(series, timestamps)
        series_data.append(dict(target=series.name, datapoints=datapoints))
    else:
      for series in data:
        timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
        datapoints = zip(series, timestamps)
        series_data.append(dict(target=series.name, datapoints=datapoints))
    if 'jsonp' in requestOptions:
      response = HttpResponse(
        content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
        content_type='text/javascript')
    else:
      response = HttpResponse(content=json.dumps(series_data),
                              content_type='application/json')
    if useCache:
      patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
      add_never_cache_headers(response)
    return response
  if format == 'raw':
    response = HttpResponse(content_type='text/plain')
    for series in data:
      response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
      response.write( ','.join(map(str,series)) )
      response.write('\n')
    log.rendering('Total rawData rendering time %.6f' % (time() - start))
    return response
  if format == 'svg':
    # Not a data format: falls through to the image-rendering path below
    # with the output format switched to SVG.
    graphOptions['outputFormat'] = 'svg'
  if format == 'pickle':
    response = HttpResponse(content_type='application/pickle')
    seriesInfo = [series.getInfo() for series in data]
    pickle.dump(seriesInfo, response, protocol=-1)
    log.rendering('Total pickle rendering time %.6f' % (time() - start))
    return response
  # We've got the data, now to render it
  graphOptions['data'] = data
  if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
    image = delegateRendering(requestOptions['graphType'], graphOptions)
  else:
    image = doImageRender(requestOptions['graphClass'], graphOptions)
  useSVG = graphOptions.get('outputFormat') == 'svg'
  if useSVG and 'jsonp' in requestOptions:
    response = HttpResponse(
      content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
      content_type='text/javascript')
  else:
    response = buildResponse(image, 'image/svg+xml' if useSVG else 'image/png')
  if useCache:
    cache.add(requestKey, response, cacheTimeout)
    patch_response_headers(response, cache_timeout=cacheTimeout)
  else:
    add_never_cache_headers(response)
  log.rendering('Total rendering time %.6f seconds' % (time() - start))
  return response
def parseOptions(request):
  """Split query parameters into a (graphOptions, requestOptions) pair.

  graphOptions are passed to the graph class constructor; requestOptions
  control data retrieval, caching, timezone and output format.
  """
  queryParams = request.REQUEST
  # Start with some defaults
  graphOptions = {'width' : 330, 'height' : 250}
  requestOptions = {}
  graphType = queryParams.get('graphType','line')
  assert graphType in GraphTypes, "Invalid graphType '%s', must be one of %s" % (graphType,GraphTypes.keys())
  graphClass = GraphTypes[graphType]
  # Fill in the requestOptions
  requestOptions['graphType'] = graphType
  requestOptions['graphClass'] = graphClass
  requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
  requestOptions['cacheTimeout'] = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
  requestOptions['targets'] = []
  # Extract the targets out of the queryParams
  mytargets = []
  # Normal format: ?target=path.1&target=path.2
  if len(queryParams.getlist('target')) > 0:
    mytargets = queryParams.getlist('target')
  # Rails/PHP/jQuery common practice format: ?target[]=path.1&target[]=path.2
  elif len(queryParams.getlist('target[]')) > 0:
    mytargets = queryParams.getlist('target[]')
  # Collect the targets
  for target in mytargets:
    requestOptions['targets'].append(target)
  # Output format: an explicit 'format' param overrides the legacy
  # 'pickle'/'rawData' flags (later assignments win).
  if 'pickle' in queryParams:
    requestOptions['format'] = 'pickle'
  if 'rawData' in queryParams:
    requestOptions['format'] = 'raw'
  if 'format' in queryParams:
    requestOptions['format'] = queryParams['format']
  if 'jsonp' in queryParams:
    requestOptions['jsonp'] = queryParams['jsonp']
  if 'noCache' in queryParams:
    requestOptions['noCache'] = True
  if 'maxDataPoints' in queryParams and queryParams['maxDataPoints'].isdigit():
    requestOptions['maxDataPoints'] = int(queryParams['maxDataPoints'])
  requestOptions['localOnly'] = queryParams.get('local') == '1'
  # Fill in the graphOptions
  for opt in graphClass.customizable:
    if opt in queryParams:
      val = queryParams[opt]
      # Coerce numeric- and boolean-looking strings; options whose name
      # contains 'color' stay strings even when they look numeric
      # (e.g. "666666"); 'default'/empty means "leave the class default".
      if (val.isdigit() or (val.startswith('-') and val[1:].isdigit())) and 'color' not in opt.lower():
        val = int(val)
      elif '.' in val and (val.replace('.','',1).isdigit() or (val.startswith('-') and val[1:].replace('.','',1).isdigit())):
        val = float(val)
      elif val.lower() in ('true','false'):
        val = val.lower() == 'true'
      elif val.lower() == 'default' or val == '':
        continue
      graphOptions[opt] = val
  # Timezone: site default, overridable per request via 'tz'; unknown
  # zone names are silently ignored.
  tzinfo = pytz.timezone(settings.TIME_ZONE)
  if 'tz' in queryParams:
    try:
      tzinfo = pytz.timezone(queryParams['tz'])
    except pytz.UnknownTimeZoneError:
      pass
  requestOptions['tzinfo'] = tzinfo
  # Get the time interval for time-oriented graph types
  if graphType == 'line' or graphType == 'pie':
    if 'until' in queryParams:
      untilTime = parseATTime(queryParams['until'], tzinfo)
    else:
      untilTime = parseATTime('now', tzinfo)
    if 'from' in queryParams:
      fromTime = parseATTime(queryParams['from'], tzinfo)
    else:
      fromTime = parseATTime('-1d', tzinfo)
    # Swap if given backwards; an empty range is an error.
    startTime = min(fromTime, untilTime)
    endTime = max(fromTime, untilTime)
    assert startTime != endTime, "Invalid empty time range"
    requestOptions['startTime'] = startTime
    requestOptions['endTime'] = endTime
  return (graphOptions, requestOptions)
# Reusable HTTP connections per remote rendering host, keyed by server name.
connectionPools = {}
def delegateRendering(graphType, graphOptions):
  """Send a render job to one of settings.RENDERING_HOSTS and return the
  PNG bytes, trying servers in random order until one succeeds.

  NOTE(review): implicitly returns None when every server fails -- confirm
  callers handle that.
  """
  start = time()
  # Wire format matched by renderLocalView: graph type line, then pickle.
  postData = graphType + '\n' + pickle.dumps(graphOptions)
  servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
  shuffle(servers)
  for server in servers:
    start2 = time()
    try:
      # Get a connection
      try:
        pool = connectionPools[server]
      except KeyError: #happens the first time
        pool = connectionPools[server] = set()
      try:
        connection = pool.pop()
      except KeyError: #No available connections, have to make a new one
        connection = HTTPConnectionWithTimeout(server)
        connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
      # Send the request
      try:
        connection.request('POST','/render/local/', postData)
      except CannotSendRequest:
        connection = HTTPConnectionWithTimeout(server) #retry once
        connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
        connection.request('POST', '/render/local/', postData)
      # Read the response
      response = connection.getresponse()
      assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
      contentType = response.getheader('Content-Type')
      imageData = response.read()
      assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
      assert imageData, "Received empty response from %s" % server
      # Wrap things up
      log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
      log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
      # Connection is only returned to the pool on success; failed ones
      # are simply dropped.
      pool.add(connection)
      return imageData
    except:
      log.exception("Exception while attempting remote rendering request on %s" % server)
      log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
      continue
def renderLocalView(request):
  """Handle a delegated render POST from another graphite host.

  The request body carries the graph type on the first line followed by a
  pickled options dict.  Returns the rendered image, or a 500 response if
  anything goes wrong.
  """
  try:
    began = time()
    payload = StringIO(request.body)
    graphType = payload.readline().strip()
    pickledOptions = payload.read()
    payload.close()
    options = unpickle.loads(pickledOptions)
    image = doImageRender(GraphTypes[graphType], options)
    log.rendering("Delegated rendering request took %.6f seconds" % (time() - began))
    response = buildResponse(image)
    add_never_cache_headers(response)
    return response
  except:
    log.exception("Exception in graphite.render.views.rawrender")
    return HttpResponseServerError()
def renderMyGraphView(request,username,graphName):
  """Redirect to the saved MyGraph URL for the given user, merging any
  query parameters of the current request into the saved URL's query
  string (request parameters win on conflict)."""
  profile = getProfileByUsername(username)
  if not profile:
    return errorPage("No such user '%s'" % username)
  try:
    graph = profile.mygraph_set.get(name=graphName)
  except ObjectDoesNotExist:
    return errorPage("User %s doesn't have a MyGraph named '%s'" % (username,graphName))
  request_params = dict(request.REQUEST.items())
  if request_params:
    url_parts = urlsplit(graph.url)
    query_string = url_parts[3]
    if query_string:
      url_params = parse_qs(query_string)
      # Remove lists so that we can do an update() on the dict
      for param, value in url_params.items():
        if isinstance(value, list) and param != 'target':
          url_params[param] = value[-1]
      url_params.update(request_params)
      # Handle 'target' being a list - we want duplicate &target params out of it
      url_param_pairs = []
      for key,val in url_params.items():
        if isinstance(val, list):
          for v in val:
            url_param_pairs.append( (key,v) )
        else:
          url_param_pairs.append( (key,val) )
      query_string = urlencode(url_param_pairs)
    # Rebuild the URL with the merged query string (unchanged if the
    # saved URL had no query string to merge into).
    url = urlunsplit(url_parts[:3] + (query_string,) + url_parts[4:])
  else:
    url = graph.url
  return HttpResponseRedirect(url)
def doImageRender(graphClass, graphOptions):
  """Instantiate graphClass with graphOptions, render it into an
  in-memory buffer, and return the raw image bytes."""
  buf = StringIO()
  renderStart = time()
  graph = graphClass(**graphOptions)
  graph.output(buf)
  log.rendering('Rendered PNG in %.6f seconds' % (time() - renderStart))
  try:
    return buf.getvalue()
  finally:
    buf.close()
def buildResponse(imageData, content_type="image/png"):
  """Wrap raw image bytes in an HttpResponse with the given content type."""
  response = HttpResponse(imageData, content_type=content_type)
  return response
def errorPage(message):
  """Render the standard 500 template with the given message and return
  it as an HTTP 500 response."""
  template = loader.get_template('500.html')
  rendered = template.render(Context(dict(message=message)))
  return HttpResponseServerError(rendered)
|
|
"""
Context managers for use with the ``with`` statement.
.. note:: When using Python 2.5, you will need to start your fabfile
with ``from __future__ import with_statement`` in order to make use of
the ``with`` statement (which is a regular, non ``__future__`` feature of
Python 2.6+.)
.. note:: If you are using multiple directly nested ``with`` statements, it can
be convenient to use multiple context expressions in one single with
statement. Instead of writing::
with cd('/path/to/app'):
with prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
you can write::
with cd('/path/to/app'), prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
Note that you need Python 2.7+ for this to work. On Python 2.5 or 2.6, you
can do the following::
from contextlib import nested
with nested(cd('/path/to/app'), prefix('workon myvenv')):
...
Finally, note that `~fabric.context_managers.settings` implements
``nested`` itself -- see its API doc for details.
"""
from contextlib import contextmanager
from fabric.exitstack import ExitStack
import socket
import select
from fabric.thread_handling import ThreadHandler
from fabric.state import output, win32, connections, env
from fabric import state
from fabric.utils import isatty
import collections
if not win32:
import termios
import tty
def _set_output(groups, which):
    """
    Generator shared by ``hide`` and ``show``: set each of the given
    output ``groups`` to ``which`` for the duration of the block, then
    restore the prior values (even on error).
    """
    saved = {}
    try:
        # Remember old values while applying the new one.
        for name in output.expand_aliases(groups):
            saved[name] = output[name]
            output[name] = which
        yield
    finally:
        output.update(saved)
def documented_contextmanager(func):
    """
    Like ``contextlib.contextmanager``, but stash the undecorated
    generator function on the wrapper as ``.undecorated`` so that
    documentation tools can still introspect the original.
    """
    decorated = contextmanager(func)
    decorated.undecorated = func
    return decorated
@documented_contextmanager
def show(*groups):
    """
    Context manager setting the given output ``groups`` to True.

    Each group must name an output group defined in `~fabric.state.output`;
    it is enabled for the duration of the enclosed block and restored to
    its previous value afterwards.

    Since almost all groups are shown by default, `show` is chiefly useful
    for enabling the normally-hidden ``debug`` group, or for overriding a
    caller's use of `hide`::

        def my_task():
            with show('debug'):
                run('ls /var/www')
    """
    return _set_output(groups, True)
@documented_contextmanager
def hide(*groups):
    """
    Context manager setting the given output ``groups`` to False.

    Each group must name an output group defined in `~fabric.state.output`;
    it is silenced for the duration of the enclosed block and restored to
    its previous value afterwards.  For example, to suppress the
    "[hostname] run:" status lines plus all stdout/stderr::

        def my_task():
            with hide('running', 'stdout', 'stderr'):
                run('ls /var/www')
    """
    return _set_output(groups, False)
@documented_contextmanager
def _setenv(variables):
    """
    Context manager temporarily overriding ``env`` with given key/value pairs.
    A callable that returns a dict can also be passed. This is necessary when
    new values are being calculated from current values, in order to ensure that
    the "current" value is current at the time that the context is entered, not
    when the context manager is initialized. (See Issue #736.)
    This context manager is used internally by `settings` and is not intended
    to be used directly.
    """
    # Defer evaluation until entry so values derived from env are fresh.
    if isinstance(variables, collections.Callable):
        variables = variables()
    # ``clean_revert`` is an instruction to this manager, not an env key.
    clean_revert = variables.pop('clean_revert', False)
    previous = {}  # keys we overwrote -> their pre-block values
    new = []       # keys that did not exist before the block
    for key, value in variables.items():
        if key in state.env:
            previous[key] = state.env[key]
        else:
            new.append(key)
        state.env[key] = value
    try:
        yield
    finally:
        if clean_revert:
            # Only revert keys the block itself did not modify; keys
            # changed inside the block keep their new values.
            for key, value in variables.items():
                # If the current env value for this key still matches the
                # value we set it to beforehand, we are OK to revert it to the
                # pre-block value.
                if key in state.env and value == state.env[key]:
                    if key in previous:
                        state.env[key] = previous[key]
                    else:
                        del state.env[key]
        else:
            # Full revert: restore overwritten keys, drop newly added ones.
            state.env.update(previous)
            for key in new:
                del state.env[key]
def settings(*args, **kwargs):
    """
    Nest context managers and/or temporarily override ``env`` variables.

    Positional arguments must themselves be context managers; they are
    entered in order (as `contextlib.nested`_ would).  Keyword arguments
    become temporary ``env`` overrides, restored when the block closes::

        def my_task():
            with settings(
                hide('warnings', 'running', 'stdout', 'stderr'),
                warn_only=True
            ):
                if run('ls /etc/lsb-release'):
                    return 'Ubuntu'
                elif run('ls /etc/redhat-release'):
                    return 'RedHat'

    .. _contextlib.nested: http://docs.python.org/library/contextlib.html#contextlib.nested

    The special keyword ``clean_revert`` (stripped before execution) alters
    restoration: when True, keys whose values were changed *inside* the
    block keep their new values, and brand-new keys created via ``settings``
    are preserved instead of removed; only keys still holding the value
    given to ``settings`` are reverted.  When False (the default), every
    overridden key is restored and every new key is deleted on exit.

    .. versionadded:: 1.4.1
        The ``clean_revert`` kwarg.
    """
    stack = ExitStack()
    queued = list(args)
    if kwargs:
        queued.append(_setenv(kwargs))
    stack.create_queue(queued)
    return stack
def cd(path):
    """
    Context manager that keeps *remote* directory state across operations.

    Within the block, `run`, `sudo`, `get` and `put` behave as if prefixed
    with ``cd <path> && `` -- necessary because shell-less SSH keeps no
    state between invocations, so a bare ``run('cd /var/www')`` has no
    effect on a following ``run('ls')``.  Relative paths (rooted at the
    remote user's ``$HOME``) are accepted, and calls nest::

        with cd('/var/www'):
            run('ls')                # cd /var/www && ls
            with cd('website1'):
                run('ls')            # cd /var/www/website1 && ls

    .. note::
        Implemented by appending to (and restoring afterwards) the
        ``env.cwd`` variable -- an implementation detail; rely on `cd`'s
        behavior rather than altering ``env.cwd`` yourself.

    .. note::
        Space characters in ``path`` are escaped automatically.

    Only *remote* paths are affected; use `~fabric.context_managers.lcd`
    for local ones.

    .. versionchanged:: 1.0
        Applies to `get` and `put` in addition to the command-running
        operations.

    .. seealso:: `~fabric.context_managers.lcd`
    """
    return _change_cwd('cwd', path)
def lcd(path):
    """
    Context manager for updating the *local* current working directory.

    Identical to `~fabric.context_managers.cd` except that it updates
    ``env.lcwd`` rather than ``env.cwd``, and therefore only affects
    `~fabric.operations.local` plus the local ends of
    `~fabric.operations.get`/`~fabric.operations.put`.

    Relative paths are resolved against the local process's working
    directory (see `os.getcwd
    <http://docs.python.org/release/2.6/library/os.html#os.getcwd>`_),
    which depends on where Fabric was invoked; pinning paths relative to
    :ref:`env.real_fabfile <real-fabfile>` is often more robust.

    .. versionadded:: 1.0
    """
    return _change_cwd('lcwd', path)
def _change_cwd(which, path):
    """
    Shared implementation behind `cd`/`lcd`: escape spaces, join a
    relative path onto the current value of ``env[which]``, and return a
    `_setenv` manager that temporarily installs the result.
    """
    escaped = path.replace(' ', '\ ')
    current = state.env.get(which)
    is_relative = not (escaped.startswith('/') or escaped.startswith('~'))
    if current and is_relative:
        new_cwd = current + '/' + escaped
    else:
        new_cwd = escaped
    return _setenv({which: new_cwd})
def path(path, behavior='append'):
    """
    Modify the ``$PATH`` used to execute wrapped `run`/`sudo` commands.

    ``behavior`` selects how ``path`` combines with the existing value:

    * ``'append'`` (the default): ``PATH=$PATH:<path>``
    * ``'prepend'``: ``PATH=<path>:$PATH``
    * ``'replace'``: ``PATH=<path>``

    .. note::
        Implemented by temporarily setting (and afterwards restoring)
        ``env.path`` and ``env.path_behavior``; this is an implementation
        detail and may change, so avoid altering them directly.

    .. versionadded:: 1.0
    """
    return _setenv({'path': path, 'path_behavior': behavior})
def prefix(command):
    """
    Prefix all wrapped `run`/`sudo` commands with ``<command> && ``.

    Unlike `~fabric.operations.cd`, nested invocations append to a *list*
    of command prefixes rather than mutating a single string, so multiple
    `prefix` blocks compose -- and they compose with `cd` as well::

        with cd('/path/to/app'):
            with prefix('workon myvenv'):
                run('./manage.py syncdb')
                run('./manage.py loaddata myfixture')

    executes::

        $ cd /path/to/app && workon myvenv && ./manage.py syncdb
        $ cd /path/to/app && workon myvenv && ./manage.py loaddata myfixture

    Typical use is sourcing scripts that mutate shell state, e.g. the
    ``workon`` command from `virtualenvwrapper
    <http://www.doughellmann.com/projects/virtualenvwrapper/>`_.  Nesting
    `prefix` itself::

        with prefix('workon myenv'):
            run('ls')                        # workon myenv && ls
            with prefix('source /some/script'):
                run('touch a_file')          # workon myenv && source /some/script && touch a_file
    """
    # The callable defers reading env.command_prefixes until block entry.
    return _setenv(lambda: {'command_prefixes': state.env.command_prefixes + [command]})
@documented_contextmanager
def char_buffered(pipe):
    """
    Force local terminal ``pipe`` be character, not line, buffered.
    Only applies on Unix-based systems; on Windows this is a no-op.
    """
    # Also a no-op when ``pipe`` is not an interactive TTY (e.g. output is
    # redirected) -- cbreak mode would be meaningless there.
    if win32 or not isatty(pipe):
        yield
    else:
        # Snapshot the terminal settings so they can be restored verbatim.
        old_settings = termios.tcgetattr(pipe)
        tty.setcbreak(pipe)
        try:
            yield
        finally:
            # Restore even if the block raised; TCSADRAIN waits for queued
            # output to drain before applying the old settings.
            termios.tcsetattr(pipe, termios.TCSADRAIN, old_settings)
def shell_env(**kw):
    """
    Set shell environment variables for wrapped commands.

    Each keyword argument becomes an exported variable for commands run
    inside the block::

        with shell_env(ZMQ_DIR='/home/user/local'):
            run('pip install pyzmq')

    which effectively runs
    ``export ZMQ_DIR='/home/user/local' && pip install pyzmq``.  Multiple
    key-value pairs may be given at once.

    .. note::
        When affecting `~fabric.operations.local` on a Windows localhost,
        ``SET`` commands are used instead of ``export``.
    """
    return _setenv({'shell_env': kw})
def _forwarder(chan, sock):
    """Pump bytes both ways between ``sock`` and a Paramiko channel until
    either side reports EOF, then close both ends."""
    while True:
        readable, _, _ = select.select([sock, chan], [], [])
        if sock in readable:
            payload = sock.recv(1024)
            if not payload:
                break
            chan.send(payload)
        if chan in readable:
            payload = chan.recv(1024)
            if not payload:
                break
            sock.send(payload)
    chan.close()
    sock.close()
@documented_contextmanager
def remote_tunnel(remote_port, local_port=None, local_host="localhost",
                  remote_bind_address="127.0.0.1"):
    """
    Create a tunnel forwarding a locally-visible port to the remote target.
    For example, you can let the remote host access a database that is
    installed on the client host::
        # Map localhost:6379 on the server to localhost:6379 on the client,
        # so that the remote 'redis-cli' program ends up speaking to the local
        # redis-server.
        with remote_tunnel(6379):
            run("redis-cli -i")
    The database might be installed on a client only reachable from the client
    host (as opposed to *on* the client itself)::
        # Map localhost:6379 on the server to redis.internal:6379 on the client
        with remote_tunnel(6379, local_host="redis.internal")
            run("redis-cli -i")
    ``remote_tunnel`` accepts up to four arguments:
    * ``remote_port`` (mandatory) is the remote port to listen to.
    * ``local_port`` (optional) is the local port to connect to; the default is
      the same port as the remote one.
    * ``local_host`` (optional) is the locally-reachable computer (DNS name or
      IP address) to connect to; the default is ``localhost`` (that is, the
      same computer Fabric is running on).
    * ``remote_bind_address`` (optional) is the remote IP address to bind to
      for listening, on the current target. It should be an IP address assigned
      to an interface on the target (or a DNS name that resolves to such IP).
      You can use "0.0.0.0" to bind to all interfaces.
    .. note::
        By default, most SSH servers only allow remote tunnels to listen to the
        localhost interface (127.0.0.1). In these cases, `remote_bind_address`
        is ignored by the server, and the tunnel will listen only to 127.0.0.1.
    .. versionadded: 1.6
    """
    if local_port is None:
        local_port = remote_port
    # Every socket/channel/forwarder thread created by accept() is tracked
    # here so all of them can be torn down when the block exits.
    sockets = []
    channels = []
    threads = []
    def accept(channel, src, dest):
        # Invoked by Paramiko for each incoming connection on the remote
        # forwarded port; opens the matching local socket and starts a
        # forwarder thread for the pair.
        (src_addr, src_port) = src
        (dest_addr, dest_port) = dest
        channels.append(channel)
        sock = socket.socket()
        sockets.append(sock)
        try:
            sock.connect((local_host, local_port))
        except Exception as e:
            print("[%s] rtunnel: cannot connect to %s:%d (from local)" % (env.host_string, local_host, local_port))
            channel.close()
            return
        print("[%s] rtunnel: opened reverse tunnel: %r -> %r -> %r"\
              % (env.host_string, channel.origin_addr,
                 channel.getpeername(), (local_host, local_port)))
        th = ThreadHandler('fwd', _forwarder, channel, sock)
        threads.append(th)
    transport = connections[env.host_string].get_transport()
    transport.request_port_forward(remote_bind_address, remote_port, handler=accept)
    try:
        yield
    finally:
        # Close both ends of every tunnel, join each forwarder thread (and
        # re-raise any exception it captured), then cancel the remote-side
        # listener.
        for sock, chan, th in zip(sockets, channels, threads):
            sock.close()
            chan.close()
            th.thread.join()
            th.raise_if_needed()
        transport.cancel_port_forward(remote_bind_address, remote_port)
quiet = lambda: settings(hide('everything'), warn_only=True)
quiet.__doc__ = """
Alias to ``settings(hide('everything'), warn_only=True)``.
Useful for wrapping remote interrogative commands which you expect to fail
occasionally, and/or which you want to silence.
Example::
with quiet():
have_build_dir = run("test -e /tmp/build").succeeded
When used in a task, the above snippet will not produce any ``run: test -e
/tmp/build`` line, nor will any stdout/stderr display, and command failure
is ignored.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.hide`
.. versionadded:: 1.5
"""
warn_only = lambda: settings(warn_only=True)
warn_only.__doc__ = """
Alias to ``settings(warn_only=True)``.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.quiet`
"""
|
|
"""Real-time information about public transport departures in Norway."""
from datetime import datetime, timedelta
from enturclient import EnturPublicTransportData
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_SHOW_ON_MAP,
TIME_MINUTES,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
# Client identifier sent with every request to the Entur API.
API_CLIENT_NAME = "homeassistant-homeassistant"

ATTRIBUTION = "Data provided by entur.org under NLOD"

# Configuration keys accepted by this platform.
CONF_STOP_IDS = "stop_ids"
CONF_EXPAND_PLATFORMS = "expand_platforms"
CONF_WHITELIST_LINES = "line_whitelist"
CONF_OMIT_NON_BOARDING = "omit_non_boarding"
CONF_NUMBER_OF_DEPARTURES = "number_of_departures"

DEFAULT_NAME = "Entur"
DEFAULT_ICON_KEY = "bus"

# Map Entur transport modes to Material Design icon names.
ICONS = {
    "air": "mdi:airplane",
    "bus": "mdi:bus",
    "metro": "mdi:subway",
    "rail": "mdi:train",
    "tram": "mdi:tram",
    "water": "mdi:ferry",
}

# How often Home Assistant polls the sensors for fresh data.
SCAN_INTERVAL = timedelta(seconds=45)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_STOP_IDS): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_EXPAND_PLATFORMS, default=True): cv.boolean,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
        vol.Optional(CONF_WHITELIST_LINES, default=[]): cv.ensure_list,
        vol.Optional(CONF_OMIT_NON_BOARDING, default=True): cv.boolean,
        vol.Optional(CONF_NUMBER_OF_DEPARTURES, default=2): vol.All(
            cv.positive_int, vol.Range(min=2, max=10)
        ),
    }
)

# Names of the state attributes exposed for the next departure.
ATTR_STOP_ID = "stop_id"
ATTR_ROUTE = "route"
ATTR_ROUTE_ID = "route_id"
ATTR_EXPECTED_AT = "due_at"
ATTR_DELAY = "delay"
ATTR_REALTIME = "real_time"
# Attributes describing the departure after the next one.
ATTR_NEXT_UP_IN = "next_due_in"
ATTR_NEXT_UP_ROUTE = "next_route"
ATTR_NEXT_UP_ROUTE_ID = "next_route_id"
ATTR_NEXT_UP_AT = "next_due_at"
ATTR_NEXT_UP_DELAY = "next_delay"
ATTR_NEXT_UP_REALTIME = "next_real_time"
ATTR_TRANSPORT_MODE = "transport_mode"
def due_in_minutes(timestamp: datetime) -> int:
    """Get the time in minutes from a timestamp (None if unknown)."""
    if timestamp is None:
        return None
    remaining = timestamp - dt_util.now()
    # int() truncates toward zero, so departures less than a minute away
    # (and slightly in the past) both read as 0.
    return int(remaining.total_seconds() / 60)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Entur public transport sensor."""
    name = config.get(CONF_NAME)
    show_on_map = config.get(CONF_SHOW_ON_MAP)
    stop_ids = config.get(CONF_STOP_IDS)

    # Entur ids look like "NSR:StopPlace:123" or "NSR:Quay:456"; split them
    # so the client can query each kind appropriately.
    stops = [sid for sid in stop_ids if "StopPlace" in sid]
    quays = [sid for sid in stop_ids if "Quay" in sid]

    data = EnturPublicTransportData(
        API_CLIENT_NAME,
        stops=stops,
        quays=quays,
        line_whitelist=config.get(CONF_WHITELIST_LINES),
        omit_non_boarding=config.get(CONF_OMIT_NON_BOARDING),
        number_of_departures=config.get(CONF_NUMBER_OF_DEPARTURES),
        web_session=async_get_clientsession(hass),
    )

    if config.get(CONF_EXPAND_PLATFORMS):
        await data.expand_all_quays()
    await data.update()

    # One throttled proxy shared by all sensors to avoid API rate limits.
    proxy = EnturProxy(data)

    sensors = []
    for place in data.all_stop_places_quays():
        try:
            sensor_name = f"{name} {data.get_stop_info(place).name}"
        except KeyError:
            # No stop info available; fall back to the raw place id.
            sensor_name = f"{name} {place}"
        sensors.append(
            EnturPublicTransportSensor(proxy, sensor_name, place, show_on_map)
        )

    async_add_entities(sensors, True)
class EnturProxy:
    """Proxy for the Entur client.

    Ensure throttle to not hit rate limiting on the API.  All sensors share
    one proxy instance, so the throttle below bounds total API traffic.
    """

    def __init__(self, api):
        """Initialize the proxy with the shared Entur API client."""
        self._api = api

    @Throttle(timedelta(seconds=15))
    async def async_update(self) -> None:
        """Update data in client.

        Throttled to at most one real refresh per 15 seconds regardless of
        how many sensors call it.
        """
        await self._api.update()

    def get_stop_info(self, stop_id: str) -> dict:
        """Get info about specific stop place."""
        return self._api.get_stop_info(stop_id)
class EnturPublicTransportSensor(Entity):
    """Implementation of a Entur public transport sensor."""

    def __init__(self, api: EnturProxy, name: str, stop: str, show_on_map: bool):
        """Initialize the sensor.

        api: shared, throttled proxy to the Entur client.
        name: friendly name shown in the UI.
        stop: Entur stop place / quay id this sensor tracks.
        show_on_map: expose coordinates as state attributes when True.
        """
        self.api = api
        self._stop = stop
        self._show_on_map = show_on_map
        self._name = name
        self._state = None
        # Default to the bus icon until the transport mode is known.
        self._icon = ICONS[DEFAULT_ICON_KEY]
        self._attributes = {}

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self) -> str:
        """Return the state of the sensor."""
        return self._state

    @property
    def device_state_attributes(self) -> dict:
        """Return the state attributes."""
        self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
        self._attributes[ATTR_STOP_ID] = self._stop
        return self._attributes

    @property
    def unit_of_measurement(self) -> str:
        """Return the unit this state is expressed in."""
        return TIME_MINUTES

    @property
    def icon(self) -> str:
        """Icon to use in the frontend."""
        return self._icon

    async def async_update(self) -> None:
        """Get the latest data and update the states."""
        await self.api.async_update()

        # Rebuild attributes from scratch each refresh so stale departure
        # entries do not linger after the data changes.
        self._attributes = {}

        data = self.api.get_stop_info(self._stop)
        if data is None:
            self._state = None
            return

        if self._show_on_map and data.latitude and data.longitude:
            self._attributes[CONF_LATITUDE] = data.latitude
            self._attributes[CONF_LONGITUDE] = data.longitude

        calls = data.estimated_calls
        if not calls:
            self._state = None
            return

        # Sensor state: minutes until the next departure.
        self._state = due_in_minutes(calls[0].expected_departure_time)
        self._icon = ICONS.get(calls[0].transport_mode, ICONS[DEFAULT_ICON_KEY])
        self._attributes[ATTR_ROUTE] = calls[0].front_display
        self._attributes[ATTR_ROUTE_ID] = calls[0].line_id
        self._attributes[ATTR_EXPECTED_AT] = calls[0].expected_departure_time.strftime(
            "%H:%M"
        )
        self._attributes[ATTR_REALTIME] = calls[0].is_realtime
        self._attributes[ATTR_DELAY] = calls[0].delay_in_min

        number_of_calls = len(calls)
        if number_of_calls < 2:
            return

        # Details for the second departure, when available.
        self._attributes[ATTR_NEXT_UP_ROUTE] = calls[1].front_display
        self._attributes[ATTR_NEXT_UP_ROUTE_ID] = calls[1].line_id
        self._attributes[ATTR_NEXT_UP_AT] = calls[1].expected_departure_time.strftime(
            "%H:%M"
        )
        self._attributes[
            ATTR_NEXT_UP_IN
        ] = f"{due_in_minutes(calls[1].expected_departure_time)} min"
        self._attributes[ATTR_NEXT_UP_REALTIME] = calls[1].is_realtime
        self._attributes[ATTR_NEXT_UP_DELAY] = calls[1].delay_in_min

        if number_of_calls < 3:
            return

        # Any further departures become numbered attributes; times that are
        # not realtime get a "ca. " (circa) prefix.
        for i, call in enumerate(calls[2:]):
            key_name = f"departure_#{i + 3}"
            self._attributes[key_name] = (
                f"{'' if bool(call.is_realtime) else 'ca. '}"
                f"{call.expected_departure_time.strftime('%H:%M')} {call.front_display}"
            )
|
|
"""Miscellaneous network and PF-related utilities"""
import re
import glob
import time
import ctypes
from socket import *
from fcntl import ioctl
from pf.constants import *
from pf.exceptions import PFError
from pf._struct import ifreq, if_data, timeval
# Dictionaries for mapping strings to constants
# Debug levels
dbg_levels = {
    "emerg": LOG_EMERG,
    "alert": LOG_ALERT,
    "crit": LOG_CRIT,
    "err": LOG_ERR,
    "warn": LOG_WARNING,
    "notice": LOG_NOTICE,
    "info": LOG_INFO,
    "debug": LOG_DEBUG
}

# Memory limits
pf_limits = {
    "states": PF_LIMIT_STATES,
    "src-nodes": PF_LIMIT_SRC_NODES,
    "frags": PF_LIMIT_FRAGS,
    "tables": PF_LIMIT_TABLES,
    "table-entries": PF_LIMIT_TABLE_ENTRIES
}

# Ports, UIDs and GIDs operators
pf_ops = {
    "": PF_OP_NONE,
    "><": PF_OP_IRG,
    "<>": PF_OP_XRG,
    "=": PF_OP_EQ,
    "!=": PF_OP_NE,
    "<": PF_OP_LT,
    "<=": PF_OP_LE,
    ">": PF_OP_GT,
    ">=": PF_OP_GE,
    ":": PF_OP_RRG
}

# Interface modifiers
pf_if_mods = {
    "network": PFI_AFLAG_NETWORK,
    "broadcast": PFI_AFLAG_BROADCAST,
    "peer": PFI_AFLAG_PEER,
    "0": PFI_AFLAG_NOALIAS
}

# Global timeouts (pf.conf "set timeout" names)
pf_timeouts = {
    "tcp.first": PFTM_TCP_FIRST_PACKET,
    "tcp.opening": PFTM_TCP_OPENING,
    "tcp.established": PFTM_TCP_ESTABLISHED,
    "tcp.closing": PFTM_TCP_CLOSING,
    "tcp.finwait": PFTM_TCP_FIN_WAIT,
    "tcp.closed": PFTM_TCP_CLOSED,
    "tcp.tsdiff": PFTM_TS_DIFF,
    "udp.first": PFTM_UDP_FIRST_PACKET,
    "udp.single": PFTM_UDP_SINGLE,
    "udp.multiple": PFTM_UDP_MULTIPLE,
    "icmp.first": PFTM_ICMP_FIRST_PACKET,
    "icmp.error": PFTM_ICMP_ERROR_REPLY,
    "other.first": PFTM_OTHER_FIRST_PACKET,
    "other.single": PFTM_OTHER_SINGLE,
    "other.multiple": PFTM_OTHER_MULTIPLE,
    "frag": PFTM_FRAG,
    "interval": PFTM_INTERVAL,
    "adaptive.start": PFTM_ADAPTIVE_START,
    "adaptive.end": PFTM_ADAPTIVE_END,
    "src.track": PFTM_SRC_NODE
}

# Syncookies modes
pf_syncookies_modes = {
    "never": PF_SYNCOOKIES_NEVER,
    "always": PF_SYNCOOKIES_ALWAYS,
    "adaptive": PF_SYNCOOKIES_ADAPTIVE
}

# PF Optimization Hints: per-profile TCP timeout presets, in seconds.
pf_hint_normal = {
    "tcp.first": 2 * 60,
    "tcp.opening": 30,
    "tcp.established": 24 * 60 * 60,
    "tcp.closing": 15 * 60,
    "tcp.finwait": 45,
    "tcp.closed": 90,
    "tcp.tsdiff": 30
}

# NOTE(review): "sattelite" is a misspelling of "satellite", but the name is
# kept because it is also exposed as a key in pf_hints below.
pf_hint_sattelite = {
    "tcp.first": 3 * 60,
    "tcp.opening": 30 + 5,
    "tcp.established": 24 * 60 * 60,
    "tcp.closing": 15 * 60 + 5,
    "tcp.finwait": 45 + 5,
    "tcp.closed": 90 + 5,
    "tcp.tsdiff": 60
}

pf_hint_conservative = {
    "tcp.first": 60 * 60,
    "tcp.opening": 15 * 60,
    "tcp.established": 5 * 24 * 60 * 60,
    "tcp.closing": 60 * 60,
    "tcp.finwait": 10 * 60,
    "tcp.closed": 3 * 90,
    "tcp.tsdiff": 60
}

pf_hint_aggressive = {
    "tcp.first": 30,
    "tcp.opening": 5,
    "tcp.established": 5 * 60 * 60,
    "tcp.closing": 60,
    "tcp.finwait": 30,
    "tcp.closed": 30,
    "tcp.tsdiff": 10
}

# Optimization profile name -> timeout preset.
pf_hints = {
    "normal": pf_hint_normal,
    "sattelite": pf_hint_sattelite,
    "high-latency": pf_hint_sattelite,
    "conservative": pf_hint_conservative,
    "aggressive": pf_hint_aggressive
}
# Dictionaries for mapping constants to strings
# TCP states
tcpstates = {TCPS_CLOSED: "CLOSED",
             TCPS_LISTEN: "LISTEN",
             TCPS_SYN_SENT: "SYN_SENT",
             TCPS_SYN_RECEIVED: "SYN_RCVD",
             TCPS_ESTABLISHED: "ESTABLISHED",
             TCPS_CLOSE_WAIT: "CLOSE_WAIT",
             TCPS_FIN_WAIT_1: "FIN_WAIT_1",
             TCPS_CLOSING: "CLOSING",
             TCPS_LAST_ACK: "LAST_ACK",
             TCPS_FIN_WAIT_2: "FIN_WAIT_2",
             TCPS_TIME_WAIT: "TIME_WAIT"}

# UDP states
udpstates = {PFUDPS_NO_TRAFFIC: "NO_TRAFFIC",
             PFUDPS_SINGLE: "SINGLE",
             PFUDPS_MULTIPLE: "MULTIPLE"}

# ICMP and ICMPv6 codes and types, keyed by (type, code) pairs and mapped to
# the keywords pfctl uses in rule syntax.
icmp_codes = {
    (ICMP_UNREACH, ICMP_UNREACH_NET): "net-unr",
    (ICMP_UNREACH, ICMP_UNREACH_HOST): "host-unr",
    (ICMP_UNREACH, ICMP_UNREACH_PROTOCOL): "proto-unr",
    (ICMP_UNREACH, ICMP_UNREACH_PORT): "port-unr",
    (ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG): "needfrag",
    (ICMP_UNREACH, ICMP_UNREACH_SRCFAIL): "srcfail",
    (ICMP_UNREACH, ICMP_UNREACH_NET_UNKNOWN): "net-unk",
    (ICMP_UNREACH, ICMP_UNREACH_HOST_UNKNOWN): "host-unk",
    (ICMP_UNREACH, ICMP_UNREACH_ISOLATED): "isolate",
    (ICMP_UNREACH, ICMP_UNREACH_NET_PROHIB): "net-prohib",
    (ICMP_UNREACH, ICMP_UNREACH_HOST_PROHIB): "host-prohib",
    (ICMP_UNREACH, ICMP_UNREACH_TOSNET): "net-tos",
    (ICMP_UNREACH, ICMP_UNREACH_TOSHOST): "host-tos",
    (ICMP_UNREACH, ICMP_UNREACH_FILTER_PROHIB): "filter-prohib",
    (ICMP_UNREACH, ICMP_UNREACH_HOST_PRECEDENCE): "host-preced",
    (ICMP_UNREACH, ICMP_UNREACH_PRECEDENCE_CUTOFF): "cutoff-preced",
    (ICMP_REDIRECT, ICMP_REDIRECT_NET): "redir-net",
    (ICMP_REDIRECT, ICMP_REDIRECT_HOST): "redir-host",
    (ICMP_REDIRECT, ICMP_REDIRECT_TOSNET): "redir-tos-net",
    (ICMP_REDIRECT, ICMP_REDIRECT_TOSHOST): "redir-tos-host",
    (ICMP_ROUTERADVERT, ICMP_ROUTERADVERT_NORMAL): "normal-adv",
    (ICMP_ROUTERADVERT, ICMP_ROUTERADVERT_NOROUTE_COMMON): "common-adv",
    (ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS): "transit",
    (ICMP_TIMXCEED, ICMP_TIMXCEED_REASS): "reassemb",
    (ICMP_PARAMPROB, ICMP_PARAMPROB_ERRATPTR): "badhead",
    (ICMP_PARAMPROB, ICMP_PARAMPROB_OPTABSENT): "optmiss",
    (ICMP_PARAMPROB, ICMP_PARAMPROB_LENGTH): "badlen",
    (ICMP_PHOTURIS, ICMP_PHOTURIS_UNKNOWN_INDEX): "unknown-ind",
    (ICMP_PHOTURIS, ICMP_PHOTURIS_AUTH_FAILED): "auth-fail",
    (ICMP_PHOTURIS, ICMP_PHOTURIS_DECRYPT_FAILED): "decrypt-fail"}

icmp6_codes = {
    (ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADMIN): "admin-unr",
    (ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOROUTE): "noroute-unr",
    (ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOTNEIGHBOR): "notnbr-unr",
    (ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_BEYONDSCOPE): "beyond-unr",
    (ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR): "addr-unr",
    (ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOPORT): "port-unr",
    (ICMP6_TIME_EXCEEDED, ICMP6_TIME_EXCEED_TRANSIT): "transit",
    (ICMP6_TIME_EXCEEDED, ICMP6_TIME_EXCEED_REASSEMBLY): "reassemb",
    (ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER): "badhead",
    (ICMP6_PARAM_PROB, ICMP6_PARAMPROB_NEXTHEADER): "nxthdr",
    (ND_REDIRECT, ND_REDIRECT_ONLINK): "redironlink",
    (ND_REDIRECT, ND_REDIRECT_ROUTER): "redirrouter"}

# ICMP type number -> pfctl keyword.
icmp_types = {
    ICMP_ECHO: "echoreq",
    ICMP_ECHOREPLY: "echorep",
    ICMP_UNREACH: "unreach",
    ICMP_SOURCEQUENCH: "squench",
    ICMP_REDIRECT: "redir",
    ICMP_ALTHOSTADDR: "althost",
    ICMP_ROUTERADVERT: "routeradv",
    ICMP_ROUTERSOLICIT: "routersol",
    ICMP_TIMXCEED: "timex",
    ICMP_PARAMPROB: "paramprob",
    ICMP_TSTAMP: "timereq",
    ICMP_TSTAMPREPLY: "timerep",
    ICMP_IREQ: "inforeq",
    ICMP_IREQREPLY: "inforep",
    ICMP_MASKREQ: "maskreq",
    ICMP_MASKREPLY: "maskrep",
    ICMP_TRACEROUTE: "trace",
    ICMP_DATACONVERR: "dataconv",
    ICMP_MOBILE_REDIRECT: "mobredir",
    ICMP_IPV6_WHEREAREYOU: "ipv6-where",
    ICMP_IPV6_IAMHERE: "ipv6-here",
    ICMP_MOBILE_REGREQUEST: "mobregreq",
    ICMP_MOBILE_REGREPLY: "mobregrep",
    ICMP_SKIP: "skip",
    ICMP_PHOTURIS: "photuris"}

# ICMPv6 type number -> pfctl keyword.
icmp6_types = {
    ICMP6_DST_UNREACH: "unreach",
    ICMP6_PACKET_TOO_BIG: "toobig",
    ICMP6_TIME_EXCEEDED: "timex",
    ICMP6_PARAM_PROB: "paramprob",
    ICMP6_ECHO_REQUEST: "echoreq",
    ICMP6_ECHO_REPLY: "echorep",
    ICMP6_MEMBERSHIP_QUERY: "groupqry",
    MLD_LISTENER_QUERY: "listqry",
    ICMP6_MEMBERSHIP_REPORT: "grouprep",
    MLD_LISTENER_REPORT: "listenrep",
    ICMP6_MEMBERSHIP_REDUCTION: "groupterm",
    MLD_LISTENER_DONE: "listendone",
    ND_ROUTER_SOLICIT: "routersol",
    ND_ROUTER_ADVERT: "routeradv",
    ND_NEIGHBOR_SOLICIT: "neighbrsol",
    ND_NEIGHBOR_ADVERT: "neighbradv",
    ND_REDIRECT: "redir",
    ICMP6_ROUTER_RENUMBERING: "routrrenum",
    ICMP6_WRUREQUEST: "wrureq",
    ICMP6_WRUREPLY: "wrurep",
    ICMP6_FQDN_QUERY: "fqdnreq",
    ICMP6_FQDN_REPLY: "fqdnrep",
    ICMP6_NI_QUERY: "niqry",
    ICMP6_NI_REPLY: "nirep",
    MLD_MTRACE_RESP: "mtraceresp",
    MLD_MTRACE: "mtrace"}
# Helper functions
def getprotobynumber(number, file="/etc/protocols"):
    """Map a protocol number to a name.

    Scan *file* (in /etc/protocols format: "name  number  [aliases]") and
    return the name of the first entry whose number equals *number*.
    Return the protocol name or None if no match is found.
    """
    # Raw string: "\S", "\s" and "\d" are regex escapes, not valid string
    # escapes, and the old non-raw literal raised DeprecationWarning (an
    # error on newer Pythons).
    pattern = re.compile(r"(?P<proto>\S+)\s+(?P<num>\d+)")
    with open(file, 'r') as f:
        for line in f:
            m = pattern.match(line)
            if m and int(m.group("num")) == number:
                return m.group("proto")
    # Explicit "not found" result instead of falling off the end.
    return None
def geticmpcodebynumber(type, code, af):
    """Return the ICMP code as a string, or None if unknown."""
    mapping = icmp6_codes if af == AF_INET6 else icmp_codes
    return mapping.get((type, code))
def geticmptypebynumber(type, af):
    """Return the ICMP type as a string, or None if unknown."""
    mapping = icmp6_types if af == AF_INET6 else icmp_types
    return mapping.get(type)
def ctonm(cidr, af):
    """Convert netmask from CIDR to dotted decimal notation.

    Raises ValueError if *af* is not AF_INET or AF_INET6.
    """
    try:
        maxlen = {AF_INET: 32, AF_INET6: 128}[af]
    except KeyError:
        raise ValueError("Invalid address family")
    bits = "1" * cidr + "0" * (maxlen - cidr)
    # Pack eight bits at a time into raw bytes: inet_ntop() expects a packed
    # binary address, not a text string (the previous chr()/str.join version
    # raised TypeError on Python 3).  bytes(bytearray(...)) works on both
    # Python 2 (str) and Python 3 (bytes).
    mask = bytes(bytearray(int(bits[i:i + 8], 2) for i in range(0, maxlen, 8)))
    return inet_ntop(af, mask)
def nmtoc(netmask, af):
    """Convert netmask from dotted decimal to CIDR notation."""
    cidr = 0
    # Wrap in bytearray so iteration yields integers on both Python 2 and 3;
    # the previous map(ord, ...) raised TypeError on Python 3, where iterating
    # the bytes from inet_pton() already yields ints.
    for octet in bytearray(inet_pton(af, netmask)):
        while octet:
            cidr += octet & 1
            octet >>= 1
    return cidr
def is_IPaddr(addr):
    """Return True if addr is a valid IPv4 address string."""
    return _is_valid_addr(AF_INET, addr)
def is_IP6addr(addr):
    """Return True if addr is a valid IPv6 address string."""
    return _is_valid_addr(AF_INET6, addr)
def _is_valid_addr(af, addr):
    """Return True if addr is a valid address in the address family specified."""
    try:
        inet_pton(af, addr)
        return True
    except error:  # socket.error
        return False
def rate2str(bw):
    """Return the string representation of the network speed rate."""
    # Scale down by 1000 per step until the value fits below the next unit.
    unit = " "
    for unit in (" ", "K", "M", "G"):
        if bw < 1000:
            break
        bw /= 1000.0
    # Show two decimals only when there is a fractional part worth showing.
    if int(bw * 100 % 100):
        return "{:.2f}{}".format(bw, unit)
    return "{}{}".format(int(bw), unit)
def getifmtu(ifname):
    """Quick hack to get MTU and speed for a specified interface.

    Returns an (mtu, speed) tuple; falls back to an MTU of 1500 when the
    ioctl() fails.
    """
    from pf.filter import _IOWR
    # Build the SIOCGIFMTU ioctl request number the same way the C headers do.
    SIOCGIFMTU = _IOWR('i', 126, ifreq)
    s = socket(AF_INET, SOCK_DGRAM)
    ifrdat = if_data()
    # Point ifru_data at ifrdat so the kernel can fill in the interface data.
    ifr = ifreq(ifr_name=ifname, ifru_data=ctypes.addressof(ifrdat))
    try:
        ioctl(s, SIOCGIFMTU, ifr.asBuffer())
    except IOError:
        # Best-effort: keep the defaults if the interface does not answer.
        pass
    s.close()
    # ifru_metric overlays the MTU field in the ifreq union; <= 0 means the
    # ioctl did not populate it.
    mtu = (ifr.ifru_metric if (ifr.ifru_metric > 0) else 1500)
    speed = ifrdat.ifi_baudrate
    return (mtu, speed)
def uptime():
    """Return system uptime in seconds.

    Queries the kern.boottime sysctl through libc and subtracts the boot
    time from the current time.

    Raises PFError if the sysctl() call fails.
    """
    CTL_KERN = 1  # From /usr/include/sys/sysctl.h
    KERN_BOOTTIME = 21  # From /usr/include/sys/sysctl.h
    # MIB name for the sysctl: {CTL_KERN, KERN_BOOTTIME}.
    mib = (ctypes.c_int * 2)(CTL_KERN, KERN_BOOTTIME)
    tv = timeval()
    size = ctypes.c_size_t(ctypes.sizeof(timeval))
    libc = ctypes.CDLL(glob.glob("/usr/lib/libc.so*")[0], use_errno=True)
    libc.sysctl.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_void_p,
                            ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
    if libc.sysctl(mib, 2, ctypes.addressof(tv),
                   ctypes.addressof(size), 0, 0) == -1:
        raise PFError("Call to sysctl() failed")
    # Uptime = current time - boot time.
    return int(time.time()) - tv.tv_sec
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import struct
import time
from test_framework.messages import (
CBlockHeader,
CInv,
MAX_HEADERS_RESULTS,
MAX_INV_SIZE,
MAX_PROTOCOL_MESSAGE_LENGTH,
msg_getdata,
msg_headers,
msg_inv,
msg_ping,
MSG_TX,
msg_version,
ser_string,
)
from test_framework.p2p import (
P2PDataStore,
P2PInterface,
)
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
)
# Largest payload the node will accept: the protocol message cap minus the
# serialized length prefix that msg_unrecognized.serialize() prepends.
VALID_DATA_LIMIT = MAX_PROTOCOL_MESSAGE_LENGTH - 5  # Account for the 5-byte length prefix
class msg_unrecognized:
    """Nonsensical message. Modeled after similar types in test_framework.messages."""

    msgtype = b'badmsg\x01'

    def __init__(self, *, str_data):
        # Normalize the payload to bytes, whether str or bytes was passed.
        if isinstance(str_data, bytes):
            self.str_data = str_data
        else:
            self.str_data = str_data.encode()

    def serialize(self):
        return ser_string(self.str_data)

    def __repr__(self):
        return "{}(data={})".format(self.msgtype, self.str_data)
class SenderOfAddrV2(P2PInterface):
    """P2P peer that can wait for the node to signal addrv2 support."""

    def wait_for_sendaddrv2(self):
        # Block until the node has sent its sendaddrv2 message.
        self.wait_until(lambda: 'sendaddrv2' in self.last_message)
class InvalidMessagesTest(FujicoinTestFramework):
    """Check that the node tolerates, logs, and penalizes malformed P2P traffic."""

    def set_test_params(self):
        # Single fresh node; whitelist addr relay from localhost so the
        # addrv2 sub-tests see their messages processed.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [["-whitelist=addr@127.0.0.1"]]

    def run_test(self):
        # Each sub-test opens its own P2P connection(s) and disconnects them.
        self.test_buffer()
        self.test_duplicate_version_msg()
        self.test_magic_bytes()
        self.test_checksum()
        self.test_size()
        self.test_msgtype()
        self.test_addrv2_empty()
        self.test_addrv2_no_addresses()
        self.test_addrv2_too_long_address()
        self.test_addrv2_unrecognized_network()
        self.test_oversized_inv_msg()
        self.test_oversized_getdata_msg()
        self.test_oversized_headers_msg()
        self.test_resource_exhaustion()

    def test_buffer(self):
        self.log.info("Test message with header split across two buffers is received")
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        # Create valid message
        msg = conn.build_message(msg_ping(nonce=12345))
        cut_pos = 12  # Chosen at an arbitrary position within the header
        # Send message in two pieces
        before = self.nodes[0].getnettotals()['totalbytesrecv']
        conn.send_raw_message(msg[:cut_pos])
        # Wait until node has processed the first half of the message
        self.wait_until(lambda: self.nodes[0].getnettotals()['totalbytesrecv'] != before)
        middle = self.nodes[0].getnettotals()['totalbytesrecv']
        # If this assert fails, we've hit an unlikely race
        # where the test framework sent a message in between the two halves
        assert_equal(middle, before + cut_pos)
        conn.send_raw_message(msg[cut_pos:])
        conn.sync_with_ping(timeout=1)
        self.nodes[0].disconnect_p2ps()

    def test_duplicate_version_msg(self):
        self.log.info("Test duplicate version message is ignored")
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        with self.nodes[0].assert_debug_log(['redundant version message from peer']):
            conn.send_and_ping(msg_version())
        self.nodes[0].disconnect_p2ps()

    def test_magic_bytes(self):
        self.log.info("Test message with invalid magic bytes disconnects peer")
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        with self.nodes[0].assert_debug_log(['Header error: Wrong MessageStart ffffffff received']):
            msg = conn.build_message(msg_unrecognized(str_data="d"))
            # modify magic bytes
            msg = b'\xff' * 4 + msg[4:]
            conn.send_raw_message(msg)
            conn.wait_for_disconnect(timeout=1)
        self.nodes[0].disconnect_p2ps()

    def test_checksum(self):
        self.log.info("Test message with invalid checksum logs an error")
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        with self.nodes[0].assert_debug_log(['Header error: Wrong checksum (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
            msg = conn.build_message(msg_unrecognized(str_data="d"))
            # Checksum is after start bytes (4B), message type (12B), len (4B)
            cut_len = 4 + 12 + 4
            # modify checksum
            msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
            conn.send_raw_message(msg)
            conn.sync_with_ping(timeout=1)
        # Check that traffic is accounted for (24 bytes header + 2 bytes payload)
        assert_equal(self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 26)
        self.nodes[0].disconnect_p2ps()

    def test_size(self):
        self.log.info("Test message with oversized payload disconnects peer")
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        with self.nodes[0].assert_debug_log(['Header error: Size too large (badmsg, 4000001 bytes)']):
            msg = msg_unrecognized(str_data="d" * (VALID_DATA_LIMIT + 1))
            msg = conn.build_message(msg)
            conn.send_raw_message(msg)
            conn.wait_for_disconnect(timeout=1)
        self.nodes[0].disconnect_p2ps()

    def test_msgtype(self):
        self.log.info("Test message with invalid message type logs an error")
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        with self.nodes[0].assert_debug_log(['Header error: Invalid message type']):
            msg = msg_unrecognized(str_data="d")
            msg = conn.build_message(msg)
            # Modify msgtype
            msg = msg[:7] + b'\x00' + msg[7 + 1:]
            conn.send_raw_message(msg)
            conn.sync_with_ping(timeout=1)
        # Check that traffic is accounted for (24 bytes header + 2 bytes payload)
        assert_equal(self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 26)
        self.nodes[0].disconnect_p2ps()

    def test_addrv2(self, label, required_log_messages, raw_addrv2):
        # Shared driver for the addrv2 sub-tests: send raw addrv2 payload
        # bytes and assert the expected debug-log lines appear.
        node = self.nodes[0]
        conn = node.add_p2p_connection(SenderOfAddrV2())

        # Make sure fujicoind signals support for ADDRv2, otherwise this test
        # will bombard an old node with messages it does not recognize which
        # will produce unexpected results.
        conn.wait_for_sendaddrv2()

        self.log.info('Test addrv2: ' + label)

        msg = msg_unrecognized(str_data=b'')
        msg.msgtype = b'addrv2'
        with node.assert_debug_log(required_log_messages):
            # override serialize() which would include the length of the data
            msg.serialize = lambda: raw_addrv2
            conn.send_raw_message(conn.build_message(msg))
            conn.sync_with_ping()

        node.disconnect_p2ps()

    def test_addrv2_empty(self):
        self.test_addrv2('empty',
            [
                'received: addrv2 (0 bytes)',
                'ProcessMessages(addrv2, 0 bytes): Exception',
                'end of data',
            ],
            b'')

    def test_addrv2_no_addresses(self):
        self.test_addrv2('no addresses',
            [
                'received: addrv2 (1 bytes)',
            ],
            hex_str_to_bytes('00'))

    def test_addrv2_too_long_address(self):
        self.test_addrv2('too long address',
            [
                'received: addrv2 (525 bytes)',
                'ProcessMessages(addrv2, 525 bytes): Exception',
                'Address too long: 513 > 512',
            ],
            hex_str_to_bytes(
                '01' +       # number of entries
                '61bc6649' + # time, Fri Jan  9 02:54:25 UTC 2009
                '00' +       # service flags, COMPACTSIZE(NODE_NONE)
                '01' +       # network type (IPv4)
                'fd0102' +   # address length (COMPACTSIZE(513))
                'ab' * 513 + # address
                '208d'))     # port

    def test_addrv2_unrecognized_network(self):
        now_hex = struct.pack('<I', int(time.time())).hex()
        self.test_addrv2('unrecognized network',
            [
                'received: addrv2 (25 bytes)',
                'IP 9.9.9.9 mapped',
                'Added 1 addresses',
            ],
            hex_str_to_bytes(
                '02' +     # number of entries
                # this should be ignored without impeding acceptance of subsequent ones
                now_hex +  # time
                '01' +     # service flags, COMPACTSIZE(NODE_NETWORK)
                '99' +     # network type (unrecognized)
                '02' +     # address length (COMPACTSIZE(2))
                'ab' * 2 + # address
                '208d' +   # port
                # this should be added:
                now_hex +  # time
                '01' +     # service flags, COMPACTSIZE(NODE_NETWORK)
                '01' +     # network type (IPv4)
                '04' +     # address length (COMPACTSIZE(4))
                '09' * 4 + # address
                '208d'))   # port

    def test_oversized_msg(self, msg, size):
        # Shared driver: an over-limit inv/getdata/headers message must be
        # logged as misbehaving.
        msg_type = msg.msgtype.decode('ascii')
        self.log.info("Test {} message of size {} is logged as misbehaving".format(msg_type, size))
        with self.nodes[0].assert_debug_log(['Misbehaving', '{} message size = {}'.format(msg_type, size)]):
            self.nodes[0].add_p2p_connection(P2PInterface()).send_and_ping(msg)
        self.nodes[0].disconnect_p2ps()

    def test_oversized_inv_msg(self):
        size = MAX_INV_SIZE + 1
        self.test_oversized_msg(msg_inv([CInv(MSG_TX, 1)] * size), size)

    def test_oversized_getdata_msg(self):
        size = MAX_INV_SIZE + 1
        self.test_oversized_msg(msg_getdata([CInv(MSG_TX, 1)] * size), size)

    def test_oversized_headers_msg(self):
        size = MAX_HEADERS_RESULTS + 1
        self.test_oversized_msg(msg_headers([CBlockHeader()] * size), size)

    def test_resource_exhaustion(self):
        self.log.info("Test node stays up despite many large junk messages")
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        conn2 = self.nodes[0].add_p2p_connection(P2PDataStore())
        msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT)
        assert len(msg_at_size.serialize()) == MAX_PROTOCOL_MESSAGE_LENGTH

        self.log.info("(a) Send 80 messages, each of maximum valid data size (4MB)")
        for _ in range(80):
            conn.send_message(msg_at_size)

        # Check that, even though the node is being hammered by nonsense from one
        # connection, it can still service other peers in a timely way.
        self.log.info("(b) Check node still services peers in a timely way")
        for _ in range(20):
            conn2.sync_with_ping(timeout=2)

        self.log.info("(c) Wait for node to drop junk messages, while remaining connected")
        conn.sync_with_ping(timeout=400)

        # Despite being served up a bunch of nonsense, the peers should still be connected.
        assert conn.is_connected
        assert conn2.is_connected
        self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
    # Standard functional-test entry point.
    InvalidMessagesTest().main()
|
|
# original Copyright Ruben Decrop
# modifications by Chessdevil Consulting BVBA

import logging

# Module-level logger for these views.
log = logging.getLogger(__name__)
import simplejson as json
from django.shortcuts import render
from django.db.models import Max
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import (
CdSwarTournament,
CdSwarJson,
CdTournament,
)
from .serializers import (
SwarTournamentSerializer,
SwarJsonSerializer,
SwarJsonSerializerSmall,
TournamentSerializer,
)
from .swarconvert import pairingsfromswar, standingsfromswar
def viewtrn(request, trnshort):
    """Render the tournament viewer page for *trnshort*."""
    return render(request, 'cdp/viewtrn.html', {'trnshort': trnshort})
def managetrn(request):
    """Render the tournament management page."""
    return render(request, 'cdp/managetrn.html')
def manageswar(request):
    """Render the swar file management page."""
    return render(request, 'cdp/manageswar.html')
@api_view(['GET', 'POST'])
def tournament_all(request):
    """List all tournaments (optionally filtered by shortname) or create one."""
    if request.method == 'POST':
        serializer = TournamentSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
    if request.method == 'GET':
        queryset = CdTournament.objects.all()
        shortname = request.GET.get('shortname', None)
        if shortname:
            queryset = queryset.filter(shortname=shortname)
        return Response(TournamentSerializer(queryset, many=True).data)
@api_view(['GET', 'DELETE'])
def tournament_one(request, id_trn):
    """Fetch or delete one swar tournament; DELETE returns the remaining list."""
    try:
        trn = CdSwarTournament.objects.get(id=id_trn)
    except CdSwarTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(SwarTournamentSerializer(trn).data)
    if request.method == 'DELETE':
        trn.delete()
        remaining = CdSwarTournament.objects.all()
        return Response(SwarTournamentSerializer(remaining, many=True).data)
@api_view(['GET'])
def tournament_pairings(request, id_trn, round):
    """Return the pairings of one round (round is zero-based in the API)."""
    try:
        trn = CdTournament.objects.get(id=id_trn)
    except CdTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    # Swar stores rounds one-based.
    swarround = int(round) + 1
    try:
        swartrn = CdSwarTournament.objects.get(tournament=trn)
    except CdSwarTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    try:
        swarjson = CdSwarJson.objects.get(tournament=swartrn, round=swarround,
                                          status='ACT')
    except CdSwarJson.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    trndata = json.loads(swarjson.jsonfile)
    return Response({
        'id_trn': trn.id,
        'tournament': trn.name,
        'round': int(round),
        'pairings': pairingsfromswar(trndata),
    })
@api_view(['GET'])
def tournament_standings(request, id_trn, round):
    """Return the standings after one round (round is zero-based in the API)."""
    try:
        trn = CdTournament.objects.get(id=id_trn)
    except CdTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    # Swar stores rounds one-based.
    swarround = int(round) + 1
    try:
        swartrn = CdSwarTournament.objects.get(tournament=trn)
    except CdSwarTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    try:
        swarjson = CdSwarJson.objects.get(tournament=swartrn, round=swarround,
                                          status='ACT')
    except CdSwarJson.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    trndata = json.loads(swarjson.jsonfile)
    return Response({
        'id_trn': trn.id,
        'tournament': trn.name,
        'round': int(round),
        'standings': standingsfromswar(trndata),
    })
@api_view(['POST'])
def tournament_swar(request, id_trn):
    """
    enable swar on a tournament
    """
    try:
        trn = CdTournament.objects.get(id=id_trn)
    # Bug fix: the lookup above raises CdTournament.DoesNotExist, but the
    # handler previously caught CdSwarTournament.DoesNotExist.  Django's
    # per-model DoesNotExist classes are unrelated, so a missing tournament
    # produced an unhandled exception (HTTP 500) instead of a 404.
    except CdTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'POST':
        swartrn_serializer = SwarTournamentSerializer(data=request.data)
        if swartrn_serializer.is_valid():
            swartrn = swartrn_serializer.save(tournament=trn)
            # Merge the base tournament fields into the swar payload.
            swardata = SwarTournamentSerializer(swartrn).data
            swardata.update(TournamentSerializer(trn).data)
            return Response(swardata)
        else:
            return Response(status=status.HTTP_400_BAD_REQUEST)
    else:
        # api_view() restricts methods to POST, so this branch is a safeguard.
        return Response(status=status.HTTP_404_NOT_FOUND)
@api_view(['GET'])
def swartrn_all(request):
    """Return every swar tournament, merged with its base tournament data."""
    swardata = {
        swartrn.tournament_id: SwarTournamentSerializer(swartrn).data
        for swartrn in CdSwarTournament.objects.all()
    }
    for trn in CdTournament.objects.all():
        if trn.id in swardata:
            swardata[trn.id].update(TournamentSerializer(trn).data)
    return Response(swardata.values())
@api_view(['GET', 'DELETE'])
def swartrn_one(request, id_trn):
    """Fetch or delete one swar tournament, looked up by its tournament id."""
    try:
        trn = CdSwarTournament.objects.get(tournament_id=id_trn)
    except CdSwarTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(SwarTournamentSerializer(trn).data)
    if request.method == 'DELETE':
        trn.delete()
        remaining = CdSwarTournament.objects.all()
        return Response(SwarTournamentSerializer(remaining, many=True).data)
@api_view(['POST'])
def swarfile_publication(request, id_trn, id_swar):
    """Publish one uploaded swar file: retire the currently active file for
    the same round and mark the requested one active.  Returns the refreshed
    file listing."""
    try:
        trn = CdSwarTournament.objects.get(tournament_id=id_trn)
    except CdSwarTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    try:
        swar = CdSwarJson.objects.get(id=id_swar)
    # Bug fix: this was a bare "except:", which also swallowed unrelated
    # errors (including KeyboardInterrupt/SystemExit) and masked real bugs.
    # Only a missing record should be mapped to a 404.
    except CdSwarJson.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    # Retire any previously active file for the same round, then activate
    # the requested one.
    CdSwarJson.objects.filter(tournament=id_trn, status='ACT',
                              round=swar.round).update(status='OUT')
    CdSwarJson.objects.filter(tournament=id_trn, id=id_swar).update(status='ACT')
    # TODO create a optimized publishing JSON string
    swarjsons = CdSwarJson.objects.filter(tournament=id_trn).order_by(
        '-round', '-uploaddate')
    ss = SwarJsonSerializerSmall(swarjsons, many=True)
    return Response(ss.data)
@api_view(['GET', 'POST'])
def swarfile_all(request, id_trn):
    """List all uploaded swar files of a tournament, or add a new one."""
    try:
        trn = CdSwarTournament.objects.get(tournament_id=id_trn)
    except CdSwarTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    def _file_listing():
        # Newest round / newest upload first, without the heavy jsonfile body.
        swarjsons = CdSwarJson.objects.filter(tournament=id_trn).order_by(
            '-round', '-uploaddate')
        return SwarJsonSerializerSmall(swarjsons, many=True).data

    if request.method == 'POST':
        # add a new Swar file
        data = request.data
        data['tournament'] = id_trn
        swar_serializer = SwarJsonSerializer(data=data)
        if swar_serializer.is_valid():
            swar_serializer.save()
            trn.swarname = swar_serializer.data['name']
            trn.save()
            return Response(_file_listing())
        else:
            # Bug fix: log the validation errors, not the submitted data —
            # .errors is what diagnoses why the upload was rejected.
            log.debug('invalid swar file: %s', swar_serializer.errors)
            return Response(status=status.HTTP_400_BAD_REQUEST)
    if request.method == 'GET':
        # get all swarupload files
        return Response(_file_listing())
@api_view(['GET', 'DELETE'])
def swarfile_one(request, id_trn, id_swar):
    """Fetch or delete one uploaded swar file; DELETE returns the new listing."""
    try:
        swar = CdSwarJson.objects.get(tournament=id_trn, id=id_swar)
    except CdSwarJson.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(SwarJsonSerializer(swar).data)
    if request.method == 'DELETE':
        swar.delete()
        remaining = CdSwarJson.objects.filter(tournament=id_trn).order_by(
            '-round', '-uploaddate')
        return Response(SwarJsonSerializerSmall(remaining, many=True).data)
@api_view(['GET'])
def topround(request, id_trn):
    """Return the top published round of a tournament: max(round) - 1.

    :param id_trn: primary key of the CdTournament
    :returns: 200 with the round number (0 when no uploads exist), or 404
        if the tournament or its Swar counterpart is missing
    """
    try:
        trn = CdTournament.objects.get(id=id_trn)
    except CdTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    try:
        swartrn = CdSwarTournament.objects.get(tournament=trn)
    except CdSwarTournament.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    swarjsons = CdSwarJson.objects.filter(tournament=swartrn)
    # BUG FIX: `aggregate()` always returns the 'topround' key, so the old
    # `.get('topround', 1)` never fell back; when there are no uploads the
    # value is None and `None - 1` raised TypeError. Treat "no uploads" as
    # round 1 so the response is 0.
    max_round = swarjsons.aggregate(topround=Max('round'))['topround']
    return Response((max_round or 1) - 1)
|
|
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
This is an integration test for the use of dynamic record sizes in TLS connections.
The function s2n_connection_set_dynamic_record_threshold can be used to dynamically change the size of TCP packets to
optimize for both latency and throughput. This is done by first setting a threshold, until this threshold has been
met the connection uses small TLS records that fit into a single TCP segment (mss). This optimizes the connection
for low latency. Once the number of bytes transferred in the connection has met this threshold, the size of the
records can exceed this maximum bound - in order to optimize throughput.
This test sets up an s2nc connection against OpenSSL s_server to transfer a test file using each of the cipher
suites supported. In each connection, we test:
- That all segment sizes before the threshold is met are less than the mss (usually mss = 1460B, but if the mss cannot
be obtained a default of 65496B is used).
- That at least one segment size after the threshold has been met is greater than 1500B.
"""
import argparse
import os
import sys
import subprocess
import itertools
import multiprocessing
from multiprocessing.pool import ThreadPool
from os import environ
from s2n_test_constants import *
from time import sleep
# Map s2n protocol-version constants to the matching OpenSSL s_server flag.
PROTO_VERS_TO_S_SERVER_ARG = {
    S2N_TLS10: "-tls1",
    S2N_TLS11: "-tls1_1",
    S2N_TLS12: "-tls1_2",
}
# Payload transferred through the tunnel; main() verifies that its size
# exceeds the dynamic-record threshold, otherwise the test is meaningless.
test_file = './data/test_buf'
file_size = os.path.getsize(test_file)  # size in bytes of the test payload
def cleanup_processes(*processes):
    """Forcefully terminate every given subprocess and reap it.

    Each process is killed first and then waited on, so no zombies are
    left behind.
    """
    for proc in processes:
        proc.kill()
        proc.wait()
def try_dynamic_record(endpoint, port, cipher, ssl_version, threshold, server_cert=None, server_key=None, sig_algs=None, curves=None, dh_params=None, fips_mode=False):
    """
    Attempt to handshake against Openssl s_server listening on `endpoint` and `port` using s2nc
    :param int endpoint: endpoint for Openssl s_server to listen on
    :param int port: port for Openssl s_server to listen on
    :param str cipher: ciphers for Openssl s_server to use. See https://www.openssl.org/docs/man1.0.2/apps/ciphers.html
    :param int ssl_version: SSL version for Openssl s_server to use
    :param int threshold: the number of bytes sent before switch over from low latency to high throughput
    :param str server_cert: path to certificate for Openssl s_server to use
    :param str server_key: path to private key for Openssl s_server to use
    :param str sig_algs: Signature algorithms for Openssl s_server to accept
    :param str curves: Elliptic curves for Openssl s_server to accept
    :param str dh_params: path to DH params for Openssl s_server to use
    :param fips_mode: if s2n client has to enable FIPS mode in the underlying crypto library
    :return: 0 on successfully negotiation(s), -1 on failure, -2 when
        s_server rejects the cipher ("no cipher match")
    """
    # Override certificate for ECDSA if unspecified. We can remove this when we
    # support multiple certificates
    if server_cert is None and cipher is not None and "ECDSA" in cipher:
        server_cert = TEST_ECDSA_CERT
        server_key = TEST_ECDSA_KEY
    if server_cert is None:
        server_cert = TEST_RSA_CERT
        server_key = TEST_RSA_KEY
    if dh_params is None:
        dh_params = TEST_DH_PARAMS
    # Start Openssl s_server
    s_server_cmd = ["openssl", "s_server", PROTO_VERS_TO_S_SERVER_ARG[ssl_version],
                    "-accept", str(port)]
    if server_cert is not None:
        s_server_cmd.extend(["-cert", server_cert])
    if server_key is not None:
        s_server_cmd.extend(["-key", server_key])
    if cipher is not None:
        s_server_cmd.extend(["-cipher", cipher])
    if sig_algs is not None:
        s_server_cmd.extend(["-sigalgs", sig_algs])
    if curves is not None:
        s_server_cmd.extend(["-curves", curves])
    if dh_params is not None:
        s_server_cmd.extend(["-dhparam", dh_params])
    # Fire up s_server
    s_server = subprocess.Popen(s_server_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Make sure it's accepting
    found = 0
    for line in range(0, 10):
        output = s_server.stdout.readline().decode("utf-8")
        if output.strip() == "ACCEPT":
            # Openssl first prints ACCEPT and only then actually binds the socket, so wait for a bit...
            sleep(0.1)
            found = 1
            break
    if not found:
        server_error = s_server.stderr.read().decode("utf-8")
        # BUG FIX: always reap s_server on the failure paths; previously it
        # was leaked when the cipher was unsupported (-2 return).
        cleanup_processes(s_server)
        if "no cipher match" in server_error:
            # print ("Skipped unsupported cipher: {}".format(cipher))
            return -2
        sys.stderr.write("Failed to start s_server: {}\nSTDERR: {}\n".format(" ".join(s_server_cmd), server_error))
        return -1
    # Fire up s2nc
    # print("\n\tRunning s2n dynamic record size tests with threshold:", threshold)
    s2nc_cmd = ["../../bin/s2nc", "-e", "-D", str(threshold), "-t", "1", "-c", "test_all", "-i"]
    if fips_mode:
        s2nc_cmd += ["--enter-fips-mode"]
    s2nc_cmd.extend([str(endpoint), str(port)])
    # BUG FIX: the input file was opened and never closed; scope it with a
    # context manager around the transfer.
    with open(test_file) as file_input:
        s2nc = subprocess.Popen(s2nc_cmd, stdin=file_input, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
        # Wait file send complete
        s2nc.wait()
    cleanup_processes(s_server)
    # Read from s2nc until we get successful connection message
    found = 0
    right_version = 0
    for line in s2nc.stdout:
        if line.strip() == "Connected to {}:{}".format(endpoint, port):
            found = 1
        if ACTUAL_VERSION_STR.format(ssl_version or S2N_TLS12) in line:
            right_version = 1
    if not found or not right_version:
        sys.stderr.write("= TEST FAILED =\ns_server cmd: {}\n s_server STDERR: {}\n\ns2nc cmd: {}\nSTDERR {}\n".format(" ".join(s_server_cmd), s_server.stderr.read(), " ".join(s2nc_cmd), s2nc.stderr.read()))
        return -1
    return 0
def print_result(result_prefix, return_code):
    """Print `result_prefix` followed by a PASSED/FAILED verdict.

    A zero `return_code` means PASSED. When stdout is a terminal the
    verdict is colorized (green for pass, red for fail).
    """
    passed = return_code == 0
    if sys.stdout.isatty():
        verdict = "\033[32;1mPASSED\033[0m" if passed else "\033[31;1mFAILED\033[0m"
    else:
        verdict = "PASSED" if passed else "FAILED"
    print(result_prefix + verdict)
def run_test(host, port, ssl_version, cipher, threshold, fips_mode):
    """Run one dynamic-record-size test for a single cipher.

    Captures loopback traffic with tcpdump while try_dynamic_record()
    performs the handshake and file transfer, then checks the captured
    segment sizes with analyze_tcp_dump() and prints a PASSED/FAILED line.

    :return: 0 on pass (also when tcpdump produced no output, which is
        treated as a skip), non-zero on failure
    """
    cipher_name = cipher.openssl_name
    failed = 0
    # Capture only traffic headed to the server's port on loopback.
    tcpdump_filter = "dst port " + str(port)
    tcpdump_cmd = ["sudo", "tcpdump", "-l", "-i", "lo", "-n", "-B", "65535", tcpdump_filter]
    tcpdump = subprocess.Popen(tcpdump_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    ret = try_dynamic_record(host, port, cipher_name, ssl_version, threshold, fips_mode=fips_mode)
    # wait for pipe ready
    sleep(2)
    # tcpdump runs under sudo, so it must be killed via sudo as well.
    subprocess.call(["sudo", "killall", "-9", "tcpdump"])
    out = tcpdump.communicate()[0].decode("utf-8")
    if out == '':
        print ("No output from PIPE, skip")
        return 0
    out_array = out.split('\n')
    # Skip no cipher match error (-2 from try_dynamic_record is not a failure)
    if ret != -2:
        failed += ret
    if 0 == ret:
        # Only analyze the capture when the handshake/transfer succeeded.
        failed += analyze_tcp_dump(out_array, threshold)
    result_prefix = "Cipher: %-28s Vers: %-8s ... " % (cipher_name, S2N_PROTO_VERS_TO_STR[ssl_version])
    print_result(result_prefix, failed)
    return failed
def test(host, port, test_ciphers, threshold, fips_mode):
    """Run the dynamic-record test at TLS 1.2 for every eligible cipher.

    Ciphers that are not OpenSSL-1.1.1 compatible, or that require a newer
    protocol than TLS 1.2, are skipped. Stops at the first failing cipher.

    :return: number of failed ciphers (0 or 1)
    """
    ssl_version = S2N_TLS12
    for cipher in test_ciphers:
        if not cipher.openssl_1_1_1_compatible:
            continue
        if ssl_version < cipher.min_tls_vers:
            continue
        if run_test(host, port, ssl_version, cipher, threshold, fips_mode) != 0:
            return 1
    return 0
def analyze_tcp_dump(array, threshold):
    """
    This function iterates though each line of the TCP dump and reads the length of each segment, to check that it is
    the correct size. It keeps account the total bytes_transferred and therefore tests that all segment sizes before
    the threshold is met are less than the maximum segment size (mss). Once this threshold has been met, it tests
    that the segment size has been increased by verifying that at least one segment is greater than MTU_bytes.
    The mss is read from the first message in the TCP dump, however if this cannot be found a default value is used.
    :param list array: this is an array of strings where each list element is a segment from the TCP dump
    :param int threshold: the number of bytes sent before switch over from low latency to high throughput mode
    :return: 0 when a post-threshold segment exceeded MTU_bytes (pass),
        1 otherwise (fail)
    """
    # Assume failure until a large post-threshold segment proves otherwise.
    failed = 1
    bytes_transferred = 0
    array_len = len(array)
    MTU_bytes = 1500
    # get the mss from first message
    # Default mss = local MTU minus 40 bytes of IP+TCP headers.
    mss = get_local_mtu() - 40
    first_line = array[0]
    if "mss" in first_line:
        # tcpdump prints the SYN options as e.g. "... mss 1460, ...";
        # slice out the digits between "mss " and the following comma.
        mss_pos = first_line.find("mss")
        mss_str = first_line[mss_pos : mss_pos + 10]
        mss = int(mss_str[4 : mss_str.find(',')])
    else:
        print ("using default mss")
    for i in range(0, array_len):
        # record the length of each packet in TCP dump
        pos = array[i].find("length")
        if pos < 0:
            continue
        # Everything after "length" is the byte count; int() tolerates the
        # leading space.
        length = array[i][pos + 6 : len(array[i])]
        bytes_transferred += int(length)
        # NOTE(review): bytes_transferred already includes the current
        # packet when compared against the threshold below.
        # optimized for latency - before the threshold has been met, the TCP packet size should always <= mss
        if bytes_transferred < threshold and int(length) > mss:
            # if this condition has been met, the length of a segment is greater than the mss, but the threshold has
            # not been met, so we return with failed = 1
            break
        elif bytes_transferred > threshold and int(length) > MTU_bytes:
            # optimized for throughput - after the threshold has been met TCP packet size can exceed MTU, which results in segmentation
            failed = 0  # we just need a single packet to be larger than MTU_bytes to show dynamic record size.
            break
    return failed
def get_local_mtu():
    """Return the MTU of the loopback interface, in bytes.

    Parses the output of `ifconfig lo`. Both the legacy net-tools format
    ("... MTU:65536 ...") and the modern format ("... mtu 65536") are
    recognized; previously only the legacy format at a fixed word position
    within the first five lines was handled. Falls back to 65536 when the
    MTU cannot be determined.
    """
    cmd = ["ifconfig", "lo"]
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() drains and closes the pipes and reaps the process,
    # instead of reading a fixed number of lines and leaking the rest.
    out, _ = p.communicate()
    mtu = 65536
    for line in out.decode("utf-8").splitlines():
        words = line.split()
        for i, word in enumerate(words):
            if word.startswith("MTU:"):
                # legacy net-tools format: "... MTU:65536 ..."
                mtu = word.split(':')[1]
            elif word == "mtu" and i + 1 < len(words):
                # iproute2-style format: "... mtu 65536"
                mtu = words[i + 1]
    return int(mtu)
def main():
    """Entry point: run the dynamic-record-size suite against s_server.

    Temporarily sets the loopback MTU to 1500 (via sudo) to simulate a
    common network MTU, runs the cipher tests, then restores the MTU.

    :return: 0 on success, non-zero failure count otherwise
    """
    parser = argparse.ArgumentParser(description='Runs TLS server integration tests against Openssl s_server using s2nc')
    parser.add_argument('host', help='The host for s2nc to connect to')
    parser.add_argument('port', type=int, help='The port for s_server to bind to')
    parser.add_argument('--libcrypto', default='openssl-1.1.1', choices=S2N_LIBCRYPTO_CHOICES,
                        help="""The Libcrypto that s2n was built with. s2n supports different cipher suites depending on
                        libcrypto version. Defaults to openssl-1.1.1.""")
    args = parser.parse_args()
    fips_mode = False
    if environ.get("S2N_TEST_IN_FIPS_MODE") is not None:
        fips_mode = True
        print("\nRunning s2nd in FIPS mode.")
    # Retrieve the test ciphers to use based on the libcrypto version s2n was built with
    test_ciphers = S2N_LIBCRYPTO_TO_TEST_CIPHERS[args.libcrypto]
    host = args.host
    port = args.port
    # Remember the real loopback MTU so it can be restored afterwards.
    local_mtu = get_local_mtu()
    # Simulate common MTU 1500
    subprocess.call(["sudo", "ifconfig", "lo", "mtu", "1500"])
    failed = 0
    print("\n\tRunning s2n dynamic record size tests\n\t")
    # Set the threshold - the number of bytes transferred in low latency mode before switching to high throughput
    threshold = 10000
    # test that the file size of the test file is greater than the threshold (otherwise we cannot implement the test)
    if file_size < threshold:
        failed = 1
        print ("test file: %s file size too small (less than threshold of 10KB)" % test_file)
        return failed
    failed += test(host, port, test_ciphers, threshold, fips_mode)
    # Recover localhost MTU
    subprocess.call(["sudo", "ifconfig", "lo", "mtu", str(local_mtu)])
    # print_result("TLS dynamic record size test " , failed)
    return failed
if __name__ == "__main__":
sys.exit(main())
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pylons import tmpl_context as c
from datadiff.tools import assert_equal
from mock import patch
from allura.lib import helpers as h
from allura.tests import decorators as td
from allura import model as M
from alluratest.controller import TestRestApiBase
from forgetracker import model as TM
class TestTrackerApiBase(TestRestApiBase):
    """Base class for tracker REST API tests.

    Mounts a 'bugs' Tickets tool on the 'test' project (with ticket-change
    monitoring enabled) and provides a helper to create a minimal ticket.
    """

    def setUp(self):
        super(TestTrackerApiBase, self).setUp()
        self.setup_with_tools()

    @td.with_tool('test', 'Tickets', 'bugs',
                  TicketMonitoringEmail='test@localhost',
                  TicketMonitoringType='AllTicketChanges')
    def setup_with_tools(self):
        # Point the request context at the freshly mounted tracker so tests
        # can read its globals (open status names, milestones, ...).
        h.set_context('test', 'bugs', neighborhood='Projects')
        self.tracker_globals = c.app.globals

    def create_ticket(self):
        """POST a minimal valid ticket and return the API response."""
        return self.api_post(
            '/rest/p/test/bugs/new',
            wrap_args='ticket_form',
            params=dict(
                summary='test new ticket',
                status=self.tracker_globals.open_status_names.split()[0],
                labels='',
                description='',
                assigned_to='',
                **{'custom_fields._milestone': ''})
        )
class TestRestNewTicket(TestTrackerApiBase):
    """REST tests for creating tickets via /rest/p/test/bugs/new."""

    def test_new_ticket(self):
        summary = 'test new ticket'
        params = dict(
            summary=summary,
            status=self.tracker_globals.open_status_names.split()[0],
            labels='foo,bar',
            description='descr',
            assigned_to='')
        params['custom_fields._milestone'] = ''
        ticket_view = self.api_post(
            '/rest/p/test/bugs/new',
            wrap_args='ticket_form',
            params=params)
        json = ticket_view.json['ticket']
        # The created ticket must echo back everything we posted, plus the
        # authenticated reporter.
        assert json['status'] == 'open', json
        assert json['summary'] == 'test new ticket', json
        assert json['reported_by'] == 'test-admin'
        assert json['labels'] == ['foo', 'bar'], json
        assert json['description'] == 'descr', json
        assert json['private'] == False, json

    def test_invalid_ticket(self):
        # A ticket that was never created must 404.
        self.app.get('/rest/p/test/bugs/2', status=404)
class TestRestUpdateTicket(TestTrackerApiBase):
    """REST tests for saving changes to an existing ticket."""

    def setUp(self):
        super(TestRestUpdateTicket, self).setUp()
        self.ticket_args = self.create_ticket().json['ticket']

    def test_update_ticket(self):
        args = dict(self.ticket_args, summary='test update ticket', labels='',
                    assigned_to=self.ticket_args['assigned_to_id'] or '')
        # Server-managed fields must not be posted back on save.
        read_only_fields = ('ticket_num', 'assigned_to_id', 'created_date',
                            'reported_by', 'reported_by_id', '_id',
                            'votes_up', 'votes_down')
        for field in read_only_fields:
            del args[field]
        args['private'] = str(args['private'])
        ticket_view = self.api_post(
            '/rest/p/test/bugs/1/save', wrap_args='ticket_form', params=h.encode_keys(args))
        assert ticket_view.status_int == 200, ticket_view.showbrowser()
        json = ticket_view.json['ticket']
        assert int(json['ticket_num']) == 1
        assert json['summary'] == 'test update ticket', json
class TestRestIndex(TestTrackerApiBase):
    """REST tests for the tracker index endpoint and ticket moves."""

    def setUp(self):
        super(TestRestIndex, self).setUp()
        self.create_ticket()

    def test_ticket_index(self):
        """The index exposes tickets, tool config, saved bins and milestones."""
        tickets = self.api_get('/rest/p/test/bugs/')
        assert len(tickets.json['tickets']) == 1, tickets.json
        assert (tickets.json['tickets'][0]
                == dict(ticket_num=1, summary='test new ticket')), tickets.json['tickets'][0]
        assert tickets.json['tracker_config'][
            'options']['mount_point'] == 'bugs'
        assert tickets.json['tracker_config']['options'][
            'TicketMonitoringType'] == 'AllTicketChanges'
        assert not tickets.json['tracker_config']['options']['EnableVoting']
        assert tickets.json['tracker_config']['options'][
            'TicketMonitoringEmail'] == 'test@localhost'
        assert tickets.json['tracker_config'][
            'options']['mount_label'] == 'Tickets'
        # Default saved search bin and the default milestones created with
        # every new tracker.
        assert tickets.json['saved_bins'][0]['sort'] == 'mod_date_dt desc'
        assert tickets.json['saved_bins'][0][
            'terms'] == '!status:wont-fix && !status:closed'
        assert tickets.json['saved_bins'][0]['summary'] == 'Changes'
        assert len(tickets.json['saved_bins'][0]) == 4
        assert tickets.json['milestones'][0]['name'] == '1.0'
        assert tickets.json['milestones'][1]['name'] == '2.0'

    def test_ticket_index_noauth(self):
        """Anonymous users must not see the monitoring email address."""
        tickets = self.api_get('/rest/p/test/bugs', user='*anonymous')
        assert 'TicketMonitoringEmail' not in tickets.json[
            'tracker_config']['options']
        # make sure it didn't get removed from the db too
        ticket_config = M.AppConfig.query.get(
            project_id=c.project._id, tool_name='tickets')
        assert_equal(ticket_config.options.get('TicketMonitoringEmail'),
                     'test@localhost')

    @td.with_tool('test', 'Tickets', 'dummy')
    def test_move_ticket_redirect(self):
        """Fetching a moved ticket redirects to its new tracker URL."""
        p = M.Project.query.get(shortname='test')
        dummy_tracker = p.app_instance('dummy')
        self.app.post(
            '/p/test/bugs/1/move',
            params={'tracker': str(dummy_tracker.config._id)}).follow()
        ticket = self.api_get('/rest/p/test/bugs/1/')
        assert_equal(ticket.request.path, '/rest/p/test/dummy/1/')
class TestRestDiscussion(TestTrackerApiBase):
    """REST tests for a ticket's discussion threads and posts."""

    def setUp(self):
        super(TestRestDiscussion, self).setUp()
        ticket_view = self.create_ticket()
        self.ticket_args = ticket_view.json['ticket']

    def test_index(self):
        """A new ticket gets exactly one (empty) discussion thread."""
        r = self.api_get('/rest/p/test/bugs/_discuss/')
        assert len(r.json['discussion']['threads']) == 1, r.json
        for t in r.json['discussion']['threads']:
            r = self.api_get('/rest/p/test/bugs/_discuss/thread/%s/' %
                             t['_id'])
            assert len(r.json['thread']['posts']) == 0, r.json

    def test_post(self):
        """Posting a comment and a reply grows the thread accordingly."""
        discussion = self.api_get(
            '/rest/p/test/bugs/_discuss/').json['discussion']
        post = self.api_post(
            '/rest/p/test/bugs/_discuss/thread/%s/new' % discussion['threads'][0]['_id'],
            text='This is a comment', wrap_args=None)
        thread = self.api_get('/rest/p/test/bugs/_discuss/thread/%s/' %
                              discussion['threads'][0]['_id'])
        assert len(thread.json['thread']['posts']) == 1, thread.json
        assert post.json['post']['text'] == 'This is a comment', post.json
        # Reply to the comment we just created.
        reply = self.api_post(
            '/rest/p/test/bugs/_discuss/thread/%s/%s/reply' % (thread.json['thread']
                                                               ['_id'], post.json['post']['slug']),
            text='This is a reply', wrap_args=None)
        assert reply.json['post']['text'] == 'This is a reply', reply.json
        thread = self.api_get('/rest/p/test/bugs/_discuss/thread/%s/' %
                              discussion['threads'][0]['_id'])
        assert len(thread.json['thread']['posts']) == 2, thread.json
class TestRestSearch(TestTrackerApiBase):
    """REST tests for /rest/p/test/bugs/search with a mocked search backend."""

    @patch('forgetracker.model.Ticket.paged_search')
    def test_no_criteria(self, paged_search):
        # With no query params, only the tickets list is returned.
        paged_search.return_value = dict(tickets=[
            TM.Ticket(ticket_num=5, summary='our test ticket'),
        ])
        r = self.api_get('/rest/p/test/bugs/search')
        assert_equal(r.status_int, 200)
        assert_equal(r.json, {'tickets': [
            {'summary': 'our test ticket', 'ticket_num': 5},
        ]})

    @patch('forgetracker.model.Ticket.paged_search')
    def test_some_criteria(self, paged_search):
        # Query/sort/paging criteria are echoed back in the response.
        q = 'labels:testing && status:open'
        paged_search.return_value = dict(tickets=[
            TM.Ticket(ticket_num=5, summary='our test ticket'),
        ],
            sort='status',
            limit=2,
            count=1,
            page=0,
            q=q,
        )
        r = self.api_get('/rest/p/test/bugs/search',
                         q=q, sort='status', limit='2')
        assert_equal(r.status_int, 200)
        assert_equal(r.json, {'limit': 2, 'q': q, 'sort': 'status', 'count': 1,
                              'page': 0, 'tickets': [
                                  {'summary': 'our test ticket',
                                   'ticket_num': 5},
                              ]
                              })
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type  # Python 2/3 compatibility: force new-style classes
# Standard Ansible module metadata: community-supported module in preview.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_vpn_ipsec_phase1_interface
short_description: Configure VPN remote gateway in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify vpn_ipsec feature and phase1_interface category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
vpn_ipsec_phase1_interface:
description:
- Configure VPN remote gateway.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
acct_verify:
description:
- Enable/disable verification of RADIUS accounting record.
type: str
choices:
- enable
- disable
add_gw_route:
description:
- Enable/disable automatically add a route to the remote gateway.
type: str
choices:
- enable
- disable
add_route:
description:
- Enable/disable control addition of a route to peer destination selector.
type: str
choices:
- disable
- enable
assign_ip:
description:
- Enable/disable assignment of IP to IPsec interface via configuration method.
type: str
choices:
- disable
- enable
assign_ip_from:
description:
- Method by which the IP address will be assigned.
type: str
choices:
- range
- usrgrp
- dhcp
- name
authmethod:
description:
- Authentication method.
type: str
choices:
- psk
- signature
authmethod_remote:
description:
- Authentication method (remote side).
type: str
choices:
- psk
- signature
authpasswd:
description:
- XAuth password (max 35 characters).
type: str
authusr:
description:
- XAuth user name.
type: str
authusrgrp:
description:
- Authentication user group. Source user.group.name.
type: str
auto_discovery_forwarder:
description:
- Enable/disable forwarding auto-discovery short-cut messages.
type: str
choices:
- enable
- disable
auto_discovery_psk:
description:
- Enable/disable use of pre-shared secrets for authentication of auto-discovery tunnels.
type: str
choices:
- enable
- disable
auto_discovery_receiver:
description:
- Enable/disable accepting auto-discovery short-cut messages.
type: str
choices:
- enable
- disable
auto_discovery_sender:
description:
- Enable/disable sending auto-discovery short-cut messages.
type: str
choices:
- enable
- disable
auto_negotiate:
description:
- Enable/disable automatic initiation of IKE SA negotiation.
type: str
choices:
- enable
- disable
backup_gateway:
description:
- Instruct unity clients about the backup gateway address(es).
type: list
suboptions:
address:
description:
- Address of backup gateway.
required: true
type: str
banner:
description:
- Message that unity client should display after connecting.
type: str
cert_id_validation:
description:
- Enable/disable cross validation of peer ID and the identity in the peer's certificate as specified in RFC 4945.
type: str
choices:
- enable
- disable
certificate:
description:
- The names of up to 4 signed personal certificates.
type: list
suboptions:
name:
description:
- Certificate name. Source vpn.certificate.local.name.
required: true
type: str
childless_ike:
description:
- Enable/disable childless IKEv2 initiation (RFC 6023).
type: str
choices:
- enable
- disable
client_auto_negotiate:
description:
- Enable/disable allowing the VPN client to bring up the tunnel when there is no traffic.
type: str
choices:
- disable
- enable
client_keep_alive:
description:
- Enable/disable allowing the VPN client to keep the tunnel up when there is no traffic.
type: str
choices:
- disable
- enable
comments:
description:
- Comment.
type: str
default_gw:
description:
- IPv4 address of default route gateway to use for traffic exiting the interface.
type: str
default_gw_priority:
description:
- Priority for default gateway route. A higher priority number signifies a less preferred route.
type: int
dhgrp:
description:
- DH group.
type: str
choices:
- 1
- 2
- 5
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 27
- 28
- 29
- 30
- 31
digital_signature_auth:
description:
- Enable/disable IKEv2 Digital Signature Authentication (RFC 7427).
type: str
choices:
- enable
- disable
distance:
description:
- Distance for routes added by IKE (1 - 255).
type: int
dns_mode:
description:
- DNS server mode.
type: str
choices:
- manual
- auto
domain:
description:
- Instruct unity clients about the default DNS domain.
type: str
dpd:
description:
- Dead Peer Detection mode.
type: str
choices:
- disable
- on-idle
- on-demand
dpd_retrycount:
description:
- Number of DPD retry attempts.
type: int
dpd_retryinterval:
description:
- DPD retry interval.
type: str
eap:
description:
- Enable/disable IKEv2 EAP authentication.
type: str
choices:
- enable
- disable
eap_identity:
description:
- IKEv2 EAP peer identity type.
type: str
choices:
- use-id-payload
- send-request
encap_local_gw4:
description:
- Local IPv4 address of GRE/VXLAN tunnel.
type: str
encap_local_gw6:
description:
- Local IPv6 address of GRE/VXLAN tunnel.
type: str
encap_remote_gw4:
description:
- Remote IPv4 address of GRE/VXLAN tunnel.
type: str
encap_remote_gw6:
description:
- Remote IPv6 address of GRE/VXLAN tunnel.
type: str
encapsulation:
description:
- Enable/disable GRE/VXLAN encapsulation.
type: str
choices:
- none
- gre
- vxlan
encapsulation_address:
description:
- Source for GRE/VXLAN tunnel address.
type: str
choices:
- ike
- ipv4
- ipv6
enforce_unique_id:
description:
- Enable/disable peer ID uniqueness check.
type: str
choices:
- disable
- keep-new
- keep-old
exchange_interface_ip:
description:
- Enable/disable exchange of IPsec interface IP address.
type: str
choices:
- enable
- disable
exchange_ip_addr4:
description:
- IPv4 address to exchange with peers.
type: str
exchange_ip_addr6:
description:
- IPv6 address to exchange with peers
type: str
forticlient_enforcement:
description:
- Enable/disable FortiClient enforcement.
type: str
choices:
- enable
- disable
fragmentation:
description:
- Enable/disable fragment IKE message on re-transmission.
type: str
choices:
- enable
- disable
fragmentation_mtu:
description:
- IKE fragmentation MTU (500 - 16000).
type: int
group_authentication:
description:
- Enable/disable IKEv2 IDi group authentication.
type: str
choices:
- enable
- disable
group_authentication_secret:
description:
- Password for IKEv2 IDi group authentication. (ASCII string or hexadecimal indicated by a leading 0x.)
type: str
ha_sync_esp_seqno:
description:
- Enable/disable sequence number jump ahead for IPsec HA.
type: str
choices:
- enable
- disable
idle_timeout:
description:
- Enable/disable IPsec tunnel idle timeout.
type: str
choices:
- enable
- disable
idle_timeoutinterval:
description:
- IPsec tunnel idle timeout in minutes (5 - 43200).
type: int
ike_version:
description:
- IKE protocol version.
type: str
choices:
- 1
- 2
include_local_lan:
description:
- Enable/disable allow local LAN access on unity clients.
type: str
choices:
- disable
- enable
interface:
description:
- Local physical, aggregate, or VLAN outgoing interface. Source system.interface.name.
type: str
ip_version:
description:
- IP version to use for VPN interface.
type: str
choices:
- 4
- 6
ipv4_dns_server1:
description:
- IPv4 DNS server 1.
type: str
ipv4_dns_server2:
description:
- IPv4 DNS server 2.
type: str
ipv4_dns_server3:
description:
- IPv4 DNS server 3.
type: str
ipv4_end_ip:
description:
- End of IPv4 range.
type: str
ipv4_exclude_range:
description:
- Configuration Method IPv4 exclude ranges.
type: list
suboptions:
end_ip:
description:
- End of IPv4 exclusive range.
type: str
id:
description:
- ID.
required: true
type: int
start_ip:
description:
- Start of IPv4 exclusive range.
type: str
ipv4_name:
description:
- IPv4 address name. Source firewall.address.name firewall.addrgrp.name.
type: str
ipv4_netmask:
description:
- IPv4 Netmask.
type: str
ipv4_split_exclude:
description:
- IPv4 subnets that should not be sent over the IPsec tunnel. Source firewall.address.name firewall.addrgrp.name.
type: str
ipv4_split_include:
description:
- IPv4 split-include subnets. Source firewall.address.name firewall.addrgrp.name.
type: str
ipv4_start_ip:
description:
- Start of IPv4 range.
type: str
ipv4_wins_server1:
description:
- WINS server 1.
type: str
ipv4_wins_server2:
description:
- WINS server 2.
type: str
ipv6_dns_server1:
description:
- IPv6 DNS server 1.
type: str
ipv6_dns_server2:
description:
- IPv6 DNS server 2.
type: str
ipv6_dns_server3:
description:
- IPv6 DNS server 3.
type: str
ipv6_end_ip:
description:
- End of IPv6 range.
type: str
ipv6_exclude_range:
description:
- Configuration method IPv6 exclude ranges.
type: list
suboptions:
end_ip:
description:
- End of IPv6 exclusive range.
type: str
id:
description:
- ID.
required: true
type: int
start_ip:
description:
- Start of IPv6 exclusive range.
type: str
ipv6_name:
description:
- IPv6 address name. Source firewall.address6.name firewall.addrgrp6.name.
type: str
ipv6_prefix:
description:
- IPv6 prefix.
type: int
ipv6_split_exclude:
description:
- IPv6 subnets that should not be sent over the IPsec tunnel. Source firewall.address6.name firewall.addrgrp6.name.
type: str
ipv6_split_include:
description:
- IPv6 split-include subnets. Source firewall.address6.name firewall.addrgrp6.name.
type: str
ipv6_start_ip:
description:
- Start of IPv6 range.
type: str
keepalive:
description:
- NAT-T keep alive interval.
type: int
keylife:
description:
- Time to wait in seconds before phase 1 encryption key expires.
type: int
local_gw:
description:
- IPv4 address of the local gateway's external interface.
type: str
local_gw6:
description:
- IPv6 address of the local gateway's external interface.
type: str
localid:
description:
- Local ID.
type: str
localid_type:
description:
- Local ID type.
type: str
choices:
- auto
- fqdn
- user-fqdn
- keyid
- address
- asn1dn
mesh_selector_type:
description:
- Add selectors containing subsets of the configuration depending on traffic.
type: str
choices:
- disable
- subnet
- host
mode:
description:
- The ID protection mode used to establish a secure channel.
type: str
choices:
- aggressive
- main
mode_cfg:
description:
- Enable/disable configuration method.
type: str
choices:
- disable
- enable
monitor:
description:
- IPsec interface as backup for primary interface. Source vpn.ipsec.phase1-interface.name.
type: str
monitor_hold_down_delay:
description:
- Time to wait in seconds before recovery once primary re-establishes.
type: int
monitor_hold_down_time:
description:
- Time of day at which to fail back to primary after it re-establishes.
type: str
monitor_hold_down_type:
description:
- Recovery time method when primary interface re-establishes.
type: str
choices:
- immediate
- delay
- time
monitor_hold_down_weekday:
description:
- Day of the week to recover once primary re-establishes.
type: str
choices:
- everyday
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
name:
description:
- IPsec remote gateway name.
required: true
type: str
nattraversal:
description:
- Enable/disable NAT traversal.
type: str
choices:
- enable
- disable
- forced
negotiate_timeout:
description:
- IKE SA negotiation timeout in seconds (1 - 300).
type: int
net_device:
description:
- Enable/disable kernel device creation for dialup instances.
type: str
choices:
- enable
- disable
passive_mode:
description:
- Enable/disable IPsec passive mode for static tunnels.
type: str
choices:
- enable
- disable
peer:
description:
- Accept this peer certificate. Source user.peer.name.
type: str
peergrp:
description:
- Accept this peer certificate group. Source user.peergrp.name.
type: str
peerid:
description:
- Accept this peer identity.
type: str
peertype:
description:
- Accept this peer type.
type: str
choices:
- any
- one
- dialup
- peer
- peergrp
ppk:
description:
- Enable/disable IKEv2 Postquantum Preshared Key (PPK).
type: str
choices:
- disable
- allow
- require
ppk_identity:
description:
- IKEv2 Postquantum Preshared Key Identity.
type: str
ppk_secret:
description:
- IKEv2 Postquantum Preshared Key (ASCII string or hexadecimal encoded with a leading 0x).
type: str
priority:
description:
- Priority for routes added by IKE (0 - 4294967295).
type: int
proposal:
description:
- Phase1 proposal.
type: str
choices:
- des-md5
- des-sha1
- des-sha256
- des-sha384
- des-sha512
psksecret:
description:
- Pre-shared secret for PSK authentication (ASCII string or hexadecimal encoded with a leading 0x).
type: str
psksecret_remote:
description:
- Pre-shared secret for remote side PSK authentication (ASCII string or hexadecimal encoded with a leading 0x).
type: str
reauth:
description:
- Enable/disable re-authentication upon IKE SA lifetime expiration.
type: str
choices:
- disable
- enable
rekey:
description:
- Enable/disable phase1 rekey.
type: str
choices:
- enable
- disable
remote_gw:
description:
- IPv4 address of the remote gateway's external interface.
type: str
remote_gw6:
description:
- IPv6 address of the remote gateway's external interface.
type: str
remotegw_ddns:
description:
- Domain name of remote gateway (eg. name.DDNS.com).
type: str
rsa_signature_format:
description:
- Digital Signature Authentication RSA signature format.
type: str
choices:
- pkcs1
- pss
save_password:
description:
- Enable/disable saving XAuth username and password on VPN clients.
type: str
choices:
- disable
- enable
send_cert_chain:
description:
- Enable/disable sending certificate chain.
type: str
choices:
- enable
- disable
signature_hash_alg:
description:
- Digital Signature Authentication hash algorithms.
type: str
choices:
- sha1
- sha2-256
- sha2-384
- sha2-512
split_include_service:
description:
- Split-include services. Source firewall.service.group.name firewall.service.custom.name.
type: str
suite_b:
description:
- Use Suite-B.
type: str
choices:
- disable
- suite-b-gcm-128
- suite-b-gcm-256
tunnel_search:
description:
- Tunnel search method for when the interface is shared.
type: str
choices:
- selectors
- nexthop
type:
description:
- Remote gateway type.
type: str
choices:
- static
- dynamic
- ddns
unity_support:
description:
- Enable/disable support for Cisco UNITY Configuration Method extensions.
type: str
choices:
- disable
- enable
usrgrp:
description:
- User group name for dialup peers. Source user.group.name.
type: str
vni:
description:
- VNI of VXLAN tunnel.
type: int
wizard_type:
description:
- GUI VPN Wizard Type.
type: str
choices:
- custom
- dialup-forticlient
- dialup-ios
- dialup-android
- dialup-windows
- dialup-cisco
- static-fortigate
- dialup-fortigate
- static-cisco
- dialup-cisco-fw
xauthtype:
description:
- XAuth type.
type: str
choices:
- disable
- client
- pap
- chap
- auto
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure VPN remote gateway.
fortios_vpn_ipsec_phase1_interface:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
vpn_ipsec_phase1_interface:
acct_verify: "enable"
add_gw_route: "enable"
add_route: "disable"
assign_ip: "disable"
assign_ip_from: "range"
authmethod: "psk"
authmethod_remote: "psk"
authpasswd: "<your_own_value>"
authusr: "<your_own_value>"
authusrgrp: "<your_own_value> (source user.group.name)"
auto_discovery_forwarder: "enable"
auto_discovery_psk: "enable"
auto_discovery_receiver: "enable"
auto_discovery_sender: "enable"
auto_negotiate: "enable"
backup_gateway:
-
address: "<your_own_value>"
banner: "<your_own_value>"
cert_id_validation: "enable"
certificate:
-
name: "default_name_23 (source vpn.certificate.local.name)"
childless_ike: "enable"
client_auto_negotiate: "disable"
client_keep_alive: "disable"
comments: "<your_own_value>"
default_gw: "<your_own_value>"
default_gw_priority: "29"
dhgrp: "1"
digital_signature_auth: "enable"
distance: "32"
dns_mode: "manual"
domain: "<your_own_value>"
dpd: "disable"
dpd_retrycount: "36"
dpd_retryinterval: "<your_own_value>"
eap: "enable"
eap_identity: "use-id-payload"
encap_local_gw4: "<your_own_value>"
encap_local_gw6: "<your_own_value>"
encap_remote_gw4: "<your_own_value>"
encap_remote_gw6: "<your_own_value>"
encapsulation: "none"
encapsulation_address: "ike"
enforce_unique_id: "disable"
exchange_interface_ip: "enable"
exchange_ip_addr4: "<your_own_value>"
exchange_ip_addr6: "<your_own_value>"
forticlient_enforcement: "enable"
fragmentation: "enable"
fragmentation_mtu: "52"
group_authentication: "enable"
group_authentication_secret: "<your_own_value>"
ha_sync_esp_seqno: "enable"
idle_timeout: "enable"
idle_timeoutinterval: "57"
ike_version: "1"
include_local_lan: "disable"
interface: "<your_own_value> (source system.interface.name)"
ip_version: "4"
ipv4_dns_server1: "<your_own_value>"
ipv4_dns_server2: "<your_own_value>"
ipv4_dns_server3: "<your_own_value>"
ipv4_end_ip: "<your_own_value>"
ipv4_exclude_range:
-
end_ip: "<your_own_value>"
id: "68"
start_ip: "<your_own_value>"
ipv4_name: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
ipv4_netmask: "<your_own_value>"
ipv4_split_exclude: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
ipv4_split_include: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
ipv4_start_ip: "<your_own_value>"
ipv4_wins_server1: "<your_own_value>"
ipv4_wins_server2: "<your_own_value>"
ipv6_dns_server1: "<your_own_value>"
ipv6_dns_server2: "<your_own_value>"
ipv6_dns_server3: "<your_own_value>"
ipv6_end_ip: "<your_own_value>"
ipv6_exclude_range:
-
end_ip: "<your_own_value>"
id: "83"
start_ip: "<your_own_value>"
ipv6_name: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
ipv6_prefix: "86"
ipv6_split_exclude: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
ipv6_split_include: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
ipv6_start_ip: "<your_own_value>"
keepalive: "90"
keylife: "91"
local_gw: "<your_own_value>"
local_gw6: "<your_own_value>"
localid: "<your_own_value>"
localid_type: "auto"
mesh_selector_type: "disable"
mode: "aggressive"
mode_cfg: "disable"
monitor: "<your_own_value> (source vpn.ipsec.phase1-interface.name)"
monitor_hold_down_delay: "100"
monitor_hold_down_time: "<your_own_value>"
monitor_hold_down_type: "immediate"
monitor_hold_down_weekday: "everyday"
name: "default_name_104"
nattraversal: "enable"
negotiate_timeout: "106"
net_device: "enable"
passive_mode: "enable"
peer: "<your_own_value> (source user.peer.name)"
peergrp: "<your_own_value> (source user.peergrp.name)"
peerid: "<your_own_value>"
peertype: "any"
ppk: "disable"
ppk_identity: "<your_own_value>"
ppk_secret: "<your_own_value>"
priority: "116"
proposal: "des-md5"
psksecret: "<your_own_value>"
psksecret_remote: "<your_own_value>"
reauth: "disable"
rekey: "enable"
remote_gw: "<your_own_value>"
remote_gw6: "<your_own_value>"
remotegw_ddns: "<your_own_value>"
rsa_signature_format: "pkcs1"
save_password: "disable"
send_cert_chain: "enable"
signature_hash_alg: "sha1"
split_include_service: "<your_own_value> (source firewall.service.group.name firewall.service.custom.name)"
suite_b: "disable"
tunnel_search: "selectors"
type: "static"
unity_support: "disable"
usrgrp: "<your_own_value> (source user.group.name)"
vni: "135"
wizard_type: "custom"
xauthtype: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate against a FortiGate unit via the legacy fortiosapi client.

    :param data: module parameters; must contain 'host', 'username',
        'password' and 'ssl_verify', and may contain 'https'
    :param fos: FortiOSAPI instance that will hold the session
    """
    gateway = data['host']
    user = data['username']
    secret = data['password']
    verify_tls = data['ssl_verify']
    fos.debug('on')
    # HTTPS stays enabled unless the caller explicitly turned it off.
    fos.https('off' if 'https' in data and not data['https'] else 'on')
    fos.login(gateway, user, secret, verify=verify_tls)
def filter_vpn_ipsec_phase1_interface_data(json):
    """Reduce the module's parameter dict to the options understood by the
    FortiOS vpn.ipsec phase1-interface endpoint.

    :param json: vpn_ipsec_phase1_interface sub-parameters from the playbook
    :return: new dict holding only known options whose value is not None
    """
    option_list = ['acct_verify', 'add_gw_route', 'add_route',
                   'assign_ip', 'assign_ip_from', 'authmethod',
                   'authmethod_remote', 'authpasswd', 'authusr',
                   'authusrgrp', 'auto_discovery_forwarder', 'auto_discovery_psk',
                   'auto_discovery_receiver', 'auto_discovery_sender', 'auto_negotiate',
                   'backup_gateway', 'banner', 'cert_id_validation',
                   'certificate', 'childless_ike', 'client_auto_negotiate',
                   'client_keep_alive', 'comments', 'default_gw',
                   'default_gw_priority', 'dhgrp', 'digital_signature_auth',
                   'distance', 'dns_mode', 'domain',
                   'dpd', 'dpd_retrycount', 'dpd_retryinterval',
                   'eap', 'eap_identity', 'encap_local_gw4',
                   'encap_local_gw6', 'encap_remote_gw4', 'encap_remote_gw6',
                   'encapsulation', 'encapsulation_address', 'enforce_unique_id',
                   'exchange_interface_ip', 'exchange_ip_addr4', 'exchange_ip_addr6',
                   'forticlient_enforcement', 'fragmentation', 'fragmentation_mtu',
                   'group_authentication', 'group_authentication_secret', 'ha_sync_esp_seqno',
                   'idle_timeout', 'idle_timeoutinterval', 'ike_version',
                   'include_local_lan', 'interface', 'ip_version',
                   'ipv4_dns_server1', 'ipv4_dns_server2', 'ipv4_dns_server3',
                   'ipv4_end_ip', 'ipv4_exclude_range', 'ipv4_name',
                   'ipv4_netmask', 'ipv4_split_exclude', 'ipv4_split_include',
                   'ipv4_start_ip', 'ipv4_wins_server1', 'ipv4_wins_server2',
                   'ipv6_dns_server1', 'ipv6_dns_server2', 'ipv6_dns_server3',
                   'ipv6_end_ip', 'ipv6_exclude_range', 'ipv6_name',
                   'ipv6_prefix', 'ipv6_split_exclude', 'ipv6_split_include',
                   'ipv6_start_ip', 'keepalive', 'keylife',
                   'local_gw', 'local_gw6', 'localid',
                   'localid_type', 'mesh_selector_type', 'mode',
                   'mode_cfg', 'monitor', 'monitor_hold_down_delay',
                   'monitor_hold_down_time', 'monitor_hold_down_type', 'monitor_hold_down_weekday',
                   'name', 'nattraversal', 'negotiate_timeout',
                   'net_device', 'passive_mode', 'peer',
                   'peergrp', 'peerid', 'peertype',
                   'ppk', 'ppk_identity', 'ppk_secret',
                   'priority', 'proposal', 'psksecret',
                   'psksecret_remote', 'reauth', 'rekey',
                   'remote_gw', 'remote_gw6', 'remotegw_ddns',
                   'rsa_signature_format', 'save_password', 'send_cert_chain',
                   'signature_hash_alg', 'split_include_service', 'suite_b',
                   'tunnel_search', 'type', 'unity_support',
                   'usrgrp', 'vni', 'wizard_type',
                   'xauthtype']
    # Dict comprehension replaces the manual loop-and-assign; unknown keys
    # and unset (None) options are dropped so they are not sent to the unit.
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def underscore_to_hyphen(data):
    """Recursively convert underscores to hyphens in all dict keys.

    FortiOS API attribute names use hyphens while Ansible option names use
    underscores; this walks nested dicts/lists and rewrites every key.

    :param data: arbitrarily nested structure of dicts, lists and scalars
    :return: the converted structure (lists mutated in place, dicts rebuilt)
    """
    if isinstance(data, list):
        # Bug fix: store the converted element back into the list. The
        # previous code rebound the loop variable only, so converted dicts
        # were discarded and list sub-options kept their underscore keys.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        data = {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def vpn_ipsec_phase1_interface(data, fos):
    """Apply the desired state to a vpn.ipsec phase1-interface object.

    :param data: full module parameter dict; 'state' may be given at the top
        level (current style) or inside the vpn_ipsec_phase1_interface
        sub-dict (legacy style)
    :param fos: FortiOS connection handler providing set()/delete()
    :return: raw response dict from the FortiGate unit (None if state is
        neither "present" nor "absent")
    """
    vdom = data['vdom']
    if 'state' in data and data['state']:
        state = data['state']
    # Bug fix: the second operand used to test the truthiness of the whole
    # sub-dict instead of the nested 'state' value itself.
    elif 'state' in data['vpn_ipsec_phase1_interface'] and data['vpn_ipsec_phase1_interface']['state']:
        state = data['vpn_ipsec_phase1_interface']['state']
    else:
        state = True
    vpn_ipsec_phase1_interface_data = data['vpn_ipsec_phase1_interface']
    filtered_data = underscore_to_hyphen(filter_vpn_ipsec_phase1_interface_data(vpn_ipsec_phase1_interface_data))
    if state == "present":
        return fos.set('vpn.ipsec',
                       'phase1-interface',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        # FortiOS deletes by mkey, which for this table is the object name.
        return fos.delete('vpn.ipsec',
                          'phase1-interface',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """Return True when a FortiGate API response indicates success.

    A DELETE that came back 404 also counts as success, because the object
    being removed is already gone.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_vpn_ipsec(data, fos):
    """Dispatch to the vpn.ipsec handler and normalise the response into
    the (is_error, has_changed, result) triple the caller unpacks.

    NOTE(review): when 'vpn_ipsec_phase1_interface' is missing or empty this
    falls through and implicitly returns None; callers unpack three values,
    so they appear to rely on the option always being supplied -- confirm.
    """
    if data['vpn_ipsec_phase1_interface']:
        resp = vpn_ipsec_phase1_interface(data, fos)
        return not is_successful_status(resp), \
            resp['status'] == "success", \
            resp
def main():
    """Module entry point.

    Builds the argument spec, connects to the FortiGate unit either through
    Ansible's HTTPAPI connection plugin or the legacy fortiosapi client,
    applies the configuration and exits with the result.
    """
    # Sub-options accepted inside the vpn_ipsec_phase1_interface dict.
    phase1_options = {
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "acct_verify": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
        "add_gw_route": {"required": False, "type": "str",
                         "choices": ["enable", "disable"]},
        "add_route": {"required": False, "type": "str",
                      "choices": ["disable", "enable"]},
        "assign_ip": {"required": False, "type": "str",
                      "choices": ["disable", "enable"]},
        "assign_ip_from": {"required": False, "type": "str",
                           "choices": ["range", "usrgrp", "dhcp", "name"]},
        "authmethod": {"required": False, "type": "str",
                       "choices": ["psk", "signature"]},
        "authmethod_remote": {"required": False, "type": "str",
                              "choices": ["psk", "signature"]},
        "authpasswd": {"required": False, "type": "str"},
        "authusr": {"required": False, "type": "str"},
        "authusrgrp": {"required": False, "type": "str"},
        "auto_discovery_forwarder": {"required": False, "type": "str",
                                     "choices": ["enable", "disable"]},
        "auto_discovery_psk": {"required": False, "type": "str",
                               "choices": ["enable", "disable"]},
        "auto_discovery_receiver": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
        "auto_discovery_sender": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
        "auto_negotiate": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]},
        "backup_gateway": {"required": False, "type": "list",
                           "options": {
                               "address": {"required": True, "type": "str"}
                           }},
        "banner": {"required": False, "type": "str"},
        "cert_id_validation": {"required": False, "type": "str",
                               "choices": ["enable", "disable"]},
        "certificate": {"required": False, "type": "list",
                        "options": {
                            "name": {"required": True, "type": "str"}
                        }},
        "childless_ike": {"required": False, "type": "str",
                          "choices": ["enable", "disable"]},
        "client_auto_negotiate": {"required": False, "type": "str",
                                  "choices": ["disable", "enable"]},
        "client_keep_alive": {"required": False, "type": "str",
                              "choices": ["disable", "enable"]},
        "comments": {"required": False, "type": "str"},
        "default_gw": {"required": False, "type": "str"},
        "default_gw_priority": {"required": False, "type": "int"},
        "dhgrp": {"required": False, "type": "str",
                  "choices": ["1", "2", "5", "14", "15", "16",
                              "17", "18", "19", "20", "21", "27",
                              "28", "29", "30", "31"]},
        "digital_signature_auth": {"required": False, "type": "str",
                                   "choices": ["enable", "disable"]},
        "distance": {"required": False, "type": "int"},
        "dns_mode": {"required": False, "type": "str",
                     "choices": ["manual", "auto"]},
        "domain": {"required": False, "type": "str"},
        "dpd": {"required": False, "type": "str",
                "choices": ["disable", "on-idle", "on-demand"]},
        "dpd_retrycount": {"required": False, "type": "int"},
        "dpd_retryinterval": {"required": False, "type": "str"},
        "eap": {"required": False, "type": "str",
                "choices": ["enable", "disable"]},
        "eap_identity": {"required": False, "type": "str",
                         "choices": ["use-id-payload", "send-request"]},
        "encap_local_gw4": {"required": False, "type": "str"},
        "encap_local_gw6": {"required": False, "type": "str"},
        "encap_remote_gw4": {"required": False, "type": "str"},
        "encap_remote_gw6": {"required": False, "type": "str"},
        "encapsulation": {"required": False, "type": "str",
                          "choices": ["none", "gre", "vxlan"]},
        "encapsulation_address": {"required": False, "type": "str",
                                  "choices": ["ike", "ipv4", "ipv6"]},
        "enforce_unique_id": {"required": False, "type": "str",
                              "choices": ["disable", "keep-new", "keep-old"]},
        "exchange_interface_ip": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
        "exchange_ip_addr4": {"required": False, "type": "str"},
        "exchange_ip_addr6": {"required": False, "type": "str"},
        "forticlient_enforcement": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
        "fragmentation": {"required": False, "type": "str",
                          "choices": ["enable", "disable"]},
        "fragmentation_mtu": {"required": False, "type": "int"},
        "group_authentication": {"required": False, "type": "str",
                                 "choices": ["enable", "disable"]},
        "group_authentication_secret": {"required": False, "type": "str"},
        "ha_sync_esp_seqno": {"required": False, "type": "str",
                              "choices": ["enable", "disable"]},
        "idle_timeout": {"required": False, "type": "str",
                         "choices": ["enable", "disable"]},
        "idle_timeoutinterval": {"required": False, "type": "int"},
        "ike_version": {"required": False, "type": "str",
                        "choices": ["1", "2"]},
        "include_local_lan": {"required": False, "type": "str",
                              "choices": ["disable", "enable"]},
        "interface": {"required": False, "type": "str"},
        "ip_version": {"required": False, "type": "str",
                       "choices": ["4", "6"]},
        "ipv4_dns_server1": {"required": False, "type": "str"},
        "ipv4_dns_server2": {"required": False, "type": "str"},
        "ipv4_dns_server3": {"required": False, "type": "str"},
        "ipv4_end_ip": {"required": False, "type": "str"},
        "ipv4_exclude_range": {"required": False, "type": "list",
                               "options": {
                                   "end_ip": {"required": False, "type": "str"},
                                   "id": {"required": True, "type": "int"},
                                   "start_ip": {"required": False, "type": "str"}
                               }},
        "ipv4_name": {"required": False, "type": "str"},
        "ipv4_netmask": {"required": False, "type": "str"},
        "ipv4_split_exclude": {"required": False, "type": "str"},
        "ipv4_split_include": {"required": False, "type": "str"},
        "ipv4_start_ip": {"required": False, "type": "str"},
        "ipv4_wins_server1": {"required": False, "type": "str"},
        "ipv4_wins_server2": {"required": False, "type": "str"},
        "ipv6_dns_server1": {"required": False, "type": "str"},
        "ipv6_dns_server2": {"required": False, "type": "str"},
        "ipv6_dns_server3": {"required": False, "type": "str"},
        "ipv6_end_ip": {"required": False, "type": "str"},
        "ipv6_exclude_range": {"required": False, "type": "list",
                               "options": {
                                   "end_ip": {"required": False, "type": "str"},
                                   "id": {"required": True, "type": "int"},
                                   "start_ip": {"required": False, "type": "str"}
                               }},
        "ipv6_name": {"required": False, "type": "str"},
        "ipv6_prefix": {"required": False, "type": "int"},
        "ipv6_split_exclude": {"required": False, "type": "str"},
        "ipv6_split_include": {"required": False, "type": "str"},
        "ipv6_start_ip": {"required": False, "type": "str"},
        "keepalive": {"required": False, "type": "int"},
        "keylife": {"required": False, "type": "int"},
        "local_gw": {"required": False, "type": "str"},
        "local_gw6": {"required": False, "type": "str"},
        "localid": {"required": False, "type": "str"},
        "localid_type": {"required": False, "type": "str",
                         "choices": ["auto", "fqdn", "user-fqdn",
                                     "keyid", "address", "asn1dn"]},
        "mesh_selector_type": {"required": False, "type": "str",
                               "choices": ["disable", "subnet", "host"]},
        "mode": {"required": False, "type": "str",
                 "choices": ["aggressive", "main"]},
        "mode_cfg": {"required": False, "type": "str",
                     "choices": ["disable", "enable"]},
        "monitor": {"required": False, "type": "str"},
        "monitor_hold_down_delay": {"required": False, "type": "int"},
        "monitor_hold_down_time": {"required": False, "type": "str"},
        "monitor_hold_down_type": {"required": False, "type": "str",
                                   "choices": ["immediate", "delay", "time"]},
        "monitor_hold_down_weekday": {"required": False, "type": "str",
                                      "choices": ["everyday", "sunday", "monday",
                                                  "tuesday", "wednesday", "thursday",
                                                  "friday", "saturday"]},
        "name": {"required": True, "type": "str"},
        "nattraversal": {"required": False, "type": "str",
                         "choices": ["enable", "disable", "forced"]},
        "negotiate_timeout": {"required": False, "type": "int"},
        "net_device": {"required": False, "type": "str",
                       "choices": ["enable", "disable"]},
        "passive_mode": {"required": False, "type": "str",
                         "choices": ["enable", "disable"]},
        "peer": {"required": False, "type": "str"},
        "peergrp": {"required": False, "type": "str"},
        "peerid": {"required": False, "type": "str"},
        "peertype": {"required": False, "type": "str",
                     "choices": ["any", "one", "dialup",
                                 "peer", "peergrp"]},
        "ppk": {"required": False, "type": "str",
                "choices": ["disable", "allow", "require"]},
        "ppk_identity": {"required": False, "type": "str"},
        "ppk_secret": {"required": False, "type": "str"},
        "priority": {"required": False, "type": "int"},
        "proposal": {"required": False, "type": "str",
                     "choices": ["des-md5", "des-sha1", "des-sha256",
                                 "des-sha384", "des-sha512"]},
        "psksecret": {"required": False, "type": "str"},
        "psksecret_remote": {"required": False, "type": "str"},
        "reauth": {"required": False, "type": "str",
                   "choices": ["disable", "enable"]},
        "rekey": {"required": False, "type": "str",
                  "choices": ["enable", "disable"]},
        "remote_gw": {"required": False, "type": "str"},
        "remote_gw6": {"required": False, "type": "str"},
        "remotegw_ddns": {"required": False, "type": "str"},
        "rsa_signature_format": {"required": False, "type": "str",
                                 "choices": ["pkcs1", "pss"]},
        "save_password": {"required": False, "type": "str",
                          "choices": ["disable", "enable"]},
        "send_cert_chain": {"required": False, "type": "str",
                            "choices": ["enable", "disable"]},
        "signature_hash_alg": {"required": False, "type": "str",
                               "choices": ["sha1", "sha2-256", "sha2-384",
                                           "sha2-512"]},
        "split_include_service": {"required": False, "type": "str"},
        "suite_b": {"required": False, "type": "str",
                    "choices": ["disable", "suite-b-gcm-128", "suite-b-gcm-256"]},
        "tunnel_search": {"required": False, "type": "str",
                          "choices": ["selectors", "nexthop"]},
        "type": {"required": False, "type": "str",
                 "choices": ["static", "dynamic", "ddns"]},
        "unity_support": {"required": False, "type": "str",
                          "choices": ["disable", "enable"]},
        "usrgrp": {"required": False, "type": "str"},
        "vni": {"required": False, "type": "int"},
        "wizard_type": {"required": False, "type": "str",
                        "choices": ["custom", "dialup-forticlient", "dialup-ios",
                                    "dialup-android", "dialup-windows", "dialup-cisco",
                                    "static-fortigate", "dialup-fortigate", "static-cisco",
                                    "dialup-cisco-fw"]},
        "xauthtype": {"required": False, "type": "str",
                      "choices": ["disable", "client", "pap",
                                  "chap", "auto"]}
    }
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "vpn_ipsec_phase1_interface": {
            "required": False, "type": "dict", "default": None,
            "options": phase1_options
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI:
    # it is selected when explicit connection credentials were supplied.
    legacy_mode = all(module.params.get(key) is not None
                      for key in ('host', 'username', 'password'))
    if legacy_mode:
        # Talk to the unit directly with the fortiosapi client.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos)
        fos.logout()
    else:
        # Use the persistent HTTPAPI connection managed by Ansible.
        # fail_json() exits, so execution never continues without a socket.
        if not module._socket_path:
            module.fail_json(**FAIL_SOCKET_MSG)
        connection = Connection(module._socket_path)
        fos = FortiOSHandler(connection)
        is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos)
    if is_error:
        module.fail_json(msg="Error in repo", meta=result)
    else:
        module.exit_json(changed=has_changed, meta=result)
# Standard Ansible module entry point: run only when executed as a script.
if __name__ == '__main__':
    main()
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from collections import defaultdict
import logging
import re
from datetime import datetime
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.schema import Table, ForeignKey
from sqlalchemy import Column, Integer, DateTime, Unicode, Index
from flexget import db_schema, options, plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.logger import console
from flexget.options import ParseExtrasAction, get_parser
from flexget.utils.sqlalchemy_utils import table_schema, get_index_by_name
from flexget.utils.tools import strip_html
from flexget.manager import Session
log = logging.getLogger('archive')
SCHEMA_VER = 0
Base = db_schema.versioned_base('archive', SCHEMA_VER)
# Many-to-many association between archive entries and tags.
archive_tags_table = Table('archive_entry_tags', Base.metadata,
                           Column('entry_id', Integer, ForeignKey('archive_entry.id')),
                           Column('tag_id', Integer, ForeignKey('archive_tag.id')),
                           Index('ix_archive_tags', 'entry_id', 'tag_id'))
Base.register_table(archive_tags_table)
# Many-to-many association between archive entries and the tasks (sources)
# they were seen in.
archive_sources_table = Table('archive_entry_sources', Base.metadata,
                              Column('entry_id', Integer, ForeignKey('archive_entry.id')),
                              Column('source_id', Integer, ForeignKey('archive_source.id')),
                              Index('ix_archive_sources', 'entry_id', 'source_id'))
Base.register_table(archive_sources_table)
class ArchiveEntry(Base):
    """One archived item: title/url pair plus the tags and source tasks it was seen in."""
    __tablename__ = 'archive_entry'
    # Entries are looked up by (title, url) in on_task_learn, hence the composite index.
    __table_args__ = (Index('ix_archive_title_url', 'title', 'url'),)
    id = Column(Integer, primary_key=True)
    title = Column(Unicode, index=True)
    url = Column(Unicode, index=True)
    description = Column(Unicode)
    task = Column('feed', Unicode) # DEPRECATED, but SQLite does not support drop column
    added = Column(DateTime, index=True)
    tags = relationship("ArchiveTag", secondary=archive_tags_table)
    sources = relationship("ArchiveSource", secondary=archive_sources_table, backref='archive_entries')
    def __init__(self):
        # Timestamp the entry at creation time.
        self.added = datetime.now()
    def __str__(self):
        return '<ArchiveEntry(title=%s,url=%s,task=%s,added=%s)>' %\
            (self.title, self.url, self.task, self.added.strftime('%Y-%m-%d %H:%M'))
class ArchiveTag(Base):
    """User-configured label attached to archive entries (many-to-many)."""
    __tablename__ = 'archive_tag'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode, index=True)
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return '<ArchiveTag(id=%s,name=%s)>' % (self.id, self.name)
class ArchiveSource(Base):
    """Name of a task an archive entry was collected from (many-to-many)."""
    __tablename__ = 'archive_source'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode, index=True)
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return '<ArchiveSource(id=%s,name=%s)>' % (self.id, self.name)
def get_source(name, session):
    """Fetch the ArchiveSource row named *name*, or build a new,
    not-yet-persisted instance when no such row exists.

    :param string name: Source name
    :param session: SQLAlchemy session
    :return: ArchiveSource from db or new one
    """
    query = session.query(ArchiveSource).filter(ArchiveSource.name == name)
    try:
        return query.one()
    except NoResultFound:
        # Not added to the session here; the caller decides when it
        # becomes persistent.
        return ArchiveSource(name)
def get_tag(name, session):
    """Fetch the ArchiveTag row named *name*, or build a new,
    not-yet-persisted instance when no such row exists.

    :param string name: Tag name
    :param session: SQLAlchemy session
    :return: ArchiveTag from db or new one
    """
    query = session.query(ArchiveTag).filter(ArchiveTag.name == name)
    try:
        return query.one()
    except NoResultFound:
        # Not added to the session here; the caller decides when it
        # becomes persistent.
        return ArchiveTag(name)
@db_schema.upgrade('archive')
def upgrade(ver, session):
    """Migrate the archive tables from the legacy (pre-versioning) layout.

    Drops the old `archive_feed_title` index, creates `ix_archive_title_url`
    in its place, and warns the user to run `--archive consolidate` when old
    rows exist. Returns the new schema version (0).
    """
    if ver is None:
        # get rid of old index
        aet = table_schema('archive_entry', session)
        old_index = get_index_by_name(aet, 'archive_feed_title')
        if old_index is not None:
            log.info('Dropping legacy index (may take a while) ...')
            old_index.drop()
        # create new index by title, url
        new_index = get_index_by_name(Base.metadata.tables['archive_entry'], 'ix_archive_title_url')
        if new_index:
            log.info('Creating new index (may take a while) ...')
            new_index.create(bind=session.connection())
        else:
            # maybe removed from the model by later migrations?
            log.error('Unable to create index `ix_archive_title_url`, removed from the model?')
            # TODO: nag about this ?
        # This is safe as long as we don't delete the model completely :)
        # But generally never use Declarative Models in migrate!
        if session.query(ArchiveEntry).first():
            log.critical('----------------------------------------------')
            log.critical('You should run `--archive consolidate` ')
            log.critical('one time when you have time, it may take hours')
            log.critical('----------------------------------------------')
        ver = 0
    return ver
class Archive(object):
    """
    Archives all new items into database where they can be later searched and injected.
    Stores the entries in the state as they are at the exit phase, this way task cleanup for title
    etc is stored into the database. This may however make injecting them back to the original task work
    wrongly.
    """
    # Config is either a bare boolean (archive without tags) or a list of tag names.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {'type': 'array', 'items': {'type': 'string'}}
        ]
    }
    def on_task_learn(self, task, config):
        """Add new entries into archive. We use learn phase in case the task corrects title or url via some plugins."""
        if isinstance(config, bool):
            tag_names = []
        else:
            tag_names = config
        # Resolve (or create) the ArchiveTag rows configured for this task.
        tags = []
        for tag_name in set(tag_names):
            tags.append(get_tag(tag_name, task.session))
        count = 0
        processed = []
        for entry in task.entries + task.rejected + task.failed:
            # I think entry can be in multiple of those lists .. not sure though!
            if entry in processed:
                continue
            else:
                processed.append(entry)
            # Entries are considered the same archive item when both title and url match.
            ae = task.session.query(ArchiveEntry).\
                filter(ArchiveEntry.title == entry['title']).\
                filter(ArchiveEntry.url == entry['url']).first()
            if ae:
                # add (missing) sources
                source = get_source(task.name, task.session)
                if source not in ae.sources:
                    log.debug('Adding `%s` into `%s` sources' % (task.name, ae))
                    ae.sources.append(source)
                # add (missing) tags
                for tag_name in tag_names:
                    atag = get_tag(tag_name, task.session)
                    if atag not in ae.tags:
                        log.debug('Adding tag %s into %s' % (tag_name, ae))
                        ae.tags.append(atag)
            else:
                # create new archive entry
                ae = ArchiveEntry()
                ae.title = entry['title']
                ae.url = entry['url']
                if 'description' in entry:
                    ae.description = entry['description']
                ae.task = task.name
                ae.sources.append(get_source(task.name, task.session))
                if tags:
                    # note, we're extending empty list
                    ae.tags.extend(tags)
                log.debug('Adding `%s` with %i tags to archive' % (ae, len(tags)))
                task.session.add(ae)
                count += 1
        if count:
            log.verbose('Added %i new entries to archive' % count)
    def on_task_abort(self, task, config):
        """
        Archive even on task abort, except if the abort has happened before session
        was started.
        """
        if task.session is not None:
            self.on_task_learn(task, config)
class UrlrewriteArchive(object):
    """
    Provides capability to rewrite urls from archive or make searches with discover.
    """

    # Fields copied from an ArchiveEntry row into the produced Entry.
    entry_map = {'title': 'title',
                 'url': 'url',
                 'description': 'description'}

    schema = {'oneOf': [
        {'type': 'boolean'},
        {'type': 'array', 'items': {'type': 'string'}}
    ]}

    def search(self, task, entry, config=None):
        """Search plugin API method"""
        # A plain boolean config means "no tag filtering".
        tag_names = None if isinstance(config, bool) else config
        results = set()
        session = Session()
        try:
            for query in entry.get('search_strings', [entry['title']]):
                # clean some characters out of the string for better results
                query = re.sub(r'[ \(\)]+', ' ', query).strip()
                log.debug('looking for `%s` config: %s' % (query, config))
                for archive_entry in search(session, query, tags=tag_names, desc=True):
                    log.debug('rewrite search result: %s' % archive_entry)
                    candidate = Entry()
                    candidate.update_using_map(self.entry_map, archive_entry, ignore_none=True)
                    if candidate.isvalid():
                        results.add(candidate)
        finally:
            session.close()
        log.debug('found %i entries' % len(results))
        return results
def consolidate():
    """
    Converts previous archive data model to new one.
    """
    session = Session()
    try:
        log.verbose('Checking archive size ...')
        count = session.query(ArchiveEntry).count()
        log.verbose('Found %i items to migrate, this can be aborted with CTRL-C safely.' % count)
        # consolidate old data
        from progressbar import ProgressBar, Percentage, Bar, ETA
        widgets = ['Process - ', ETA(), ' ', Percentage(), ' ', Bar(left='[', right=']')]
        bar = ProgressBar(widgets=widgets, maxval=count).start()
        # id's for duplicates
        duplicates = []
        for index, orig in enumerate(session.query(ArchiveEntry).yield_per(5)):
            bar.update(index)
            # item already processed
            if orig.id in duplicates:
                continue
            # item already migrated
            if orig.sources:
                log.info('Database looks like it has already been consolidated, '
                         'item %s has already sources ...' % orig.title)
                session.rollback()
                return
            # add legacy task to the sources list
            orig.sources.append(get_source(orig.task, session))
            # remove task, deprecated .. well, let's still keep it ..
            # orig.task = None
            # fold every other row with the same title+url into this one
            for dupe in session.query(ArchiveEntry).\
                    filter(ArchiveEntry.id != orig.id).\
                    filter(ArchiveEntry.title == orig.title).\
                    filter(ArchiveEntry.url == orig.url).all():
                orig.sources.append(get_source(dupe.task, session))
                duplicates.append(dupe.id)
        if duplicates:
            log.info('Consolidated %i items, removing duplicates ...' % len(duplicates))
            for id in duplicates:
                session.query(ArchiveEntry).filter(ArchiveEntry.id == id).delete()
        session.commit()
        log.info('Completed! This does NOT need to be ran again.')
    except KeyboardInterrupt:
        # allow a safe abort mid-migration; nothing is persisted
        session.rollback()
        log.critical('Aborted, no changes saved')
    finally:
        session.close()
def tag_source(source_name, tag_names=None):
    """
    Tags all archived entries within a source with supplied tags

    :param string source_name: Source name
    :param list tag_names: List of tag names to add
    """
    # `not tag_names` is already True for None, so the extra
    # `tag_names is None` check in the original was redundant.
    if not tag_names:
        return
    session = Session()
    try:
        # check that source exists
        source = session.query(ArchiveSource).filter(ArchiveSource.name == source_name).first()
        if not source:
            log.critical('Source `%s` does not exists' % source_name)
            srcs = ', '.join([s.name for s in session.query(ArchiveSource).order_by(ArchiveSource.name)])
            if srcs:
                log.info('Known sources: %s' % srcs)
            return
        # construct tags list
        tags = [get_tag(tag_name, session) for tag_name in tag_names]
        # tag 'em
        log.verbose('Please wait while adding tags %s ...' % (', '.join(tag_names)))
        for a in session.query(ArchiveEntry).\
                filter(ArchiveEntry.sources.any(name=source_name)).yield_per(5):
            a.tags.extend(tags)
    finally:
        # NOTE(review): commit lives in finally, so tags added before an
        # exception are still persisted — preserved from the original.
        session.commit()
        session.close()
# API function, was also used from webui .. needs to be rethinked
def search(session, text, tags=None, sources=None, desc=False):
    """
    Search from the archive.

    :param string text: Search text, spaces and dots are tried to be ignored.
    :param Session session: SQLAlchemy session, should not be closed while iterating results.
    :param list tags: Optional list of acceptable tags
    :param list sources: Optional list of acceptable sources
    :param bool desc: Sort results descending
    :return: ArchiveEntries responding to query
    """
    # Loose SQL LIKE pattern: treat spaces and dots as wildcards.
    keyword = str(text).replace(' ', '%').replace('.', '%')
    # clean the text from any unwanted regexp, convert spaces and keep dots as dots
    normalized_re = re.escape(text.replace('.', ' ')).replace('\\ ', ' ').replace(' ', '.')
    find_re = re.compile(normalized_re, re.IGNORECASE)
    query = session.query(ArchiveEntry).filter(ArchiveEntry.title.like('%' + keyword + '%'))
    if tags:
        query = query.filter(ArchiveEntry.tags.any(ArchiveTag.name.in_(tags)))
    if sources:
        query = query.filter(ArchiveEntry.sources.any(ArchiveSource.name.in_(sources)))
    if desc:
        query = query.order_by(ArchiveEntry.added.desc())
    else:
        query = query.order_by(ArchiveEntry.added.asc())
    for a in query.yield_per(5):
        # LIKE above over-matches; re-check with the stricter regexp.
        if find_re.match(a.title):
            yield a
        else:
            log.trace('title %s is too wide match' % a.title)
def cli_search(options):
    """Handle the ``archive search`` CLI action: print matching entries."""
    search_term = ' '.join(options.keywords)
    tags = options.tags
    sources = options.sources

    def print_ae(ae):
        # Render one archive entry in a human readable block on the console.
        diff = datetime.now() - ae.added
        console('ID: %-6s | Title: %s\nAdded: %s (%d days ago)\nURL: %s' %
                (ae.id, ae.title, ae.added, diff.days, ae.url))
        source_names = ', '.join([s.name for s in ae.sources])
        tag_names = ', '.join([t.name for t in ae.tags])
        console('Source(s): %s | Tag(s): %s' % (source_names or 'N/A', tag_names or 'N/A'))
        if ae.description:
            console('Description: %s' % strip_html(ae.description))
        console('---')

    session = Session()
    try:
        console('Searching: %s' % search_term)
        if tags:
            console('Tags: %s' % ', '.join(tags))
        if sources:
            console('Sources: %s' % ', '.join(sources))
        console('Please wait...')
        console('')
        results = False
        # normalize whitespace/parentheses the same way the search plugin does
        query = re.sub(r'[ \(\)]+', ' ', search_term).strip()
        for ae in search(session, query, tags=tags, sources=sources):
            print_ae(ae)
            results = True
        if not results:
            console('No results found.')
    finally:
        session.close()
def cli_inject(manager, options):
    """Handle the ``archive inject`` CLI action: re-feed archived items into their source tasks."""
    log.debug('Finding inject content')
    inject_entries = defaultdict(list)
    with Session() as session:
        for id in options.ids:
            archive_entry = session.query(ArchiveEntry).get(id)
            # not found
            if not archive_entry:
                log.critical('There\'s no archived item with ID `%s`' % id)
                continue
            # find if there is no longer any task within sources
            if not any(source.name in manager.tasks for source in archive_entry.sources):
                log.error('None of sources (%s) exists anymore, cannot inject `%s` from archive!' %
                          (', '.join([s.name for s in archive_entry.sources]), archive_entry.title))
                continue
            # Build the entry to inject, pre-accepted so it passes filters.
            inject_entry = Entry(archive_entry.title, archive_entry.url)
            if archive_entry.description:
                inject_entry['description'] = archive_entry.description
            if options.immortal:
                log.debug('Injecting as immortal')
                inject_entry['immortal'] = True
            inject_entry['accepted_by'] = 'archive inject'
            inject_entry.accept('injected')
            # update list of tasks to be injected
            for source in archive_entry.sources:
                inject_entries[source.name].append(inject_entry)
    for task_name in inject_entries:
        for inject_entry in inject_entries[task_name]:
            log.info('Injecting from archive `%s` into `%s`' % (inject_entry['title'], task_name))
    for index, task_name in enumerate(inject_entries):
        options.inject = inject_entries[task_name]
        options.tasks = [task_name]
        # TODO: This is a bit hacky, consider a better way
        if index == len(inject_entries) - 1:
            # We use execute_command on the last item, rather than regular execute, to start FlexGet running.
            break
        manager.execute(options)
    manager.execute_command(options)
def do_cli(manager, options):
    """Dispatch the ``archive`` CLI command to the selected sub-action."""
    action = options.archive_action
    # Guard-clause dispatch; unknown actions fall through silently.
    if action == 'tag-source':
        tag_source(options.source, tag_names=options.tags)
        return
    if action == 'consolidate':
        consolidate()
        return
    if action == 'search':
        cli_search(options)
        return
    if action == 'inject':
        cli_inject(manager, options)
@event('plugin.register')
def register_plugin():
    """Register the archive output plugin and its search/urlrewrite companion."""
    plugin.register(Archive, 'archive', api_ver=2)
    plugin.register(UrlrewriteArchive, 'flexget_archive', groups=['search'], api_ver=2)
@event('options.register')
def register_parser_arguments():
    """Register the ``archive`` CLI command and its sub-command parsers."""
    archive_parser = options.register_command('archive', do_cli, help='search and manipulate the archive database')
    archive_parser.add_subparsers(title='Actions', metavar='<action>', dest='archive_action')
    # Default usage shows the positional arguments after the optional ones, override usage to fix it
    search_parser = archive_parser.add_subparser('search', help='search from the archive',
                                                 usage='%(prog)s [-h] <keyword> [<keyword> ...] [optional arguments]')
    search_parser.add_argument('keywords', metavar='<keyword>', nargs='+', help='keyword(s) to search for')
    search_parser.add_argument('--tags', metavar='TAG', nargs='+', default=[], help='tag(s) to search within')
    search_parser.add_argument('--sources', metavar='SOURCE', nargs='+', default=[], help='source(s) to search within')
    inject_parser = archive_parser.add_subparser('inject', help='inject entries from the archive back into tasks')
    inject_parser.add_argument('ids', nargs='+', type=int, metavar='ID', help='archive ID of an item to inject')
    inject_parser.add_argument('--immortal', action='store_true', help='injected entries will not be able to be '
                               'rejected by any plugins')
    # allow passing regular `execute` options through to the inject run
    exec_group = inject_parser.add_argument_group('execute arguments')
    exec_group.add_argument('execute_options', action=ParseExtrasAction, parser=get_parser('execute'))
    tag_parser = archive_parser.add_subparser('tag-source', help='tag all archived entries within a given source')
    tag_parser.add_argument('source', metavar='<source>', help='the source whose entries you would like to tag')
    tag_parser.add_argument('tags', nargs='+', metavar='<tag>',
                            help='the tag(s) you would like to apply to the entries')
    archive_parser.add_subparser('consolidate', help='migrate old archive data to new model, may take a long time')
|
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
import requests
import time
import random
import os
import re
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
try:
from lxml import html
from lxml.html.defs import link_attrs as html_link_attrs
except ImportError:
html = None
try:
import cssselect
except ImportError:
cssselect = None
from . import loader, configuration, util
from .util import (getPageContent, makeSequence, get_system_uid, urlopen,
getDirname, unescape, tagre, normaliseURL, prettyMatcherList)
from .comic import ComicStrip
from .output import out
from .events import getHandler
class Scraper(object):
    '''Base class for all comic scraper, but without a specific scrape implementation.'''

    # The URL for the comic strip
    url = None

    # A string that is interpolated with the strip index to yield the URL for a particular strip.
    stripUrl = None

    # Stop search for previous URLs at this URL
    firstStripUrl = None

    # if more than one image per URL is expected
    multipleImagesPerStrip = False

    # set to False if previous URLs do not match the strip URL (ie. because of redirects)
    prevUrlMatchesStripUrl = True

    # set to True if this comic contains adult content
    adult = False

    # set to True if this comic will not get updated anymore
    endOfLife = False

    # langauge of the comic (two-letter ISO 639-1 code)
    lang = 'en'

    # an expression that will locate the URL for the previous strip in a page
    # this can also be a list or tuple
    prevSearch = None

    # an expression that will locate the strip image URLs strip in a page
    # this can also be a list or tuple
    imageSearch = None

    # an expression to store a text together with the image
    # sometimes comic strips have additional text info for each comic
    textSearch = None

    # Is the additional text required or optional? When it is required (the
    # default), you see an error message whenever a comic page is encountered
    # that does not have the text
    textOptional = False

    # usually the index format help
    help = ''

    # HTTP session storing cookies (shared by all scrapers, class-level)
    session = requests.session()

    def __init__(self, indexes=None):
        """Initialize internal variables."""
        self.urls = set()
        if indexes:
            self.indexes = tuple(sorted(indexes))
        else:
            self.indexes = tuple()
        self.skippedUrls = set()
        self.hitFirstStripUrl = False

    def __cmp__(self, other):
        """Compare scraper by name and index list."""
        # Python 2 rich ordering; non-scrapers always sort after scrapers.
        if not isinstance(other, Scraper):
            return 1
        # first, order by name
        d = cmp(self.getName(), other.getName())
        if d != 0:
            return d
        # then by indexes
        return cmp(self.indexes, other.indexes)

    def __hash__(self):
        """Get hash value from name and index list."""
        return hash((self.getName(), self.indexes))

    def shouldSkipUrl(self, url, data):
        """Determine if search for images in given URL should be skipped."""
        return False

    def getComicStrip(self, url, data):
        """Get comic strip downloader for given URL and data."""
        imageUrls = self.fetchUrls(url, data, self.imageSearch)
        # map modifier function on image URLs
        imageUrls = [self.imageUrlModifier(x, data) for x in imageUrls]
        # remove duplicate URLs
        imageUrls = set(imageUrls)
        if len(imageUrls) > 1 and not self.multipleImagesPerStrip:
            out.warn(u"Found %d images instead of 1 at %s with expressions %s" % (len(imageUrls), url, prettyMatcherList(self.imageSearch)))
            # pick deterministically (sorted) so repeated runs agree
            image = sorted(imageUrls)[0]
            out.warn(u"Choosing image %s" % image)
            imageUrls = (image,)
        elif not imageUrls:
            out.warn(u"Found no images at %s with expressions %s" % (url, prettyMatcherList(self.imageSearch)))
        if self.textSearch:
            text = self.fetchText(url, data, self.textSearch, optional=self.textOptional)
        else:
            text = None
        return ComicStrip(self.getName(), url, imageUrls, self.namer, self.session, text=text)

    def getStrips(self, maxstrips=None):
        """Get comic strips."""
        if maxstrips:
            word = u"strip" if maxstrips == 1 else "strips"
            msg = u'Retrieving %d %s' % (maxstrips, word)
        else:
            msg = u'Retrieving all strips'
        if self.indexes:
            if len(self.indexes) == 1:
                msg += u" for index %s" % self.indexes[0]
            else:
                msg += u" for indexes %s" % self.indexes
            # Always call starter() since it might initialize cookies.
            # See for example Oglaf comic.
            self.starter()
            urls = [self.getIndexStripUrl(index) for index in self.indexes]
        else:
            urls = [self.getLatestUrl()]
        if self.adult:
            msg += u" (including adult content)"
        out.info(msg)
        for url in urls:
            for strip in self.getStripsFor(url, maxstrips):
                yield strip

    def getStripsFor(self, url, maxstrips):
        """Get comic strips for an URL. If maxstrips is a positive number, stop after
        retrieving the given number of strips."""
        self.hitFirstStripUrl = False
        seen_urls = set()
        while url:
            out.info(u'Get strip URL %s' % url, level=1)
            data = self.getPage(url)
            if self.shouldSkipUrl(url, data):
                out.info(u'Skipping URL %s' % url)
                self.skippedUrls.add(url)
            else:
                try:
                    yield self.getComicStrip(url, data)
                except ValueError as msg:
                    # image not found
                    out.exception(msg)
            if self.firstStripUrl == url:
                out.debug(u"Stop at first URL %s" % url)
                self.hitFirstStripUrl = True
                break
            if maxstrips is not None:
                maxstrips -= 1
                if maxstrips <= 0:
                    break
            prevUrl = self.getPrevUrl(url, data)
            seen_urls.add(url)
            if prevUrl in seen_urls:
                # avoid recursive URL loops
                out.warn(u"Already seen previous URL %r" % prevUrl)
                break
            url = prevUrl
            if url:
                # wait up to 2 seconds for next URL
                time.sleep(1.0 + random.random())

    def getPrevUrl(self, url, data):
        """Find previous URL."""
        prevUrl = None
        if self.prevSearch:
            try:
                prevUrl = self.fetchUrl(url, data, self.prevSearch)
            except ValueError as msg:
                # assume there is no previous URL, but print a warning
                out.warn(u"%s Assuming no previous comic strips exist." % msg)
            else:
                prevUrl = self.prevUrlModifier(prevUrl)
                out.debug(u"Found previous URL %s" % prevUrl)
                getHandler().comicPageLink(self.getName(), url, prevUrl)
        return prevUrl

    def getIndexStripUrl(self, index):
        """Get comic strip URL from index."""
        return self.stripUrl % index

    @classmethod
    def getName(cls):
        """Get scraper name."""
        if hasattr(cls, 'name'):
            return cls.name
        return cls.__name__

    @classmethod
    def starter(cls):
        """Get starter URL from where to scrape comic strips."""
        return cls.url

    @classmethod
    def namer(cls, imageUrl, pageUrl):
        """Return filename for given image and page URL."""
        return None

    @classmethod
    def prevUrlModifier(cls, prevUrl):
        """Optional modification of parsed previous URLs. Useful if
        there are domain redirects. The default implementation does
        not modify the URL.
        """
        return prevUrl

    @classmethod
    def imageUrlModifier(cls, imageUrl, data):
        """Optional modification of parsed image URLs. Useful if the URL
        needs to be fixed before usage. The default implementation does
        not modify the URL. The given data is the URL page data.
        """
        return imageUrl

    def getLatestUrl(self):
        """Get starter URL from where to scrape comic strips."""
        return self.starter()

    @classmethod
    def vote(cls):
        """Cast a public vote for this comic."""
        url = configuration.VoteUrl + 'count/'
        uid = get_system_uid()
        data = {"name": cls.getName().replace('/', '_'), "uid": uid}
        page = urlopen(url, cls.session, data=data)
        return page.text

    def getCompleteFile(self, basepath):
        """Get filename indicating all comics are downloaded."""
        dirname = getDirname(self.getName())
        return os.path.join(basepath, dirname, "complete.txt")

    def isComplete(self, basepath):
        """Check if all comics are downloaded."""
        return os.path.isfile(self.getCompleteFile(basepath))

    def setComplete(self, basepath):
        """Set complete flag for this comic, ie. all comics are downloaded."""
        if self.endOfLife:
            filename = self.getCompleteFile(basepath)
            if not os.path.exists(filename):
                with open(filename, 'w') as f:
                    f.write('All comics should be downloaded here.')

    @classmethod
    def getPage(cls, url):
        """
        Fetch a page and return the opaque repesentation for the data parameter
        of fetchUrls and fetchText.

        Implementation notes: While this base class does not restrict how the
        returned data is structured, subclasses (specific scrapers) should specify
        how this data works, since the stracture is passed into different methods
        which can be defined by comic modules and these methods should be able to
        use the data if they so desire... (Affected methods: shouldSkipUrl,
        imageUrlModifier)
        """
        raise ValueError("No implementation for getPage!")

    @classmethod
    def fetchUrls(cls, url, data, urlSearch):
        # abstract: subclasses must locate URLs matching urlSearch in data
        raise ValueError("No implementation for fetchUrls!")

    @classmethod
    def fetchUrl(cls, url, data, urlSearch):
        # convenience wrapper: first URL matching the search
        return cls.fetchUrls(url, data, urlSearch)[0]

    @classmethod
    def fetchText(cls, url, data, textSearch, optional):
        # abstract: subclasses must extract text matching textSearch from data
        raise ValueError("No implementation for fetchText!")

    @classmethod
    def getDisabledReasons(cls):
        """
        Get a dict of reasons why this comic module is disabled. The key is a
        short (unique) identifier, the value is a string explaining why the
        module is deactivated. If the module is not disabled, just return an
        empty dict.
        """
        return {}
class _BasicScraper(Scraper):
    """
    Scraper base class that matches regular expressions against HTML pages.

    Subclasses of this scraper should use compiled regular expressions as
    values for prevSearch, imageSearch and textSearch.

    Implementation note: The return value of getPage is a tuple: the first
    element is the raw HTML page text, the second element is the base URL (if
    any).
    """

    # locates an optional <base href="..."> tag for resolving relative URLs
    BASE_SEARCH = re.compile(tagre("base", "href", '([^"]*)'))

    @classmethod
    def getPage(cls, url):
        # Returns (page text, base URL) — see class docstring.
        content = getPageContent(url, cls.session)
        # determine base URL
        baseUrl = None
        match = cls.BASE_SEARCH.search(content)
        if match:
            baseUrl = match.group(1)
        else:
            baseUrl = url
        return (content, baseUrl)

    @classmethod
    def fetchUrls(cls, url, data, urlSearch):
        """Search all entries for given URL pattern(s) in a HTML page."""
        searchUrls = []
        searches = makeSequence(urlSearch)
        for search in searches:
            for match in search.finditer(data[0]):
                searchUrl = match.group(1)
                if not searchUrl:
                    raise ValueError("Pattern %s matched empty URL at %s." % (search.pattern, url))
                out.debug(u'matched URL %r with pattern %s' % (searchUrl, search.pattern))
                # resolve against the page's base URL and normalise
                searchUrls.append(normaliseURL(urljoin(data[1], searchUrl)))
            if searchUrls:
                # do not search other links if one pattern matched
                break
        if not searchUrls:
            patterns = [x.pattern for x in searches]
            raise ValueError("Patterns %s not found at URL %s." % (patterns, url))
        return searchUrls

    @classmethod
    def fetchText(cls, url, data, textSearch, optional):
        """Search text entry for given text pattern in a HTML page."""
        if textSearch:
            match = textSearch.search(data[0])
            if match:
                text = match.group(1)
                out.debug(u'matched text %r with pattern %s' % (text, textSearch.pattern))
                return unescape(text).strip()
            if optional:
                return None
            else:
                raise ValueError("Pattern %s not found at URL %s." % (textSearch.pattern, url))
        else:
            return None
class _ParserScraper(Scraper):
    """
    Scraper base class that uses a HTML parser and XPath expressions.

    All links are resolved before XPath searches are applied, so all URLs are
    absolute!

    Subclasses of this class should use XPath expressions as values for
    prevSearch, imageSearch and textSearch. When the XPath directly selects an
    attribute, it is used as the output.

    All those searches try to do something intelligent when they match a
    complete HTML Element: prevSearch and imageSearch try to find a "link
    attribute" and use that as URL. textSearch strips all tags from the content
    of the HTML element and returns that.
    """

    # Switch between CSS and XPath selectors for this class. Since CSS needs
    # another Python module, XPath is the default for now.
    css = False

    @classmethod
    def getPage(cls, url):
        # The opaque page data here is an lxml element tree with absolute links.
        tree = html.document_fromstring(getPageContent(url, cls.session))
        tree.make_links_absolute(url)
        return tree

    @classmethod
    def fetchUrls(cls, url, data, urlSearch):
        """Search all entries for given XPath in a HTML page."""
        searchUrls = []
        if cls.css:
            searchFun = data.cssselect
        else:
            searchFun = data.xpath
        searches = makeSequence(urlSearch)
        for search in searches:
            for match in searchFun(search):
                try:
                    # element match: take the first known link attribute
                    for attrib in html_link_attrs:
                        if attrib in match.attrib:
                            searchUrls.append(match.get(attrib))
                except AttributeError:
                    # the XPath selected a plain value, not an element
                    searchUrls.append(str(match))
            if searchUrls:
                # do not search other links if one pattern matched
                break
        if not searchUrls:
            raise ValueError("XPath %s not found at URL %s." % (searches, url))
        return searchUrls

    @classmethod
    def fetchText(cls, url, data, textSearch, optional):
        """Search text entry for given text XPath in a HTML page."""
        if textSearch:
            text = ''
            for match in data.xpath(textSearch):
                try:
                    text += ' ' + match.text_content()
                except AttributeError:
                    # plain string result (Python 2 `unicode`)
                    text += ' ' + unicode(match)
            if text.strip() == '':
                if optional:
                    return None
                else:
                    raise ValueError("XPath %s did not match anything at URL %s." % (textSearch, url))
            out.debug(u'Matched text %r with XPath %s' % (text, textSearch))
            return unescape(text).strip()
        else:
            return None

    @classmethod
    def getDisabledReasons(cls):
        # report missing optional dependencies (lxml is required, cssselect
        # only when css=True)
        res = {}
        if cls.css and cssselect is None:
            res['css'] = u"This module needs the cssselect (python-cssselect) python module which is not installed."
        if html is None:
            res['lxml'] = u"This module needs the lxml (python-lxml) python module which is not installed."
        return res
def find_scraperclasses(comic, multiple_allowed=False):
    """Get a list comic scraper classes. Can return more than one entries if
    multiple_allowed is True, else it raises a ValueError if multiple
    modules match. The match is a case insensitive substring search."""
    if not comic:
        raise ValueError("empty comic name")
    cname = comic.lower()
    candidates = []
    for scraperclass in get_scraperclasses():
        lname = scraperclass.getName().lower()
        # an exact name match wins outright in single-result mode
        if lname == cname and not multiple_allowed:
            return [scraperclass]
        # substring match (an exact match is also a substring match)
        if cname in lname:
            candidates.append(scraperclass)
    if len(candidates) > 1 and not multiple_allowed:
        comics = ", ".join(x.getName() for x in candidates)
        raise ValueError('multiple comics found: %s' % comics)
    if not candidates:
        raise ValueError('comic %r not found' % comic)
    return candidates
# Module-level cache for get_scraperclasses(); populated on first call.
_scraperclasses = None
def get_scraperclasses():
    """Find all comic scraper classes in the plugins directory.
    The result is cached.
    @return: list of Scraper classes
    @rtype: list of Scraper
    """
    global _scraperclasses
    if _scraperclasses is None:
        out.debug(u"Loading comic modules...")
        modules = loader.get_modules('plugins')
        plugins = loader.get_plugins(modules, Scraper)
        _scraperclasses = list(plugins)
        # fail fast on duplicate scraper names before the list is used
        check_scrapers()
        out.debug(u"... %d modules loaded." % len(_scraperclasses))
    return _scraperclasses
def check_scrapers():
    """Check for duplicate scraper class names."""
    seen = {}
    for scraperclass in _scraperclasses:
        key = scraperclass.getName().lower()
        # names collide case-insensitively
        previous = seen.get(key)
        if previous is not None:
            raise ValueError('duplicate scrapers %s and %s found'
                             % (scraperclass.getName(), previous.getName()))
        seen[key] = scraperclass
def make_scraper(classname, scraperType=_BasicScraper, **attributes):
    """Make a new scraper class with given name and attributes."""
    # build the class dynamically; attributes become class-level members
    bases = (scraperType,)
    return type(classname, bases, attributes)
|
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WMain/IOST_WMain_I2C.py
# Date : Oct 20, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from Libs import IOST_Basic
from Libs import IOST_Config
from Libs import IOST_WSetupTestcase
import gtk
import gtk.glade
#======================================================================
IOST_WMain_I2C_Debug_Enable = 0
#======================================================================
class IOST_WMain_I2C():
"""
This is class to get all I2C object from IOST_WMain_Skylark window and control to these
component
"""
def __init__(self, glade_filename, window_name, builder=None):
"""
"""
self.IOST_WMain_I2C_window_name=window_name
if not builder:
self.IOST_I2C_Builder = gtk.Builder()
self.IOST_I2C_Builder.add_from_file(glade_filename)
self.IOST_I2C_Builder.connect_signals(self)
else:
self.IOST_I2C_Builder = builder
#----------------------------------------------------------------------
def GetI2C_Obj(self, window_name):
"""
Get all I2C objects on WMain window
"""
self.IOST_Objs[window_name][window_name+"_IP_Enable_I2C_CB"] = self.IOST_I2C_Builder.get_object(self.IOST_Objs[window_name]["_IP_Enable_I2C_CB"])
for i in range(0, self.IOST_Data["I2C_PortNum"]):
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_CB"] = \
self.IOST_I2C_Builder.get_object(self.IOST_Objs[window_name]["_Config_I2C"+str(i)+"_CB"])
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_B"] = \
self.IOST_I2C_Builder.get_object(self.IOST_Objs[window_name]["_Config_I2C"+str(i)+"_B"])
#----------------------------------------------------------------------
def SetValueToI2C_Obj(self, window_name):
"""
Init all I2C objects when start IOST Wmain program
"""
if self.IOST_Data["I2C"] == "Enable":
self.IOST_Objs[window_name][window_name+"_IP_Enable_I2C_CB"].set_active(True)
for i in range(0, self.IOST_Data["I2C_PortNum"]):
if self.IOST_Data["I2C"+str(i)][0] == "Disable":
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_CB"].set_active(False)
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_B"].set_sensitive(False)
else:
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_CB"].set_active(True)
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_B"].set_sensitive(True)
else:
self.IOST_Objs[window_name][window_name+"_IP_Enable_I2C_CB"].set_active(False)
for i in range(0, self.IOST_Data["I2C_PortNum"]):
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_CB"].set_sensitive(False)
self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_B"].set_sensitive(False)
#Update test case
for i in range(0, self.IOST_Data["I2C_PortNum"]):
self.IOST_Data["I2C"+str(i)+"_TestCaseNum"] = len(self.IOST_Data["I2C"+str(i)]) - 1
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C0_B_clicked(self, object, data=None):
"Control to ConfigI2C-0 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C0")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C0_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C0_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C0_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C0"][0] = 'Enable'
else:
self.IOST_Data["I2C0"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C0"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C1_B_clicked(self, object, data=None):
"Control to ConfigI2C-1 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C1")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C1_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C1_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C1_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C1"][0] = 'Enable'
else:
self.IOST_Data["I2C1"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C1"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C2_B_clicked(self, object, data=None):
"Control to ConfigI2C-2 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C2")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C2_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C2_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C2_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C2"][0] = 'Enable'
else:
self.IOST_Data["I2C2"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C2"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C3_B_clicked(self, object, data=None):
"Control to ConfigI2C-3 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C3")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C3_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C3_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C3_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C3"][0] = 'Enable'
else:
self.IOST_Data["I2C3"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C3"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C4_B_clicked(self, object, data=None):
"Control to ConfigI2C-4 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "I2C4")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C4_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C4_CB"].get_active()
self.IOST_Objs[self.IOST_WMain_I2C_window_name][self.IOST_WMain_I2C_window_name+"_Config_I2C4_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["I2C4"][0] = 'Enable'
else:
self.IOST_Data["I2C4"][0] = 'Disable'
if IOST_WMain_I2C_Debug_Enable:
print self.IOST_Data["I2C4"][0]
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C5_B_clicked(self, object, data=None):
    """Open the setup-testcase window configured for the I2C5 port."""
    target_port = "I2C5"
    self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", target_port)
#----------------------------------------------------------------------
def on_IOST_WMain_Config_I2C5_C_toggled(self, object, data=None):
    """
    Handler for the I2C5 enable checkbox: record the port's
    Enable/Disable state in IOST_Data and grey out the port's
    Config button when the checkbox is cleared.
    """
    win = self.IOST_WMain_I2C_window_name
    is_active = self.IOST_Objs[win][win + "_Config_I2C5_CB"].get_active()
    self.IOST_Objs[win][win + "_Config_I2C5_B"].set_sensitive(is_active)
    if is_active:
        self.IOST_Data["I2C5"][0] = 'Enable'
    else:
        self.IOST_Data["I2C5"][0] = 'Disable'
    if IOST_WMain_I2C_Debug_Enable:
        # print() call form: single-argument output is identical on
        # Python 2 and 3 (the original `print x` statement is Py2-only)
        print(self.IOST_Data["I2C5"][0])
#----------------------------------------------------------------------
def on_IOST_WMain_IP_Enable_I2C_CB_toggled(self, object, data=None):
    """Propagate the master I2C enable checkbox to every per-port widget."""
    win = self.IOST_WMain_I2C_window_name
    enabled = self.IOST_Objs[win][win + "_IP_Enable_I2C_CB"].get_active()
    self.IOST_WMain_I2C_set_sensitive_all(enabled)
    self.IOST_Data["I2C"] = 'Enable' if enabled else 'Disable'
#----------------------------------------------------------------------
def IOST_WMain_I2C_set_sensitive_all(self, value):
    """
    Set the sensitivity of every per-port I2C checkbox to `value`.
    A port's Config button is only made sensitive when `value` is
    truthy AND that port is currently marked 'Enable'; otherwise it
    is greyed out.
    """
    win = self.IOST_WMain_I2C_window_name
    widgets = self.IOST_Objs[win]
    for port in range(0, self.IOST_Data["I2C_PortNum"]):
        port_name = "I2C" + str(port)
        widgets[win + "_Config_" + port_name + "_CB"].set_sensitive(value)
        if self.IOST_Data[port_name][0] == "Enable" and value:
            widgets[win + "_Config_" + port_name + "_B"].set_sensitive(value)
        else:
            widgets[win + "_Config_" + port_name + "_B"].set_sensitive(False)
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
|
|
'''
CmndHelperPQ is a helper class for dealing with commands
sent to a PyQt piped viewer.
This package was developed by the Thermal Modeling and Analysis
Project (TMAP) of the National Oceanic and Atmospheric
Administration's (NOAA) Pacific Marine Environmental Lab (PMEL).
'''
import sys
# First try to import PySide2, then try PyQt5 if that fails, and finally try PyQt4 if that fails
try:
import PySide2
PYTHONQT_VERSION = 'PySide2'
except ImportError:
try:
import PyQt5
PYTHONQT_VERSION = 'PyQt5'
except ImportError:
import PyQt4
PYTHONQT_VERSION = 'PyQt4'
# Now that the Python Qt version is determined, import the parts
# allowing any import errors to propagate out
if PYTHONQT_VERSION == 'PySide2':
from PySide2.QtCore import Qt, QPointF, QSizeF
from PySide2.QtGui import QBrush, QColor, QFont, QPainterPath, QPen
elif PYTHONQT_VERSION == 'PyQt5':
from PyQt5.QtCore import Qt, QPointF, QSizeF
from PyQt5.QtGui import QBrush, QColor, QFont, QPainterPath, QPen
else:
from PyQt4.QtCore import Qt, QPointF, QSizeF
from PyQt4.QtGui import QBrush, QColor, QFont, QPainterPath, QPen
class SidesRectF(object):
    '''
    Small helper describing a rectangle by the float positions of its
    four edges: left-x, top-y, right-x, and bottom-y.
    '''

    def __init__(self, left, top, right, bottom):
        '''
        Create a SidesRectF from the given edge positions; each
        argument is coerced to float.
        '''
        super(SidesRectF, self).__init__()
        # All four edges live in one private dict; accessors below
        # coerce on the way in so the stored values are always floats.
        self.__edges = {
            'left': float(left),
            'top': float(top),
            'right': float(right),
            'bottom': float(bottom),
        }

    def left(self):
        '''Left edge position as a float.'''
        return self.__edges['left']

    def setLeft(self, val):
        '''Set the left edge position to float(val).'''
        self.__edges['left'] = float(val)

    def top(self):
        '''Top edge position as a float.'''
        return self.__edges['top']

    def setTop(self, val):
        '''Set the top edge position to float(val).'''
        self.__edges['top'] = float(val)

    def right(self):
        '''Right edge position as a float.'''
        return self.__edges['right']

    def setRight(self, val):
        '''Set the right edge position to float(val).'''
        self.__edges['right'] = float(val)

    def bottom(self):
        '''Bottom edge position as a float.'''
        return self.__edges['bottom']

    def setBottom(self, val):
        '''Set the bottom edge position to float(val).'''
        self.__edges['bottom'] = float(val)
class SymbolPath(object):
    '''
    Trivial helper class pairing the QPainterPath of a plot symbol
    with a flag saying how the symbol should be rendered.
    '''

    def __init__(self, painterpath, isfilled):
        '''
        Create a SymbolPath representing a symbol.

        Arguments:
            painterpath: the QPainterPath representing this symbol
            isfilled: if True, the symbol should be drawn with a
                      solid brush; if False, the symbol should be
                      drawn with a solid pen
        '''
        super(SymbolPath, self).__init__()
        self.__painterpath = painterpath
        self.__isfilled = isfilled
        if isfilled:
            try:
                # simplified() merges overlapping subpaths so a solid
                # brush fills the union correctly.  Older Qt bindings
                # may not provide it, so fall back to the raw path.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.)
                self.__painterpath = painterpath.simplified()
            except Exception:
                pass

    def painterPath(self):
        '''
        Return the QPainterPath for this symbol.
        '''
        return self.__painterpath

    def isFilled(self):
        '''
        Return True if the symbol should be drawn with a solid brush;
        return False if the symbol should be drawn with a solid pen.
        '''
        return self.__isfilled
class CmndHelperPQ(object):
    '''
    Helper class of methods for translating command dictionaries
    sent to a PyQt piped viewer into Qt objects (fonts, brushes,
    pens, symbols, sizes, rectangles, and colors).
    '''
    def __init__(self, viewer):
        '''
        Creates a cmndpipe command helper.  The widget viewer
        is only used for determining the default font and the
        ignoreAlpha / widthScalingFactor settings.
        '''
        super(CmndHelperPQ, self).__init__()
        self.__viewer = viewer
        # cache of symbol name -> SymbolPath built by getSymbolFromCmnd
        self.__symbolpaths = { }
    def getFontFromCmnd(self, fontinfo):
        '''
        Returns a QFont based on the information in the dictionary
        fontinfo.  Any key that is absent leaves the corresponding
        font attribute at its default value.
        Recognized keys in the font dictionary are:
            "family": font family name (string)
            "size": text size in points (1/72 inches)
            "italic": italicize? (False/True)
            "bold": make bold? (False/True)
            "underline": underline?  (False/True)
        '''
        try:
            myfont = QFont(fontinfo["family"])
        except KeyError:
            # no family given - start from the viewer's current font
            myfont = self.__viewer.font()
        try:
            myfont.setPointSizeF(fontinfo["size"])
        except KeyError:
            pass
        try:
            myfont.setItalic(fontinfo["italic"])
        except KeyError:
            pass
        try:
            myfont.setBold(fontinfo["bold"])
        except KeyError:
            pass
        try:
            myfont.setUnderline(fontinfo["underline"])
        except KeyError:
            pass
        return myfont
    def getBrushFromCmnd(self, brushinfo):
        '''
        Returns a QBrush based on the information in the dictionary
        brushinfo.  A ValueError is raised if the value for the
        "style" key, if given, is not recognized.
        Recognized keys in the fill dictionary are:
            "color": color name or 24-bit RGB integer value
                         (eg, 0xFF0088)
            "alpha": alpha value from 0 (transparent) to 255 (opaque)
            "style": brush style name ("solid", "dense1" to "dense7",
                         "none", "hor", "ver", "cross",
                         "bdiag", "fdiag", "diagcross")
        '''
        try:
            mycolor = self.getColorFromCmnd(brushinfo)
            mybrush = QBrush(mycolor)
        except KeyError:
            # no color given - default-constructed (black, NoBrush) brush
            mybrush = QBrush()
        try:
            # translate the style name into the Qt brush-style constant
            mystyle = brushinfo["style"]
            if mystyle == "solid":
                mystyle = Qt.SolidPattern
            elif mystyle == "dense1":
                mystyle = Qt.Dense1Pattern
            elif mystyle == "dense2":
                mystyle = Qt.Dense2Pattern
            elif mystyle == "dense3":
                mystyle = Qt.Dense3Pattern
            elif mystyle == "dense4":
                mystyle = Qt.Dense4Pattern
            elif mystyle == "dense5":
                mystyle = Qt.Dense5Pattern
            elif mystyle == "dense6":
                mystyle = Qt.Dense6Pattern
            elif mystyle == "dense7":
                mystyle = Qt.Dense7Pattern
            elif mystyle == "none":
                mystyle = Qt.NoBrush
            elif mystyle == "hor":
                mystyle = Qt.HorPattern
            elif mystyle == "ver":
                mystyle = Qt.VerPattern
            elif mystyle == "cross":
                mystyle = Qt.CrossPattern
            elif mystyle == "bdiag":
                mystyle = Qt.BDiagPattern
            elif mystyle == "fdiag":
                mystyle = Qt.FDiagPattern
            elif mystyle == "diagcross":
                mystyle = Qt.DiagCrossPattern
            else:
                raise ValueError("Unknown brush style '%s'" % str(mystyle))
            mybrush.setStyle(mystyle)
        except KeyError:
            pass
        return mybrush
    def getPenFromCmnd(self, peninfo):
        '''
        Returns a QPen based on the information in the dictionary
        peninfo.  A ValueError is raised if the value for the
        "style", "capstyle", or "joinstyle" key, if given, is not
        recognized.
        Recognized keys in the outline dictionary are:
            "color": color name or 24-bit RGB integer value
                         (eg, 0xFF0088)
            "alpha": alpha value from 0 (transparent) to 255 (opaque)
            "width": pen width in points (1/72 inches); possibly
                         further scaled by the width scaling factor
            "style": pen style name ("solid", "dash", "dot", "dashdot",
                         "dashdotdot")
            "capstyle": pen cap style name ("square", "flat", "round")
            "joinstyle": pen join style name ("bevel", "miter", "round")
        '''
        try:
            mycolor = self.getColorFromCmnd(peninfo)
            mypen = QPen(mycolor)
        except KeyError:
            # no color given - default-constructed (black, solid) pen
            mypen = QPen()
        try:
            penwidth = float(peninfo["width"])
            # scale by the viewer's global width factor before applying
            penwidth *= self.__viewer.widthScalingFactor()
            mypen.setWidthF(penwidth)
        except KeyError:
            pass
        try:
            # translate the line-style name into the Qt constant
            mystyle = peninfo["style"]
            if mystyle == "solid":
                mystyle = Qt.SolidLine
            elif mystyle == "dash":
                mystyle = Qt.DashLine
            elif mystyle == "dot":
                mystyle = Qt.DotLine
            elif mystyle == "dashdot":
                mystyle = Qt.DashDotLine
            elif mystyle == "dashdotdot":
                mystyle = Qt.DashDotDotLine
            else:
                raise ValueError("Unknown pen style '%s'" % str(mystyle))
            mypen.setStyle(mystyle)
        except KeyError:
            pass
        try:
            # translate the cap-style name into the Qt constant
            mystyle = peninfo["capstyle"]
            if mystyle == "square":
                mystyle = Qt.SquareCap
            elif mystyle == "flat":
                mystyle = Qt.FlatCap
            elif mystyle == "round":
                mystyle = Qt.RoundCap
            else:
                raise ValueError("Unknown pen cap style '%s'" % str(mystyle))
            mypen.setCapStyle(mystyle)
        except KeyError:
            pass
        try:
            # translate the join-style name into the Qt constant
            mystyle = peninfo["joinstyle"]
            if mystyle == "bevel":
                mystyle = Qt.BevelJoin
            elif mystyle == "miter":
                mystyle = Qt.MiterJoin
            elif mystyle == "round":
                mystyle = Qt.RoundJoin
            else:
                raise ValueError("Unknown pen join style '%s'" % str(mystyle))
            mypen.setJoinStyle(mystyle)
        except KeyError:
            pass
        return mypen
    def getSymbolFromCmnd(self, symbolinfo):
        '''
        Returns the SymbolPath for the symbol described in symbolinfo,
        which can either be a string or a dictionary.
        If symbolinfo is a string, it should be the name of a symbol that
        has already been defined, either as a pre-defined symbol or from
        a previous symbol definition.
        Current pre-defined symbol names are ones involving circles:
            'dot': very small filled circle
            'dotex': very small filled circle and outer lines of an ex mark
            'dotplus': very small filled circle and outer lines of a plus mark
            'circle': unfilled circle
            'circfill': normal-sized filled circle
            'circex': small unfilled circle and outer lines of an ex mark
            'circplus': small unfilled circle and outer lines of a plus mark
        If symbolinfo is a dictionary, the following key/value pairs are
        recognized:
            'name' : (string) symbol name (required)
            'pts'  : (sequence of pairs of floats) vertex coordinates
            'fill' : (bool) color-fill symbol?
        If 'pts' is given, the value is coordinates that define the symbol
        as multiline subpaths in a [-50,50] square for typical size.  The
        location of the point this symbol represents will be at the center
        of the square.  A coordinate outside [-100,100] will terminate the
        current subpath, and the next valid coordinate will start a new subpath.
        This definition will replace an existing symbol with the given name.
        If 'pts' is not given, the symbol must already be defined, either as
        a pre-defined symbol (see above) or from a previous symbol definition.
        Raises:
            TypeError  - if symbolinfo is neither a string nor a dictionary
            KeyError   - if symbolinfo is a dictionary and
                         the key 'name' is not given
            ValueError - if there are problems generating the symbol
        '''
        # get the information about the symbol
        if isinstance(symbolinfo, str):
            symbol = symbolinfo
            pts = None
            fill = False
        elif isinstance(symbolinfo, dict):
            symbol = symbolinfo['name']
            pts = symbolinfo.get('pts', None)
            fill = symbolinfo.get('fill', False)
        else:
            raise TypeError('symbolinfo must either be a dictionary or a string')
        if pts is None:
            # no path given; check if already defined (cache lookup)
            sympath = self.__symbolpaths.get(symbol)
            if sympath is not None:
                return sympath
            # symbol not defined - if well known, create a SymbolPath for it
            if symbol == 'dot':
                path = QPainterPath()
                path.addEllipse(-10.0, -10.0, 20.0, 20.0)
                sympath = SymbolPath(path, True)
            elif symbol == 'dotplus':
                path = QPainterPath()
                path.addEllipse(-10.0, -10.0, 20.0, 20.0)
                # filled path, so need to draw "lines" as rectangles
                path.addRect( -4.0, -50.0,  8.0, 24.0)
                path.addRect( -4.0,  26.0,  8.0, 24.0)
                path.addRect(-50.0,  -4.0, 24.0,  8.0)
                path.addRect( 26.0,  -4.0, 24.0,  8.0)
                sympath = SymbolPath(path, True)
            elif symbol == 'dotex':
                path = QPainterPath()
                path.addEllipse(-10.0, -10.0, 20.0, 20.0)
                # filled path, so need to draw "lines" as rectangles
                path.moveTo(-38.18, -32.53)
                path.lineTo(-32.53, -38.18)
                path.lineTo(-15.56, -21.21)
                path.lineTo(-21.21, -15.56)
                # moveTo adds an implicit closeSubpath in QPainterPath
                path.moveTo(-38.18,  32.53)
                path.lineTo(-32.53,  38.18)
                path.lineTo(-15.56,  21.21)
                path.lineTo(-21.21,  15.56)
                # moveTo adds an implicit closeSubpath in QPainterPath
                path.moveTo( 38.18, -32.53)
                path.lineTo( 32.53, -38.18)
                path.lineTo( 15.56, -21.21)
                path.lineTo( 21.21, -15.56)
                # moveTo adds an implicit closeSubpath in QPainterPath
                path.moveTo( 38.18,  32.53)
                path.lineTo( 32.53,  38.18)
                path.lineTo( 15.56,  21.21)
                path.lineTo( 21.21,  15.56)
                # Qt closes the subpath automatically
                sympath = SymbolPath(path, True)
            elif symbol == 'circle':
                path = QPainterPath()
                path.addEllipse(-35.0, -35.0, 70.0, 70.0)
                sympath = SymbolPath(path, False)
            elif symbol == 'circfill':
                path = QPainterPath()
                path.addEllipse(-39.0, -39.0, 78.0, 78.0)
                sympath = SymbolPath(path, True)
            elif symbol == 'circplus':
                path = QPainterPath()
                path.addEllipse(-20.0, -20.0, 40.0, 40.0)
                # not a filled path, so just draw the lines
                path.moveTo(  0.0, -50.0)
                path.lineTo(  0.0, -20.0)
                path.moveTo(  0.0,  50.0)
                path.lineTo(  0.0,  20.0)
                path.moveTo(-50.0,   0.0)
                path.lineTo(-20.0,   0.0)
                path.moveTo( 50.0,   0.0)
                path.lineTo( 20.0,   0.0)
                sympath = SymbolPath(path, False)
            elif symbol == 'circex':
                path = QPainterPath()
                path.addEllipse(-20.0, -20.0, 40.0, 40.0)
                # not a filled path, so just draw the lines
                path.moveTo(-35.35, -35.35)
                path.lineTo(-14.15, -14.15)
                path.moveTo(-35.35,  35.35)
                path.lineTo(-14.15,  14.15)
                path.moveTo( 35.35, -35.35)
                path.lineTo( 14.15, -14.15)
                path.moveTo( 35.35,  35.35)
                path.lineTo( 14.15,  14.15)
                sympath = SymbolPath(path, False)
            else:
                raise ValueError("Unknown symbol '%s'" % str(symbol))
        else:
            # define (or redefine) a symbol from the given path
            try:
                coords = [ [ float(val) for val in coord ] for coord in pts ]
                if not coords:
                    raise ValueError
                for crd in coords:
                    if len(crd) != 2:
                        raise ValueError
            except Exception:
                raise ValueError('pts, if given, must be a sequence of pairs of numbers')
            path = QPainterPath()
            somethingdrawn = False
            newstart = True
            for (xval, yval) in coords:
                # flip so positive y is up
                yval *= -1.0
                if (xval < -100.0) or (xval > 100.0) or (yval < -100.0) or (yval > 100.0):
                    # end the current subpath
                    newstart = True
                elif newstart:
                    # start a new subpath; moveTo adds an implicit closeSubpath in QPainterPath
                    path.moveTo(xval, yval)
                    newstart = False
                else:
                    # continue the current subpath
                    path.lineTo(xval, yval)
                    somethingdrawn = True
            if not somethingdrawn:
                del path
                raise ValueError('symbol definition does not contain any drawn lines')
            # Qt closes the (sub)path automatically
            sympath = SymbolPath(path, fill)
        # save and return the SymbolPath
        self.__symbolpaths[symbol] = sympath
        return sympath
    def getSizeFromCmnd(self, sizeinfo):
        '''
        Returns a QSizeF based on the information in the dictionary
        sizeinfo.  Recognized keys are "width" and "height", and
        correspond to those float values in the QSizeF.  Values not
        given in sizeinfo are assigned as zero in the returned QSizeF.
        '''
        # note: despite the name, myrect is a QSizeF, not a rectangle
        myrect = QSizeF(0.0, 0.0)
        try:
            myrect.setWidth(float(sizeinfo["width"]))
        except KeyError:
            pass
        try:
            myrect.setHeight(float(sizeinfo["height"]))
        except KeyError:
            pass
        return myrect
    def getSidesFromCmnd(self, rectinfo):
        '''
        Returns a SidesRectF based on the information in the dictionary
        rectinfo.  Recognized keys are "left", "top", "right", and "bottom",
        and correspond to those float values in the SidesRectF.  Default
        values: "left": 0.0, "top": 0.0, "right":1.0, "bottom":1.0
        '''
        myrect = SidesRectF(left=0.0, top=0.0, right=1.0, bottom=1.0)
        try:
            myrect.setLeft(float(rectinfo["left"]))
        except KeyError:
            pass
        try:
            myrect.setTop(float(rectinfo["top"]))
        except KeyError:
            pass
        try:
            myrect.setRight(float(rectinfo["right"]))
        except KeyError:
            pass
        try:
            myrect.setBottom(float(rectinfo["bottom"]))
        except KeyError:
            pass
        return myrect
    def getColorFromCmnd(self, colorinfo):
        '''
        Returns a QColor based on the information in the dictionary
        colorinfo.  Raises a KeyError if the "color" key is not given,
        or a ValueError if the color value is not recognized by QColor.
        Recognized keys are:
            "color": color name or 24-bit RGB integer value
                     (eg, 0xFF0088)
            "alpha": alpha value from 0 (transparent) to 255 (opaque)
                     if viewer.ignoreAlpha True, this value is ignored
        '''
        colordata = colorinfo["color"]
        mycolor = QColor(colordata)
        if not mycolor.isValid():
            raise ValueError("Invalid color '%s'" % str(colordata))
        if not self.__viewer.ignoreAlpha():
            try:
                mycolor.setAlpha(int(colorinfo["alpha"]))
            except KeyError:
                pass
        return mycolor
    def computeARGB32PreMultInt(self, color):
        '''
        Returns the Format_ARGB32_Premultiplied integer value
        of the given QColor: each RGB component is scaled by the
        alpha, then the four bytes are packed as 0xAARRGGBB.
        '''
        (redint, greenint, blueint, alphaint) = color.getRgb()
        if self.__viewer.ignoreAlpha():
            alphaint = 255
        elif (alphaint < 255):
            # Scale the RGB values by the alpha value
            alphafactor = alphaint / 255.0
            redint = int( redint * alphafactor + 0.5 )
            # premultiplied components can never exceed the alpha value
            if redint > alphaint:
                redint = alphaint
            greenint = int( greenint * alphafactor + 0.5 )
            if greenint > alphaint:
                greenint = alphaint
            blueint = int( blueint * alphafactor + 0.5 )
            if blueint > alphaint:
                blueint = alphaint
        # pack bytes as A,R,G,B from most- to least-significant
        fillint = ((alphaint * 256 + redint) * 256 + \
                   greenint) * 256 + blueint
        return fillint
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class crvserver_lbvserver_binding(base_resource) :
    """ Binding class showing the lbvserver that can be bound to crvserver.
    This follows the standard (generated) NetScaler Nitro binding-resource
    pattern: private fields with property accessors, plus classmethods
    that wrap the Nitro REST add/delete/get/count operations.
    """
    def __init__(self) :
        # bound load-balancing vserver name, hit counter, owning cache
        # redirection vserver name, and the record count used by the
        # count()/count_filtered() classmethods (note the triple
        # underscore: the attribute is read back via __dict__['___count'])
        self._lbvserver = ""
        self._hits = 0
        self._name = ""
        self.___count = 0
    @property
    def lbvserver(self) :
        ur"""The Default target server name.<br/>Minimum length = 1.
        """
        try :
            return self._lbvserver
        except Exception as e:
            raise e
    @lbvserver.setter
    def lbvserver(self, lbvserver) :
        ur"""The Default target server name.<br/>Minimum length = 1
        """
        try :
            self._lbvserver = lbvserver
        except Exception as e:
            raise e
    @property
    def name(self) :
        ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e
    @name.setter
    def name(self, name) :
        ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e
    @property
    def hits(self) :
        ur"""Number of hits.  Read-only statistic; no setter is defined.
        """
        try :
            return self._hits
        except Exception as e:
            raise e
    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(crvserver_lbvserver_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # errorcode 444 means the session expired - drop it before
                # reporting the failure
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.crvserver_lbvserver_binding
        except Exception as e :
            raise e
    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            if self.name is not None :
                return str(self.name)
            return None
        except Exception as e :
            raise e
    @classmethod
    def add(cls, client, resource) :
        ur""" Use this API to add a crvserver_lbvserver_binding.
        resource may be a single binding object or a list of them.
        """
        try :
            if resource and type(resource) is not list :
                updateresource = crvserver_lbvserver_binding()
                updateresource.name = resource.name
                updateresource.lbvserver = resource.lbvserver
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [crvserver_lbvserver_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].name = resource[i].name
                        updateresources[i].lbvserver = resource[i].lbvserver
                return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e
    @classmethod
    def delete(cls, client, resource) :
        ur""" Use this API to delete a crvserver_lbvserver_binding.
        resource may be a single binding object or a list of them.
        """
        try :
            if resource and type(resource) is not list :
                deleteresource = crvserver_lbvserver_binding()
                deleteresource.name = resource.name
                deleteresource.lbvserver = resource.lbvserver
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [crvserver_lbvserver_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].name = resource[i].name
                        deleteresources[i].lbvserver = resource[i].lbvserver
                return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e
    @classmethod
    def get(cls, service, name) :
        ur""" Use this API to fetch crvserver_lbvserver_binding resources.
        """
        try :
            obj = crvserver_lbvserver_binding()
            obj.name = name
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e
    @classmethod
    def get_filtered(cls, service, name, filter_) :
        ur""" Use this API to fetch filtered set of crvserver_lbvserver_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        try :
            obj = crvserver_lbvserver_binding()
            obj.name = name
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e
    @classmethod
    def count(cls, service, name) :
        ur""" Use this API to count crvserver_lbvserver_binding resources configured on NetScaler.
        """
        try :
            obj = crvserver_lbvserver_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            # the count comes back in the first record's ___count field
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
    @classmethod
    def count_filtered(cls, service, name, filter_) :
        ur""" Use this API to count the filtered set of crvserver_lbvserver_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        try :
            obj = crvserver_lbvserver_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            # the count comes back in the first record's ___count field
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
class crvserver_lbvserver_binding_response(base_response) :
    """
    Response envelope for crvserver_lbvserver_binding API calls.
    Holds the standard Nitro status fields plus the list of
    binding records returned by the NetScaler.
    """
    def __init__(self, length=1) :
        # Standard Nitro response status fields.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-size the result list with `length` empty binding records.
        # (The original first assigned an empty list and immediately
        # overwrote it; that dead store has been removed.)
        self.crvserver_lbvserver_binding = [crvserver_lbvserver_binding() for _ in range(length)]
|
|
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
from __future__ import with_statement
import os
import sys
from optparse import make_option, OptionParser
import traceback
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
    """
    Raised when a management command cannot complete.

    The management-command machinery catches this exception and prints
    its message to the appropriate output stream (i.e., stderr) instead
    of dumping a traceback, so raising it with a sensible description
    of the error is the preferred way for a command to report that
    something has gone wrong during execution.
    """
    pass
def handle_default_options(options):
    """
    Apply the options that every management command accepts, so that
    ManagementUtility can handle them before searching for user
    commands: export the chosen settings module and prepend the extra
    directory to the Python path.
    """
    settings_module = options.settings
    if settings_module:
        os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
    extra_path = options.pythonpath
    if extra_path:
        sys.path.insert(0, extra_path)
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
show_traceback = options.get('traceback', False)
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
saved_lang = None
if self.can_import_settings:
try:
from django.utils import translation
saved_lang = translation.get_language()
translation.activate('en-us')
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
if show_traceback:
traceback.print_exc()
else:
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
if show_traceback:
traceback.print_exc()
else:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
if saved_lang is not None:
translation.activate(saved_lang)
def validate(self, app=None, display_num_errors=False):
    """
    Validate ``app`` (or every installed app when ``app`` is None),
    raising ``CommandError`` if any model fails validation.
    """
    from django.core.management.validation import get_validation_errors
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    stream = StringIO()
    error_count = get_validation_errors(stream, app)
    if error_count:
        stream.seek(0)
        raise CommandError(
            "One or more models did not validate:\n%s" % stream.read())
    if display_num_errors:
        suffix = 's' if error_count != 1 else ''
        self.stdout.write("%s error%s found\n" % (error_count, suffix))
def handle(self, *args, **options):
    """
    Perform the command's work. This base implementation is abstract;
    concrete commands must override it.
    """
    raise NotImplementedError()
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
class LabelCommand(BaseCommand):
    """
    Management command operating on one or more arbitrary string labels.

    Subclasses implement ``handle_label()`` rather than ``handle()``; it
    is invoked once per label. When the arguments are names of installed
    applications, use ``AppCommand`` instead.
    """
    args = '<label label ...>'
    label = 'label'

    def handle(self, *labels, **options):
        if not labels:
            raise CommandError('Enter at least one %s.' % self.label)
        # Collect per-label output, dropping empty results.
        outputs = (self.handle_label(item, **options) for item in labels)
        return '\n'.join(chunk for chunk in outputs if chunk)

    def handle_label(self, label, **options):
        """
        Perform the command's actions for ``label``, the string exactly
        as given on the command line.
        """
        raise NotImplementedError()
class NoArgsCommand(BaseCommand):
    """
    Management command that accepts no positional arguments.

    Subclasses implement ``handle_noargs()``; ``handle()`` is overridden
    so that passing any argument raises ``CommandError``.
    """
    args = ''

    def handle(self, *args, **options):
        if args:
            raise CommandError("Command doesn't accept any arguments")
        return self.handle_noargs(**options)

    def handle_noargs(self, **options):
        """
        Perform this command's actions; must be overridden.
        """
        raise NotImplementedError()
|
|
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
    """Skip warning-related tests on interpreters older than Python 2.6."""
    if version_info >= (2, 6):
        return
    raise SkipTest("Testing for warnings is not supported in versions "
                   "older than Python 2.6")
def test_lasso_zero():
    # The lasso must cope with all-zero data without crashing.
    X = [[0], [0], [0]]
    y = [0, 0, 0]
    model = Lasso(alpha=0.1).fit(X, y)
    prediction = model.predict([[1], [2], [3]])
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(prediction, [0, 0, 0])
    assert_almost_equal(model.dual_gap_, 0)
def test_lasso_toy():
    # Exercise Lasso on a toy problem for several alphas. When checking
    # these numbers against glmnet, note that glmnet divides the
    # objective by the number of observations.
    X = [[-1], [0], [1]]
    Y = [-1, 0, 1]  # just a straight line
    T = [[2], [3], [4]]  # test sample
    cases = [
        (1e-8, [1], [2, 3, 4]),
        (0.1, [.85], [1.7, 2.55, 3.4]),
        (0.5, [.25], [0.5, 0.75, 1.]),
        (1, [.0], [0, 0, 0]),
    ]
    for alpha, expected_coef, expected_pred in cases:
        model = Lasso(alpha=alpha)
        model.fit(X, Y)
        pred = model.predict(T)
        assert_array_almost_equal(model.coef_, expected_coef)
        assert_array_almost_equal(pred, expected_pred)
        assert_almost_equal(model.dual_gap_, 0)
def test_enet_toy():
    # Check ElasticNet over several (alpha, l1_ratio) settings, with and
    # without a precomputed Gram matrix. alpha=0 should normally be
    # disallowed; it is exercised here purely as a border case.
    X = np.array([[-1.], [0.], [1.]])
    Y = [-1, 0, 1]  # just a straight line
    T = [[2.], [3.], [4.]]  # test sample

    def check(model, expected_coef, expected_pred, decimal=None):
        # Shared assertion stanza for every fitted configuration.
        pred = model.predict(T)
        if decimal is None:
            assert_array_almost_equal(model.coef_, expected_coef)
            assert_array_almost_equal(pred, expected_pred)
        else:
            assert_array_almost_equal(model.coef_, expected_coef, decimal)
            assert_array_almost_equal(pred, expected_pred, decimal)
        assert_almost_equal(model.dual_gap_, 0)

    # With l1_ratio=1 this should behave exactly like the lasso.
    model = ElasticNet(alpha=1e-8, l1_ratio=1.0)
    model.fit(X, Y)
    check(model, [1], [2, 3, 4])

    model = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
                       precompute=False)
    model.fit(X, Y)
    check(model, [0.50819], [1.0163, 1.5245, 2.0327], decimal=3)

    model.set_params(max_iter=100, precompute=True)
    model.fit(X, Y)  # with Gram
    check(model, [0.50819], [1.0163, 1.5245, 2.0327], decimal=3)

    model.set_params(max_iter=100, precompute=np.dot(X.T, X))
    model.fit(X, Y)  # with explicit Gram array
    check(model, [0.50819], [1.0163, 1.5245, 2.0327], decimal=3)

    model = ElasticNet(alpha=0.5, l1_ratio=0.5)
    model.fit(X, Y)
    check(model, [0.45454], [0.9090, 1.3636, 1.8181], decimal=3)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
                  n_targets=1):
    """
    Build an ill-posed linear regression problem with many noisy features
    and comparatively few samples.

    Returns ``(X, y, X_test, y_test)`` drawn from a fixed
    ``RandomState(0)``, so repeated calls are deterministic.
    """
    rng = np.random.RandomState(0)
    # Draw order matters for reproducibility: weights, then train X,
    # then test X — identical to the historical implementation.
    weight_shape = (n_features, n_targets) if n_targets > 1 else (n_features,)
    w = rng.randn(*weight_shape)
    w[n_informative_features:] = 0.0  # only the leading features matter
    X = rng.randn(n_samples, n_features)
    X_test = rng.randn(n_samples, n_features)
    return X, np.dot(X, w), X_test, np.dot(X_test, w)
def test_lasso_cv():
    X, y, X_test, y_test = build_dataset()
    max_iter = 150
    cv_model = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
    assert_almost_equal(cv_model.alpha_, 0.056, 2)
    cv_model = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter,
                       precompute=True)
    cv_model.fit(X, y)
    assert_almost_equal(cv_model.alpha_, 0.056, 2)
    # The LARS and coordinate-descent implementations should select a
    # similar regularization strength ...
    lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
    # ... differing by at most one step on the alpha grid.
    idx_lars = np.searchsorted(cv_model.alphas_[::-1], lars.alpha_)
    idx_cd = np.searchsorted(cv_model.alphas_[::-1], cv_model.alpha_)
    assert_true(np.abs(idx_lars - idx_cd) <= 1)
    # Their cross-validated MSEs should agree as well.
    mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
    np.testing.assert_approx_equal(mse_lars(cv_model.alphas_[5]).mean(),
                                   cv_model.mse_path_[5].mean(),
                                   significant=2)
    # Generalization on held-out data.
    assert_greater(cv_model.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
    X, y, X_test, y_test = build_dataset()
    max_iter = 500
    # Without the constraint at least one coefficient goes negative ...
    unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
                            n_jobs=1)
    unconstrained.fit(X, y)
    assert_true(min(unconstrained.coef_) < 0)
    # ... and with positive=True, on the same data, none do.
    constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
                          positive=True, cv=2, n_jobs=1)
    constrained.fit(X, y)
    assert_true(min(constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
    # lasso_path with lars_path-style output must agree with lars_path.
    X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1, 2, 3.1])
    alphas = [5., 1., .5]
    # Interpolate both paths over alpha so they can be compared on the
    # same grid.
    alphas_lars, _, coefs_lars = lars_path(X, y, method='lasso')
    interp_lars = interpolate.interp1d(alphas_lars[::-1],
                                       coefs_lars[:, ::-1])
    alphas_cd, coefs_cd, _ = lasso_path(X, y, alphas=alphas,
                                        return_models=False)
    interp_cd = interpolate.interp1d(alphas_cd[::-1], coefs_cd[:, ::-1])
    assert_array_almost_equal(interp_cd(alphas), interp_lars(alphas),
                              decimal=1)
def test_enet_path():
    # Many samples and all-informative features push the selected
    # l1_ratio toward the ridge end of the grid.
    X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
                                         n_informative_features=100)
    max_iter = 150
    # The iteration budget is deliberately small to keep the test fast;
    # convergence warnings are therefore silenced.
    model = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                         l1_ratio=[0.5, 0.7], cv=3,
                         max_iter=max_iter)
    ignore_warnings(model.fit)(X, y)
    # Well conditioned -> smallest penalty on the grid wins.
    assert_almost_equal(model.alpha_, min(model.alphas_))
    # Dense ground truth -> the ridge-most l1_ratio wins.
    assert_equal(model.l1_ratio_, min(model.l1_ratio))
    model = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                         l1_ratio=[0.5, 0.7], cv=3,
                         max_iter=max_iter, precompute=True)
    ignore_warnings(model.fit)(X, y)
    # Same expectations with a precomputed Gram matrix.
    assert_almost_equal(model.alpha_, min(model.alphas_))
    assert_equal(model.l1_ratio_, min(model.l1_ratio))
    # Low noise, well conditioned: held-out score should be excellent.
    assert_greater(model.score(X_test, y_test), 0.99)
    # Multi-output/target case.
    X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
    model = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
                                  cv=3, max_iter=max_iter)
    ignore_warnings(model.fit)(X, y)
    assert_greater(model.score(X_test, y_test), 0.99)
    assert_equal(model.coef_.shape, (3, 10))
    # A single target must produce the same cross-validated alpha_ and
    # l1_ratio_ through the mono- and multi-task estimators.
    X, y, _, _ = build_dataset(n_features=10)
    mono = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    mono.fit(X, y)
    multi = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    multi.fit(X, y[:, np.newaxis])
    assert_almost_equal(mono.l1_ratio_, multi.l1_ratio_)
    assert_almost_equal(mono.alpha_, multi.alpha_)
def test_path_parameters():
    X, y, _, _ = build_dataset()
    max_iter = 100
    model = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
                         l1_ratio=0.5, tol=1e-3)
    model.fit(X, y)
    # Constructor parameters must survive fitting unchanged, and the
    # alpha grid must have the requested length.
    assert_almost_equal(0.5, model.l1_ratio)
    assert_equal(50, model.n_alphas)
    assert_equal(50, len(model.alphas_))
def test_warm_start():
    X, y, _, _ = build_dataset()
    # Two warm-started rounds of 5 iterations must match a single cold
    # run of 10 iterations.
    warm = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
    ignore_warnings(warm.fit)(X, y)
    ignore_warnings(warm.fit)(X, y)  # second round continues from the first
    cold = ElasticNet(alpha=0.1, max_iter=10)
    ignore_warnings(cold.fit)(X, y)
    assert_array_almost_equal(cold.coef_, warm.coef_)
def test_lasso_alpha_warning():
    # alpha=0 is degenerate for Lasso and must trigger a UserWarning.
    X = [[-1], [0], [1]]
    Y = [-1, 0, 1]  # just a straight line
    model = Lasso(alpha=0)
    assert_warns(UserWarning, model.fit, X, Y)
def test_lasso_positive_constraint():
    X = [[-1], [0], [1]]
    y = [1, 0, -1]  # a straight line with negative slope
    # positive=True must clamp all coefficients at or above zero, with
    # and without a precomputed Gram matrix.
    for precompute in [False, True]:
        model = Lasso(alpha=0.1, max_iter=1000, precompute=precompute,
                      positive=True)
        model.fit(X, y)
        assert_true(min(model.coef_) >= 0)
def test_enet_positive_constraint():
    X = [[-1], [0], [1]]
    y = [1, 0, -1]  # a straight line with negative slope
    model = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
    model.fit(X, y)
    # positive=True must keep every coefficient non-negative.
    assert_true(min(model.coef_) >= 0)
def test_enet_cv_positive_constraint():
    X, y, X_test, y_test = build_dataset()
    max_iter = 500
    # Without the constraint at least one coefficient goes negative ...
    unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
                                 max_iter=max_iter,
                                 cv=2, n_jobs=1)
    unconstrained.fit(X, y)
    assert_true(min(unconstrained.coef_) < 0)
    # ... and with positive=True, on the same data, none do.
    constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
                               cv=2, positive=True, n_jobs=1)
    constrained.fit(X, y)
    assert_true(min(constrained.coef_) >= 0)
def test_uniform_targets():
    # Constant targets: the fitted models must predict the constant and
    # the alpha grid must collapse to the smallest representable value.
    enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
    m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
    lasso = LassoCV(fit_intercept=True, n_alphas=3)
    m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
    rng = np.random.RandomState(0)
    X_train = rng.random_sample(size=(10, 3))
    X_test = rng.random_sample(size=(10, 3))
    y1 = np.empty(10)
    y2 = np.empty((10, 2))
    for model in (enet, lasso):
        for level in (0, 5):
            y1.fill(level)
            assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
            assert_array_equal(model.alphas_,
                               [np.finfo(float).resolution] * 3)
    for model in (m_enet, m_lasso):
        for level in (0, 5):
            y2[:, 0].fill(level)
            y2[:, 1].fill(2 * level)
            assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
            assert_array_equal(model.alphas_,
                               [np.finfo(float).resolution] * 3)
def test_multi_task_lasso_and_enet():
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    # Identical target columns must yield identical coefficient rows and
    # a tiny (but positive) dual gap for both multi-task estimators.
    for estimator_cls in (MultiTaskLasso, MultiTaskElasticNet):
        model = estimator_cls(alpha=1, tol=1e-8).fit(X, Y)
        assert_true(0 < model.dual_gap_ < 1e-5)
        assert_array_almost_equal(model.coef_[0], model.coef_[1])
def test_lasso_readonly_data():
    X = np.array([[-1], [0], [1]])
    Y = np.array([-1, 0, 1])  # just a straight line
    T = np.array([[2], [3], [4]])  # test sample
    # Fitting from memory-mapped (read-only) arrays must work unchanged.
    with TempMemmap((X, Y)) as (X, Y):
        model = Lasso(alpha=0.5)
        model.fit(X, Y)
        prediction = model.predict(T)
        assert_array_almost_equal(model.coef_, [.25])
        assert_array_almost_equal(prediction, [0.5, 0.75, 1.])
        assert_almost_equal(model.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
    """MultiTaskLasso must accept read-only (memory-mapped) inputs."""
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    with TempMemmap((X, Y)) as (X, Y):
        # BUG FIX: the original rebuilt Y here (``Y = np.c_[y, y]``),
        # silently replacing the read-only memmap with a fresh writable
        # array and defeating the purpose of the test. Keep the
        # memmapped Y so the read-only path is actually exercised.
        clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
        assert_true(0 < clf.dual_gap_ < 1e-5)
        assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
    n_targets = 3
    X, y, _, _ = build_dataset(n_samples=10, n_features=8,
                               n_informative_features=10,
                               n_targets=n_targets)
    model = ElasticNet(alpha=0.01, fit_intercept=True)
    model.fit(X, y)
    coef, intercept, dual_gap = (model.coef_, model.intercept_,
                                 model.dual_gap_)
    # Row k of the joint multi-target fit must match an independent fit
    # on target k alone.
    for k in range(n_targets):
        model.fit(X, y[:, k])
        assert_array_almost_equal(coef[k, :], model.coef_)
        assert_array_almost_equal(intercept[k], model.intercept_)
        assert_array_almost_equal(dual_gap[k], model.dual_gap_)
def test_multioutput_enetcv_error():
    # ElasticNetCV does not support multi-output y and must refuse it.
    X = np.random.randn(10, 2)
    y = np.random.randn(10, 2)
    model = ElasticNetCV()
    assert_raises(ValueError, model.fit, X, y)
def test_multitask_enet_and_lasso_cv():
    X, y, _, _ = build_dataset(n_features=50, n_targets=3)
    model = MultiTaskElasticNetCV().fit(X, y)
    assert_almost_equal(model.alpha_, 0.00556, 3)
    model = MultiTaskLassoCV().fit(X, y)
    assert_almost_equal(model.alpha_, 0.00278, 3)
    X, y, _, _ = build_dataset(n_targets=3)
    model = MultiTaskElasticNetCV(n_alphas=10, eps=1e-3, max_iter=100,
                                  l1_ratio=[0.3, 0.5], tol=1e-3)
    model.fit(X, y)
    # Attribute shapes reflect (n_l1_ratios, n_alphas, n_folds).
    assert_equal(0.5, model.l1_ratio_)
    assert_equal((3, X.shape[1]), model.coef_.shape)
    assert_equal((3, ), model.intercept_.shape)
    assert_equal((2, 10, 3), model.mse_path_.shape)
    assert_equal((2, 10), model.alphas_.shape)
    X, y, _, _ = build_dataset(n_targets=3)
    model = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3)
    model.fit(X, y)
    assert_equal((3, X.shape[1]), model.coef_.shape)
    assert_equal((3, ), model.intercept_.shape)
    assert_equal((10, 3), model.mse_path_.shape)
    assert_equal(10, len(model.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    mono = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    mono.fit(X, y[:, 0])
    multi = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    multi.fit(X, y)
    # A single-column target must give identical hyper-parameters and fit
    # through the mono- and multi-task estimators.
    assert_almost_equal(mono.l1_ratio_, multi.l1_ratio_)
    assert_almost_equal(mono.alpha_, multi.alpha_)
    assert_almost_equal(mono.coef_, multi.coef_[0])
    assert_almost_equal(mono.intercept_, multi.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    mono = LassoCV(n_alphas=5, eps=2e-3)
    mono.fit(X, y[:, 0])
    multi = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
    multi.fit(X, y)
    # A single-column target must give identical results through the
    # mono- and multi-task estimators.
    assert_almost_equal(mono.alpha_, multi.alpha_)
    assert_almost_equal(mono.coef_, multi.coef_[0])
    assert_almost_equal(mono.intercept_, multi.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
    # float32 and float64 sparse inputs must produce (nearly) the same
    # cross-validated solution for both CV estimators.
    X, y, _, _ = build_dataset(n_features=10)
    for cv_cls in (ElasticNetCV, LassoCV):
        model64 = cv_cls(n_alphas=5)
        model64.fit(sparse.csr_matrix(X), y)
        model32 = cv_cls(n_alphas=5)
        model32.fit(sparse.csr_matrix(X, dtype=np.float32), y)
        assert_almost_equal(model64.alpha_, model32.alpha_, decimal=6)
        assert_almost_equal(model64.coef_, model32.coef_, decimal=6)
def test_precompute_invalid_argument():
    X, y, _, _ = build_dataset()
    # CV estimators accept True/False/'auto'/array-like for precompute;
    # anything else must raise with an informative message.
    for model in [ElasticNetCV(precompute="invalid"),
                  LassoCV(precompute="invalid")]:
        assert_raises_regex(ValueError, ".*should be.*True.*False.*auto.*"
                            "array-like.*Got 'invalid'", model.fit, X, y)
    # Plain ElasticNet additionally rejects 'auto'.
    assert_raises_regex(ValueError, ".*should be.*True.*False.*array-like.*"
                        "Got 'auto'", ElasticNet(precompute='auto').fit,
                        X, y)
def test_warm_start_convergence():
    X, y, _, _ = build_dataset()
    model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
    n_iter_reference = model.n_iter_
    # The problem is hard enough that convergence takes several passes.
    assert_greater(n_iter_reference, 2)
    # With warm_start off, refitting repeats the same amount of work.
    model.fit(X, y)
    n_iter_cold_start = model.n_iter_
    assert_equal(n_iter_cold_start, n_iter_reference)
    # With warm_start on, the solver starts from the previous solution
    # and needs a single pass to confirm convergence.
    model.set_params(warm_start=True)
    model.fit(X, y)
    n_iter_warm_start = model.n_iter_
    assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
    boston = load_boston()
    X, y = boston.data, boston.target
    # Converge on a lightly regularized (hard) problem ...
    final_alpha = 1e-5
    low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
    # ... and on a 10x more regularized (easier) variant, which should
    # generally need fewer passes.
    high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
    assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
    # Warm-starting the hard problem from the easy problem's solution
    # must also converge faster than the cold start did.
    warm_model = deepcopy(high_reg_model)
    warm_model.set_params(warm_start=True, alpha=final_alpha)
    warm_model.fit(X, y)
    assert_greater(low_reg_model.n_iter_, warm_model.n_iter_)
def test_random_descent():
    # Cyclic and random coordinate selection must converge to the same
    # solution once fully converged; check several input layouts.
    def assert_same_fit(model_cls, X_fit, y_fit):
        cyclic = model_cls(selection='cyclic', tol=1e-8)
        cyclic.fit(X_fit, y_fit)
        randomized = model_cls(selection='random', tol=1e-8,
                               random_state=42)
        randomized.fit(X_fit, y_fit)
        assert_array_almost_equal(cyclic.coef_, randomized.coef_)
        assert_almost_equal(cyclic.intercept_, randomized.intercept_)

    X, y, _, _ = build_dataset(n_samples=50, n_features=20)
    # Coordinate descent using the Gram trick.
    assert_same_fit(ElasticNet, X, y)
    # Without the Gram trick (more features than samples).
    assert_same_fit(ElasticNet, X.T, y[:20])
    # Sparse input.
    assert_same_fit(ElasticNet, sparse.csr_matrix(X), y)
    # Multi-output case.
    new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
    assert_same_fit(MultiTaskElasticNet, X, new_y)
    # An unknown selection strategy must raise.
    bad_model = ElasticNet(selection='invalid')
    assert_raises(ValueError, bad_model.fit, X, y)
def test_enet_path_positive():
    # With positive=True every coefficient along the path must be >= 0.
    X, y, _, _ = build_dataset(n_samples=50, n_features=50)
    for path_fun in [enet_path, lasso_path]:
        path_coefs = path_fun(X, y, positive=True)[1]
        assert_true(np.all(path_coefs >= 0))
def test_sparse_dense_descent_paths():
    # Dense and CSR inputs must trace identical descent paths.
    X, y, _, _ = build_dataset(n_samples=50, n_features=20)
    X_sparse = sparse.csr_matrix(X)
    for path_fun in [enet_path, lasso_path]:
        _, dense_coefs, _ = path_fun(X, y, fit_intercept=False)
        _, sparse_coefs, _ = path_fun(X_sparse, y, fit_intercept=False)
        assert_array_almost_equal(dense_coefs, sparse_coefs)
def test_check_input_false():
    """ElasticNet.fit(check_input=False) must trust, not validate, inputs."""
    X, y, _, _ = build_dataset(n_samples=20, n_features=10)
    X = check_array(X, order='F', dtype='float64')
    # BUG FIX: the original passed X here (``check_array(X, ...)``),
    # silently replacing the target with a copy of the design matrix and
    # fitting a meaningless multi-output problem. Validate the actual
    # 1-D target instead (ensure_2d=False allows the 1-D shape).
    y = check_array(y, order='F', dtype='float64', ensure_2d=False)
    clf = ElasticNet(selection='cyclic', tol=1e-8)
    # No error when the data is already in the expected format.
    clf.fit(X, y, check_input=False)
    X = check_array(X, order='F', dtype='float32')
    clf.fit(X, y, check_input=True)
    # With checks bypassed, a wrong dtype must surface as an error ...
    assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # ... and so must C-ordered (instead of Fortran-ordered) input.
    X = check_array(X, order='C', dtype='float64')
    assert_raises(ValueError, clf.fit, X, y, check_input=False)
def test_overrided_gram_matrix():
    # A user-supplied Gram matrix is invalidated by centering/normalizing
    # X, so the estimator must warn that it recomputes the matrix.
    X, y, _, _ = build_dataset(n_samples=20, n_features=10)
    gram = X.T.dot(X)
    model = ElasticNet(selection='cyclic', tol=1e-8, precompute=gram,
                       fit_intercept=True)
    expected_message = ("Gram matrix was provided but X was centered"
                        " to fit intercept, "
                        "or X was normalized : recomputing Gram matrix.")
    assert_warns_message(UserWarning, expected_message, model.fit, X, y)
def test_lasso_non_float_y():
    # Integer targets must give exactly the same fit as float targets.
    X = [[0, 0], [1, 1], [-1, -1]]
    y_int = [0, 1, 2]
    y_float = [0.0, 1.0, 2.0]
    for estimator_cls in [ElasticNet, Lasso]:
        model_int = estimator_cls(fit_intercept=False)
        model_int.fit(X, y_int)
        model_float = estimator_cls(fit_intercept=False)
        model_float.fit(X, y_float)
        assert_array_equal(model_int.coef_, model_float.coef_)
def test_enet_float_precision():
    X, y, X_test, y_test = build_dataset(n_samples=20, n_features=10)
    # The iteration budget is small (to keep the test fast), so
    # convergence warnings are silenced; float32 and float64 fits must
    # nevertheless agree to ~4 decimals.
    for normalize in [True, False]:
        for fit_intercept in [True, False]:
            coef_by_dtype = {}
            intercept_by_dtype = {}
            for dtype in [np.float64, np.float32]:
                model = ElasticNet(alpha=0.5, max_iter=100,
                                   precompute=False,
                                   fit_intercept=fit_intercept,
                                   normalize=normalize)
                X = dtype(X)
                y = dtype(y)
                ignore_warnings(model.fit)(X, y)
                coef_by_dtype[dtype] = model.coef_
                intercept_by_dtype[dtype] = model.intercept_
                assert_equal(model.coef_.dtype, dtype)
                # A precomputed Gram array must not change the result.
                gram = X.T.dot(X)
                model_gram = ElasticNet(alpha=0.5, max_iter=100,
                                        precompute=gram,
                                        fit_intercept=fit_intercept,
                                        normalize=normalize)
                ignore_warnings(model_gram.fit)(X, y)
                assert_array_almost_equal(model.coef_, model_gram.coef_)
                assert_array_almost_equal(model.intercept_,
                                          model_gram.intercept_)
            assert_array_almost_equal(coef_by_dtype[np.float32],
                                      coef_by_dtype[np.float64],
                                      decimal=4)
            assert_array_almost_equal(intercept_by_dtype[np.float32],
                                      intercept_by_dtype[np.float64],
                                      decimal=4)
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf numpy mathematical methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from six.moves import range
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops.numpy_ops import np_array_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_math_ops
from tensorflow.python.platform import test
class MathTest(test.TestCase, parameterized.TestCase):
def setUp(self):
    super(MathTest, self).setUp()

    def np_cast(dtype):
        return lambda x: np.array(x, dtype=dtype)

    def tfnp_cast(dtype):
        return lambda x: np_array_ops.array(x, dtype=dtype)

    # Every operand is pushed through each of these transforms so ops
    # get exercised against plain values, tf tensors, numpy arrays and
    # tf-numpy ndarrays of several dtypes. Order matters to failure
    # messages only.
    self.array_transforms = [
        lambda x: x,  # identity
        ops.convert_to_tensor,
        np.array,
        np_cast(np.float32),
        np_cast(np.float64),
        np_array_ops.array,
        tfnp_cast(np.float32),
        tfnp_cast(np.float64),
    ]
    self.types = [np.int32, np.int64, np.float32, np.float64]
def _testBinaryOp(self,
                  math_fun,
                  np_fun,
                  name,
                  operands=None,
                  extra_operands=None,
                  check_promotion=True,
                  check_promotion_result_type=True):
    """Checks math_fun against np_fun over many transforms and dtypes."""

    def check_pair(a, b):
        # Agreement under every operand transform.
        for transform in self.array_transforms:
            lhs = transform(a)
            rhs = transform(b)
            self.match(
                math_fun(lhs, rhs),
                np_fun(lhs, rhs),
                msg='{}({}, {})'.format(name, lhs, rhs))
        # Agreement under dtype promotion across all type pairs.
        for type_a in self.types:
            for type_b in self.types:
                if not check_promotion and type_a != type_b:
                    continue
                lhs = np_array_ops.array(a, dtype=type_a)
                rhs = np_array_ops.array(b, dtype=type_b)
                self.match(
                    math_fun(lhs, rhs),
                    np_fun(lhs, rhs),
                    msg='{}({}, {})'.format(name, lhs, rhs),
                    check_dtype=check_promotion_result_type)

    if operands is None:
        operands = [(5, 2), (5, [2, 3]), (5, [[2, 3], [6, 7]]), ([1, 2, 3], 7),
                    ([1, 2, 3], [5, 6, 7])]
    for operand1, operand2 in operands:
        check_pair(operand1, operand2)
    if extra_operands is not None:
        for operand1, operand2 in extra_operands:
            check_pair(operand1, operand2)
def testDot(self):
    # Beyond the default operand set, cover vector-matrix and batched
    # higher-rank contractions.
    higher_rank = (np.arange(2 * 3 * 5).reshape([2, 3, 5]).tolist(),
                   np.arange(5 * 7 * 11).reshape([7, 5, 11]).tolist())
    extra = [([1, 2], [[5, 6, 7], [8, 9, 10]]), higher_rank]
    return self._testBinaryOp(
        np_math_ops.dot, np.dot, 'dot', extra_operands=extra)
def testMinimum(self):
    # numpy's result dtype under promotion is unusual for minimum, so
    # only the values are compared, not the dtypes.
    return self._testBinaryOp(
        np_math_ops.minimum, np.minimum, 'minimum',
        check_promotion_result_type=False)
def testMaximum(self):
    # numpy's result dtype under promotion is unusual for maximum, so
    # only the values are compared, not the dtypes.
    return self._testBinaryOp(
        np_math_ops.maximum, np.maximum, 'maximum',
        check_promotion_result_type=False)
def testMatmul(self):
    # A single conformable matrix pair is enough; promotion is still
    # exercised by the shared helper.
    pairs = [([[1, 2]], [[3, 4, 5], [6, 7, 8]])]
    return self._testBinaryOp(
        np_math_ops.matmul, np.matmul, 'matmul', operands=pairs)
def testMatmulError(self):
    # A scalar operand on either side of matmul is invalid.
    scalar = np_array_ops.ones([], np.int32)
    matrix = np_array_ops.ones([2, 3], np.int32)
    for lhs, rhs in ((scalar, matrix), (matrix, scalar)):
        with self.assertRaisesRegex(ValueError, r''):
            np_math_ops.matmul(lhs, rhs)
def testVDot(self):
    # vdot flattens its operands, so only the total sizes must agree.
    pairs = [([[1, 2], [3, 4]], [[3, 4], [6, 7]]),
             ([[1, 2], [3, 4]], [3, 4, 6, 7])]
    return self._testBinaryOp(
        np_math_ops.vdot, np.vdot, 'vdot', operands=pairs)
def _testUnaryOp(self, math_fun, np_fun, name):
    """Checks math_fun against np_fun on scalar, 1-d and 2-d operands."""

    def check(value):
        for transform in self.array_transforms:
            operand = transform(value)
            self.match(
                math_fun(operand), np_fun(operand),
                msg='{}({})'.format(name, operand))

    for value in (5, [2, 3], [[2, -3], [-6, 7]]):
        check(value)
def testLog(self):
    """Compares np_math_ops.log with np.log."""
    self._testUnaryOp(np_math_ops.log, np.log, 'log')
def testExp(self):
    """Compares np_math_ops.exp with np.exp."""
    self._testUnaryOp(np_math_ops.exp, np.exp, 'exp')
def testTanh(self):
    """Compares np_math_ops.tanh with np.tanh."""
    self._testUnaryOp(np_math_ops.tanh, np.tanh, 'tanh')
def testSqrt(self):
    """Compares np_math_ops.sqrt with np.sqrt."""
    self._testUnaryOp(np_math_ops.sqrt, np.sqrt, 'sqrt')
def match(self, actual, expected, msg='', check_dtype=True):
    """Asserts `actual` is a tf-numpy ndarray matching `expected`.

    Verifies the type, optionally the dtype, the shape, and numeric
    closeness of the values.
    """
    self.assertIsInstance(actual, np_arrays.ndarray)
    if check_dtype:
        dtype_msg = 'Dtype mismatch.\nActual: {}\nExpected: {}\n{}'.format(
            actual.dtype, expected.dtype, msg)
        self.assertEqual(actual.dtype, expected.dtype, dtype_msg)
    shape_msg = 'Shape mismatch.\nActual: {}\nExpected: {}\n{}'.format(
        actual.shape, expected.shape, msg)
    self.assertEqual(actual.shape, expected.shape, shape_msg)
    np.testing.assert_allclose(actual.tolist(), expected.tolist(), rtol=1e-6)
def testArgsort(self):
    self._testUnaryOp(np_math_ops.argsort, np.argsort, 'argsort')
    # A stable sort over all-equal keys must preserve original order.
    expected = np.arange(100)
    all_equal = np.zeros(100)
    np.testing.assert_equal(
        np_math_ops.argsort(all_equal, kind='stable'), expected)
def testArgMaxArgMin(self):
    samples = [
        0,
        5,
        [1],
        [1, 2, 3],
        [[1, 2, 3]],
        [[4, 6], [7, 8]],
        [[[4, 6], [9, 10]], [[7, 8], [12, 34]]],
    ]
    for transform, sample in itertools.product(self.array_transforms,
                                               samples):
        operand = transform(sample)
        self.match(np_math_ops.argmax(operand), np.argmax(operand))
        self.match(np_math_ops.argmin(operand), np.argmin(operand))
        if hasattr(operand, 'shape'):
            ndims = len(operand.shape)
        else:
            ndims = np_array_ops.array(operand, copy=False).ndim
        # numpy flattens a scalar ndarray and treats it as a 1-d array
        # of size 1, so give scalars one axis to iterate over.
        ndims = max(ndims, 1)
        for axis in range(-ndims, ndims):
            self.match(
                np_math_ops.argmax(operand, axis=axis),
                np.argmax(operand, axis=axis))
            self.match(
                np_math_ops.argmin(operand, axis=axis),
                np.argmin(operand, axis=axis))
  @parameterized.parameters([False, True])
  def testIsCloseEqualNan(self, equal_nan):
    """isclose must match NumPy for NaN inputs with both equal_nan settings."""
    a = np.asarray([1, 1, np.nan, 1, np.nan], np.float32)
    b = np.asarray([1, 2, 1, np.nan, np.nan], np.float32)
    self.match(
        np_math_ops.isclose(a, b, equal_nan=equal_nan),
        np.isclose(a, b, equal_nan=equal_nan))
  def testAverageWrongShape(self):
    """average must reject weights whose shape is incompatible with input."""
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
      np_math_ops.average(np.ones([2, 3]), weights=np.ones([2, 4]))
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
      np_math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([2, 4]))
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
      np_math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([]))
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
      np_math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([5]))
  def testClip(self):
    """clip must match NumPy values; dtype matching is deliberately skipped."""
    def run_test(arr, *args, **kwargs):
      check_dtype = kwargs.pop('check_dtype', True)
      for fn in self.array_transforms:
        arr = fn(arr)
        self.match(
            np_math_ops.clip(arr, *args, **kwargs),
            np.clip(arr, *args, **kwargs),
            check_dtype=check_dtype)
    # NumPy exhibits weird typing behavior when a/a_min/a_max are scalars v/s
    # lists, e.g.,
    #
    # np.clip(np.array(0, dtype=np.int32), -5, 5).dtype == np.int64
    # np.clip(np.array([0], dtype=np.int32), -5, 5).dtype == np.int32
    # np.clip(np.array([0], dtype=np.int32), [-5], [5]).dtype == np.int64
    #
    # So we skip matching type. In tf-numpy the type of the output array is
    # always the same as the input array.
    run_test(0, -1, 5, check_dtype=False)
    run_test(-1, -1, 5, check_dtype=False)
    run_test(5, -1, 5, check_dtype=False)
    run_test(-10, -1, 5, check_dtype=False)
    run_test(10, -1, 5, check_dtype=False)
    run_test(10, None, 5, check_dtype=False)
    run_test(10, -1, None, check_dtype=False)
    run_test([0, 20, -5, 4], -1, 5, check_dtype=False)
    run_test([0, 20, -5, 4], None, 5, check_dtype=False)
    run_test([0, 20, -5, 4], -1, None, check_dtype=False)
    run_test([0.5, 20.2, -5.7, 4.4], -1.5, 5.1, check_dtype=False)
    run_test([0, 20, -5, 4], [-5, 0, -5, 0], [0, 5, 0, 5], check_dtype=False)
    run_test([[1, 2, 3], [4, 5, 6]], [2, 0, 2], 5, check_dtype=False)
    run_test([[1, 2, 3], [4, 5, 6]], 0, [5, 3, 1], check_dtype=False)
def testPtp(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_math_ops.ptp(arg, *args, **kwargs), np.ptp(arg, *args, **kwargs))
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
  def testLinSpace(self):
    """linspace must match NumPy across float32/float64/tensor endpoints."""
    array_transforms = [
        lambda x: x,  # Identity,
        ops.convert_to_tensor,
        np.array,
        lambda x: np.array(x, dtype=np.float32),
        lambda x: np.array(x, dtype=np.float64),
        np_array_ops.array,
        lambda x: np_array_ops.array(x, dtype=np.float32),
        lambda x: np_array_ops.array(x, dtype=np.float64)
    ]
    def run_test(start, stop, **kwargs):
      # Cross-product of transforms: start and stop may be of mixed kinds.
      for fn1 in array_transforms:
        for fn2 in array_transforms:
          arg1 = fn1(start)
          arg2 = fn2(stop)
          self.match(
              np_math_ops.linspace(arg1, arg2, **kwargs),
              np.linspace(arg1, arg2, **kwargs),
              msg='linspace({}, {})'.format(arg1, arg2))
    run_test(0, 1)
    run_test(0, 1, num=10)
    run_test(0, 1, endpoint=False)
    run_test(0, -1)
    run_test(0, -1, num=10)
    run_test(0, -1, endpoint=False)
  def testLogSpace(self):
    """logspace must match NumPy, including non-default base and endpoint."""
    array_transforms = [
        lambda x: x,  # Identity,
        ops.convert_to_tensor,
        np.array,
        lambda x: np.array(x, dtype=np.float32),
        lambda x: np.array(x, dtype=np.float64),
        np_array_ops.array,
        lambda x: np_array_ops.array(x, dtype=np.float32),
        lambda x: np_array_ops.array(x, dtype=np.float64)
    ]
    def run_test(start, stop, **kwargs):
      # Cross-product of transforms: start and stop may be of mixed kinds.
      for fn1 in array_transforms:
        for fn2 in array_transforms:
          arg1 = fn1(start)
          arg2 = fn2(stop)
          self.match(
              np_math_ops.logspace(arg1, arg2, **kwargs),
              np.logspace(arg1, arg2, **kwargs),
              msg='logspace({}, {})'.format(arg1, arg2))
    run_test(0, 5)
    run_test(0, 5, num=10)
    run_test(0, 5, endpoint=False)
    run_test(0, 5, base=2.0)
    run_test(0, -5)
    run_test(0, -5, num=10)
    run_test(0, -5, endpoint=False)
    run_test(0, -5, base=2.0)
def testGeomSpace(self):
def run_test(start, stop, **kwargs):
arg1 = start
arg2 = stop
self.match(
np_math_ops.geomspace(arg1, arg2, **kwargs),
np.geomspace(arg1, arg2, **kwargs),
msg='geomspace({}, {})'.format(arg1, arg2))
run_test(1, 1000, num=5)
run_test(1, 1000, num=5, endpoint=False)
run_test(-1, -1000, num=5)
run_test(-1, -1000, num=5, endpoint=False)
if __name__ == '__main__':
  # tf-numpy semantics are defined under eager execution.
  ops.enable_eager_execution()
  test.main()
|
|
# http://127.0.0.1:8000/api/accidents_in_rect?ne=52.012788283447016,-1.8865173453125408&sw=51.95756835101524,-2.468304454687541&Weather_Conditions=1&Road_Surface_Conditions=4,1
#
# Weather Conds:
#1 Fine no high winds
#2 Raining no high winds
#3 Snowing no high winds
#4 Fine + high winds
#5 Raining + high winds
#6 Snowing + high winds
#7 Fog or mist
#8 Other
#9 Unknown
#-1 Data missing or out of range
# Road Surf Conds:
#1 Dry
#2 Wet or damp
#3 Snow
#4 Frost or ice
#5 Flood over 3cm. deep
#6 Oil or diesel
#7 Mud
#-1 Data missing or out of range
# just (snow, frost or ice) on road
# http://127.0.0.1:8000/api/accidents_in_rect?ne=52.012788283447016,-1.8865173453125408&sw=51.95756835101524,-2.468304454687541&Road_Surface_Conditions=3,4
#
# for a lot of cardiff...
# http://127.0.0.1:8000/api/accidents_in_rect?ne=52.012788283447016,-1.8865173453125408&sw=50.95756835101524,-4.468304454687541&Road_Surface_Conditions=3,4
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.template import Template, Context, RequestContext
from django.template.loader import get_template
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.csrf import csrf_exempt
from django import forms
import models
import json
from datetime import datetime
import sqlite3
from django.conf import settings
from tweet_grabber import getTweets, getTweetsLocation
# Create your views here.
def _parse_param_as_ints_list( params, pname ):
lst_str = params[pname]
parts = lst_str.split(",")
try:
return [ int(x.strip()) for x in parts ]
except Exception:
return None
def accidents_in_rect( request ):
    """Return accidents inside a lat/long bounding box as a JSON list.

    Required GET params:
        ne -- "lat,long" of the north-east corner
        sw -- "lat,long" of the south-west corner
    Optional GET params (comma-separated lists of ints):
        Weather_Conditions, Road_Surface_Conditions

    Returns an HttpResponse with a JSON array of accident rows, or an
    HttpResponseBadRequest if a parameter is missing or malformed.
    """
    #
    # Prelim
    if request.method != 'GET':
        return HttpResponseBadRequest( "GET calls only" )
    params = request.GET
    req_params = ["ne","sw"]
    for req_param in req_params:
        if req_param not in params:
            return HttpResponseBadRequest( "missing param: %s" %(req_param) )
    #
    # Lat longs
    ne = params['ne']
    try:
        ne_lat,ne_long = [ float(x.strip()) for x in ne.split(',')]
    except Exception:
        return HttpResponseBadRequest( "failed to parse %s" % (ne) )
    sw = params['sw']
    try:
        sw_lat,sw_long = [ float(x.strip()) for x in sw.split(',')]
    except Exception:
        return HttpResponseBadRequest( "failed to parse %s" % (sw) )
    #
    # Optionals
    weather_conds = None
    pname = "Weather_Conditions"
    if pname in params:
        weather_conds = _parse_param_as_ints_list( params, pname )
        if weather_conds is None:
            return HttpResponseBadRequest( "failed to parse %s" % (pname) )
    road_surf_conds = None
    pname = "Road_Surface_Conditions"
    if pname in params:
        road_surf_conds = _parse_param_as_ints_list( params, pname )
        if road_surf_conds is None:
            return HttpResponseBadRequest( "failed to parse %s" % (pname) )
    #
    # Build a parameterized query.  The previous version interpolated the
    # values directly into the SQL string; all values are validated
    # floats/ints here, but bound parameters are the safe habit.
    # Bounding box: sw_lat <= latitude <= ne_lat,
    #               sw_long <= longitude <= ne_long.
    qry = "SELECT * FROM accidents "
    qry += " WHERE "
    qry += " ? <= latitude "
    qry += " AND "
    qry += " latitude <= ? "
    qry += " AND "
    qry += " ? <= longitude "
    qry += " AND "
    qry += " longitude <= ? "
    qargs = [sw_lat, ne_lat, sw_long, ne_long]
    if weather_conds is not None:
        placeholders = ",".join( ["?"] * len(weather_conds) )
        qry += " AND Weather_Conditions IN (" + placeholders + ")"
        qargs.extend(weather_conds)
    if road_surf_conds is not None:
        placeholders = ",".join( ["?"] * len(road_surf_conds) )
        qry += " AND Road_Surface_Conditions IN (" + placeholders + ")"
        qargs.extend(road_surf_conds)
    #
    # Query and parse; always release the connection, even if execute raises
    # (the previous version leaked the connection on error).
    conn = sqlite3.connect(settings.PROJ_PATH+'/datasets_db.db') #~
    try:
        conn.row_factory = sqlite3.Row
        rset = conn.execute(qry, qargs)
        results = rset.fetchall()
        dicts = [ dict(res) for res in results ]
        json_str = json.dumps(dicts)
    finally:
        conn.close()
    return HttpResponse( json_str )
def freezetweets_national( request ):
    """Return recent tweets for the configured hashtag as JSON."""
    # Only GET is supported.
    if request.method != 'GET':
        return HttpResponseBadRequest( "GET calls only" )
    # Best-effort fetch: any twitter/network failure becomes a 400.
    try:
        return HttpResponse( getTweets( settings.TWITTER_HASHTAG ) )
    except Exception:
        return HttpResponseBadRequest( "Something went wrong. Blame Greenwood." )
def freezetweets_near( request ):
    """Return tweets for the configured hashtag within 5 miles of `loc`."""
    # Only GET is supported.
    if request.method != 'GET':
        return HttpResponseBadRequest( "GET calls only" )
    params = request.GET
    # `loc` ("lat,long") is the only required parameter.
    for req_param in ["loc"]:
        if req_param not in params:
            return HttpResponseBadRequest( "missing param: %s" %(req_param) )
    loc = params['loc']
    try:
        llat, llong = [ float(piece.strip()) for piece in loc.split(',') ]
    except Exception:
        return HttpResponseBadRequest( "failed to parse %s" % (loc) )
    # Best-effort twitter lookup: failures become a 400.
    try:
        json_str = getTweetsLocation( settings.TWITTER_HASHTAG, llat, llong, "5mi" )
        return HttpResponse( json_str )
    except Exception:
        return HttpResponseBadRequest( "Something went wrong. Blame Greenwood." )
|
|
import os
import platform
import sys
from datetime import datetime, timezone
from pathlib import Path
from unittest.mock import MagicMock, Mock
import pytest
import tzlocal.unix
import tzlocal.utils
if sys.version_info >= (3, 9):
from zoneinfo import ZoneInfo, ZoneInfoNotFoundError
else:
from backports.zoneinfo import ZoneInfo, ZoneInfoNotFoundError
@pytest.fixture(scope="session", autouse=True)
def clear_tz_env_variable():
    # Make sure a TZ value from the developer's environment cannot leak into
    # the tests; tests that need TZ set it explicitly (or via monkeypatch).
    os.environ.pop("TZ", None)
def tz_path(zonefile: "str | None" = None) -> str:
    """Return the test_data directory, or the path of *zonefile* inside it.

    The previous annotation ``zonefile: str = None`` was mistyped: the
    default is None, so the parameter is optional.
    """
    base = Path(__file__).parent / "test_data"
    if zonefile:
        return str(base / zonefile)
    return str(base)
def test_env(monkeypatch):
    """TZ-environment parsing: names, paths, and invalid values."""
    tz_harare = tzlocal.utils._tz_from_env(":Africa/Harare")
    assert str(tz_harare) == "Africa/Harare"
    # Some Unices allow this as well, so we must allow it:
    tz_harare = tzlocal.utils._tz_from_env("Africa/Harare")
    assert str(tz_harare) == "Africa/Harare"
    # An absolute path to a zone file is also accepted.
    path = tz_path(os.path.join("Africa", "Harare"))
    tz_local = tzlocal.utils._tz_from_env(":" + path)
    assert str(tz_local) == "Africa/Harare"
    # Make sure the local timezone is the same as the Harare one above.
    # We test this with a past date, so that we don't run into future changes
    # of the Harare timezone.
    dt = datetime(2012, 1, 1, 5)
    assert dt.replace(tzinfo=tz_harare) == dt.replace(tzinfo=tz_local)
    tz_local = tzlocal.utils._tz_from_env(tz_path("UTC"))
    assert str(tz_local) == "UTC"
    path = tz_path(os.path.join("localtime", "etc", "localtime"))
    tz_local = tzlocal.utils._tz_from_env(path)
    assert str(tz_local) == "localtime"
    # Non-zoneinfo timezones are not supported in the TZ environment.
    pytest.raises(ZoneInfoNotFoundError, tzlocal.utils._tz_from_env, "GMT+03:00")
    # With a zone that doesn't exist, raises error
    pytest.raises(ZoneInfoNotFoundError, tzlocal.utils._tz_from_env, "Just Nonsense")
def test_timezone():
    # Most versions of Ubuntu: zone name read from an /etc/timezone file.
    tz = tzlocal.unix._get_localzone(_root=tz_path("timezone"))
    assert str(tz) == "Africa/Harare"
def test_timezone_top_line_comment():
    # /etc/timezone whose first line is a comment must still be parsed.
    tz = tzlocal.unix._get_localzone(_root=tz_path("top_line_comment"))
    assert str(tz) == "Africa/Harare"
def test_zone_setting():
    # A ZONE setting in /etc/sysconfig/clock, f ex CentOS
    tz = tzlocal.unix._get_localzone(_root=tz_path("zone_setting"))
    assert str(tz) == "Africa/Harare"
def test_timezone_setting():
    # A TIMEZONE setting in /etc/conf.d/clock, f ex Gentoo
    tz = tzlocal.unix._get_localzone(_root=tz_path("timezone_setting"))
    assert str(tz) == "Africa/Harare"
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Symbolic links are not available on Windows"
)
def test_symlink_localtime():
    # The zone name is taken from the target path of a symbolically linked
    # /etc/localtime, f ex systemd distributions.
    tz = tzlocal.unix._get_localzone(_root=tz_path("symlink_localtime"))
    assert str(tz) == "Africa/Harare"
def test_vardbzoneinfo_setting():
    # A zone name recorded under var/db/zoneinfo in the fixture tree.
    # (NOTE(review): the previous comment here was copy-pasted from the
    # /etc/conf.d/clock test above and did not match this fixture.)
    tz = tzlocal.unix._get_localzone(_root=tz_path("vardbzoneinfo"))
    assert str(tz) == "Africa/Harare"
def test_only_localtime():
    # With only an /etc/localtime file (no name anywhere), the zone is
    # loaded from the file itself and reported under the name "local".
    tz = tzlocal.unix._get_localzone(_root=tz_path("localtime"))
    assert str(tz) == "local"
    # The offsets must still match the zone the file was copied from.
    dt = datetime(2012, 1, 1, 5)
    assert dt.replace(tzinfo=ZoneInfo("Africa/Harare")) == dt.replace(tzinfo=tz)
def test_get_reload(mocker, monkeypatch):
    """get_localzone caches; reload_localzone picks up a changed TZ."""
    mocker.patch("tzlocal.utils.assert_tz_offset")
    # Clear any cached zone
    monkeypatch.setattr(tzlocal.unix, "_cache_tz", None)
    monkeypatch.setenv("TZ", "Africa/Harare")
    tz_harare = tzlocal.unix.get_localzone()
    assert str(tz_harare) == "Africa/Harare"
    # Changing the TZ makes no difference, because it's cached
    monkeypatch.setenv("TZ", "Africa/Johannesburg")
    tz_harare = tzlocal.unix.get_localzone()
    assert str(tz_harare) == "Africa/Harare"
    # So we reload it
    tz_harare = tzlocal.unix.reload_localzone()
    assert str(tz_harare) == "Africa/Johannesburg"
def test_fail(recwarn):
    # With no configuration anywhere under the root, a warning is issued
    # and the fallback is UTC.
    with pytest.warns(UserWarning, match="Can not find any timezone configuration"):
        tz = tzlocal.unix._get_localzone(_root=tz_path())
    assert tz == timezone.utc
def test_assert_tz_offset():
    # The local zone should be the local zone:
    local = tzlocal.get_localzone()
    tzlocal.utils.assert_tz_offset(local)
    # Get a non local zone. Let's use Chatham, population 600.
    # (Its +12:45 offset should differ from any machine running this test.)
    other = ZoneInfo("Pacific/Chatham")
    pytest.raises(ValueError, tzlocal.utils.assert_tz_offset, other)
def test_win32(mocker):
    """Windows registry lookup, mocked via a fake winreg module."""
    if sys.platform == "win32":
        # Ironically, these tests don't work on Windows.
        import tzlocal.win32
        # Just check on Windows that the code works, and that we get
        # something reasonable back.
        tz = tzlocal.win32.get_localzone()
        # It should be a timezone with a slash in it, at least:
        assert '/' in str(tz)
        return
    # Yes, winreg is all mocked out, but this test means we at least
    # catch syntax errors, etc.
    mocker.patch("tzlocal.utils.assert_tz_offset")
    winreg = MagicMock()
    winreg.EnumValue.configure_mock(
        return_value=("TimeZoneKeyName", "Belarus Standard Time")
    )
    sys.modules["winreg"] = winreg
    import tzlocal.win32
    tz = tzlocal.win32.get_localzone()
    assert str(tz) == "Europe/Minsk"
    tz = tzlocal.win32.reload_localzone()
    assert str(tz) == "Europe/Minsk"
    # An unmapped Windows zone name must raise.
    winreg.EnumValue.configure_mock(
        return_value=("TimeZoneKeyName", "Not a real timezone")
    )
    pytest.raises(ZoneInfoNotFoundError, tzlocal.win32._get_localzone_name)
    # Old XP style reginfo should fail
    winreg.EnumValue.configure_mock(
        return_value=("TimeZoneKeyName", "Belarus Standard Time")
    )
    tzlocal.win32.valuestodict = Mock(
        return_value={
            "StandardName": "Mocked Standard Time",
            "Std": "Mocked Standard Time",
        }
    )
    pytest.raises(LookupError, tzlocal.win32._get_localzone_name)
def test_win32_env(mocker, monkeypatch):
    """On win32, an explicit TZ env variable takes priority over the registry."""
    sys.modules["winreg"] = MagicMock()
    import tzlocal.win32
    mocker.patch("tzlocal.utils.assert_tz_offset")
    # Clear both caches so the TZ value below is actually read.
    monkeypatch.setattr(tzlocal.win32, "_cache_tz", None)
    monkeypatch.setenv("TZ", "Europe/Berlin")
    tzlocal.win32._cache_tz_name = None
    tzname = tzlocal.win32.get_localzone_name()
    assert tzname == "Europe/Berlin"
    tz = tzlocal.win32.get_localzone()
    assert str(tz) == "Europe/Berlin"
def test_win32_no_dst(mocker):
    """Behavior when 'Adjust for daylight saving time' is disabled on Windows."""
    mocker.patch("tzlocal.utils.assert_tz_offset")
    valuesmock = mocker.patch("tzlocal.win32.valuestodict")
    # If you turn off the DST, tzlocal returns "Etc/GMT+something"
    # (note: Etc/GMT zone signs are inverted relative to the UTC offset).
    valuesmock.configure_mock(
        return_value={
            "TimeZoneKeyName": "Romance Standard Time",
            "DynamicDaylightTimeDisabled": 1
        })
    tzlocal.win32._cache_tz_name = None
    tzlocal.win32._cache_tz = None
    assert str(tzlocal.win32.get_localzone()) == "Etc/GMT-1"
    # Except if the timezone doesn't have daylight savings at all,
    # then just return the timezone in question, because why not?
    valuesmock.configure_mock(
        return_value={
            "TimeZoneKeyName": "Belarus Standard Time",
            "DynamicDaylightTimeDisabled": 1
        })
    tz = tzlocal.win32._get_localzone_name()
    assert tz == "Europe/Minsk"
    # Now, if you disable this in a timezone with DST, that has a
    # non-whole hour offset, then there's nothing we can return.
    valuesmock.configure_mock(
        return_value={
            "TimeZoneKeyName": "Cen. Australia Standard Time",
            "DynamicDaylightTimeDisabled": 1
        })
    pytest.raises(ZoneInfoNotFoundError, tzlocal.win32._get_localzone_name)
    # But again, if there is no DST, that works fine:
    valuesmock.configure_mock(
        return_value={
            "TimeZoneKeyName": "Aus Central W. Standard Time",
            "DynamicDaylightTimeDisabled": 1
        })
    tz = tzlocal.win32._get_localzone_name()
    assert tz == "Australia/Eucla"
def test_termux(mocker):
    # Termux (Android) has no /etc/localtime; the zone is obtained by
    # running a getprop subprocess, mocked out here.
    subprocess = MagicMock()
    subprocess.check_output.configure_mock(return_value=b"Africa/Johannesburg")
    sys.modules["subprocess"] = subprocess
    tz = tzlocal.unix._get_localzone(_root=tz_path("termux"))
    assert str(tz) == "Africa/Johannesburg"
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Symbolic links are not available on Windows"
)
def test_conflicting():
    # When the different config files disagree, the error must list all of
    # the conflicting zones so the user can fix the right file.
    with pytest.raises(ZoneInfoNotFoundError) as excinfo:
        tz = tzlocal.unix._get_localzone(_root=tz_path("conflicting"))
    message = excinfo.value.args[0]
    assert "Multiple conflicting time zone configurations found:\n" in message
    assert "Europe/Paris" in message
    assert "America/New_York" in message
    assert "Europe/Warsaw" in message
    assert "Africa/Johannesburg" in message
    assert "localtime is a symlink to: Africa/Harare" in message
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Symbolic links are not available on Windows"
)
def test_noconflict():
    # Multiple config sources that agree must not raise.
    tz = tzlocal.unix._get_localzone(_root=tz_path("noconflict"))
    assert str(tz) == "Etc/UTC"
def test_pytz_compatibility():
    """The returned zones must support the pytz-style API (localize/normalize/zone)."""
    os.environ["TZ"] = "Africa/Harare"
    tzlocal.unix.reload_localzone()
    tz_harare = tzlocal.unix.get_localzone()
    os.environ["TZ"] = "America/New_York"
    tzlocal.unix.reload_localzone()
    tz_newyork = tzlocal.unix.get_localzone()
    dt = datetime(2021, 10, 1, 12, 00)
    dt = tz_harare.localize(dt)
    tz_harare.normalize(dt)
    assert dt.tzinfo.zone == "Africa/Harare"
    assert dt.utcoffset().total_seconds() == 7200
    dt = dt.astimezone(tz_newyork)
    dt = tz_newyork.normalize(dt)
    assert dt.tzinfo.zone == "America/New_York"
    assert dt.utcoffset().total_seconds() == -14400
    # Clean up so the TZ override cannot leak into later tests.
    del os.environ["TZ"]
def test_zoneinfo_compatibility():
    """The returned zones must also work as plain zoneinfo tzinfo objects."""
    os.environ["TZ"] = "Africa/Harare"
    tzlocal.unix.reload_localzone()
    tz_harare = tzlocal.unix.get_localzone()
    assert str(tz_harare) == "Africa/Harare"
    os.environ["TZ"] = "America/New_York"
    tzlocal.unix.reload_localzone()
    tz_newyork = tzlocal.unix.get_localzone()
    assert str(tz_newyork) == "America/New_York"
    dt = datetime(2021, 10, 1, 12, 00)
    dt = dt.replace(tzinfo=tz_harare)
    assert dt.utcoffset().total_seconds() == 7200
    dt = dt.replace(tzinfo=tz_newyork)
    assert dt.utcoffset().total_seconds() == -14400
    # Clean up so the TZ override cannot leak into later tests.
    del os.environ["TZ"]
def test_get_localzone_name():
    # get_localzone_name must honor TZ; clear the name cache first.
    tzlocal.unix._cache_tz_name = None
    os.environ["TZ"] = "America/New_York"
    assert tzlocal.unix.get_localzone_name() == "America/New_York"
    del os.environ["TZ"]
def test_ubuntu_docker_bug():
    # Ubuntu Docker images ship a broken /etc/timezone; the fixture must
    # still resolve to UTC.
    tz = tzlocal.unix._get_localzone(_root=tz_path("ubuntu_docker_bug"))
    assert str(tz) == "UTC"
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops.losses import losses
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.training import training
class Plus1RNNCell(rnn_cell_impl.RNNCell):
  """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
  @property
  def output_size(self):
    # Fixed output width of 5 units.
    return 5
  @property
  def state_size(self):
    # State is a plain 5-wide vector.
    return 5
  def call(self, input_, state, scope=None):
    return (input_ + 1, state + 1)
class ScalarStateRNNCell(rnn_cell_impl.RNNCell):
  """RNN Cell generating (output, new_state) = (input, state + 1).

  The state is a scalar int32 step counter rather than a batched vector.
  """
  @property
  def output_size(self):
    return 1
  @property
  def state_size(self):
    # Scalar (rank-0) state.
    return tensor_shape.TensorShape([])
  def zero_state(self, batch_size, dtype):
    # Counter starts at 0 regardless of batch size/requested dtype.
    return array_ops.zeros([], dtype=dtypes.int32)
  def call(self, input_, state, scope=None):
    return (input_, state + 1)
class UnbalancedOutputRNNCell(rnn_cell_impl.RNNCell):
  """RNN Cell whose output is a tuple of differently-shaped tensors.

  Generates (output, new_state) = ((input, concat(input, input)), state + 1)
  with a scalar int32 counter state.
  """
  @property
  def output_size(self):
    # Two outputs per step: width 1 and width 2.
    return tensor_shape.TensorShape(1), tensor_shape.TensorShape((2))
  @property
  def state_size(self):
    return tensor_shape.TensorShape([])
  def zero_state(self, batch_size, dtype):
    return array_ops.zeros([], dtype=dtypes.int32)
  def call(self, input_, state, scope=None):
    # Second output doubles the input width by self-concatenation.
    concatenated = array_ops.concat((input_, input_), axis=-1)
    return (input_, concatenated), state + 1
class TensorArrayStateRNNCell(rnn_cell_impl.RNNCell):
  """RNN Cell that keeps part of its state in a TensorArray.

  State is (step_counter, TensorArray of all inputs seen so far); the
  output is the input unchanged.
  """
  @property
  def output_size(self):
    return 1
  @property
  def state_size(self):
    # (scalar counter, TensorArray placeholder).
    return (tensor_shape.TensorShape([]), ())
  def zero_state(self, batch_size, dtype):
    return (array_ops.zeros([], dtype=dtypes.int32),
            tensor_array_ops.TensorArray(
                dtype=dtype, size=0, dynamic_size=True))
  def call(self, input_, state, scope=None):
    # Append this step's input at index `counter`, then advance the counter.
    new_array = state[1].write(state[0], input_)
    return (input_, (state[0] + 1, new_array))
class RNNTest(test.TestCase):
  def setUp(self):
    # Fixed seed so random test data is reproducible across runs.
    self._seed = 23489
    np.random.seed(self._seed)
  @test_util.run_in_graph_and_eager_modes
  def testInvalidSequenceLengthShape(self):
    """dynamic_rnn must reject a non-vector sequence_length."""
    cell = Plus1RNNCell()
    if context.executing_eagerly():
      inputs = [constant_op.constant(np.ones((3, 4)))]
    else:
      inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
    with self.assertRaisesRegexp(ValueError, "must be a vector"):
      rnn.dynamic_rnn(
          cell,
          array_ops.stack(inputs),
          dtype=dtypes.float32,
          sequence_length=[[4]])  # rank-2: invalid
  @test_util.run_in_graph_and_eager_modes
  def testBatchSizeFromInput(self):
    """The batch dim of dynamic_rnn outputs/state must follow the input."""
    cell = Plus1RNNCell()
    in_eager_mode = context.executing_eagerly()
    # With static batch size
    if in_eager_mode:
      inputs = np.zeros((3, 4, 5), dtype=np.float32)
      initial_state = np.zeros((3, 5), dtype=np.float32)
    else:
      inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, 5))
      initial_state = array_ops.placeholder(dtypes.float32, shape=(3, 5))
    # - Without initial_state
    outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
    self.assertEqual(3, outputs.shape[0])
    self.assertEqual(3, state.shape[0])
    # - With initial_state
    outputs, state = rnn.dynamic_rnn(
        cell, inputs, initial_state=initial_state)
    self.assertEqual(3, outputs.shape[0])
    self.assertEqual(3, state.shape[0])
    # Without static batch size
    # Tensor shapes are fully determined with eager execution enabled,
    # so only run this test for graph construction.
    if not in_eager_mode:
      inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 5))
      # - Without initial_state
      outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(None, outputs.shape[0].value)
      self.assertEqual(None, state.shape[0].value)
      # - With initial_state
      outputs, state = rnn.dynamic_rnn(
          cell,
          inputs,
          initial_state=array_ops.placeholder(dtypes.float32, shape=(None, 5)))
      self.assertEqual(None, outputs.shape[0].value)
      self.assertEqual(None, state.shape[0].value)
  @test_util.run_in_graph_and_eager_modes
  def testScalarStateIsAccepted(self):
    """dynamic_rnn must handle a cell whose state is a rank-0 tensor."""
    cell = ScalarStateRNNCell()
    in_eager_mode = context.executing_eagerly()
    if in_eager_mode:
      inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
    else:
      inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
    with self.cached_session(use_gpu=True) as sess:
      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=[4])
      if not in_eager_mode:
        outputs, state = sess.run(
            [outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
    # Outputs echo the input; the counter state advanced once per step.
    self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
    self.assertAllEqual(4, state)
  @test_util.run_in_graph_and_eager_modes
  def testUnbalancedOutputIsAccepted(self):
    """dynamic_rnn must handle per-step outputs with differing shapes."""
    cell = UnbalancedOutputRNNCell()
    in_eager_mode = context.executing_eagerly()
    if in_eager_mode:
      inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
    else:
      inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
    with self.cached_session(use_gpu=True) as sess:
      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=[4])
      if not in_eager_mode:
        outputs, state = sess.run(
            [outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
    self.assertIsInstance(outputs, tuple)
    # First output echoes the input; second is the input self-concatenated.
    self.assertAllEqual([[[1], [2], [3], [4]]], outputs[0])
    self.assertAllEqual([[[1, 1], [2, 2], [3, 3], [4, 4]]], outputs[1])
    self.assertAllEqual(4, state)
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testEagerMemory(self):
    # Running dynamic_rnn eagerly with a TensorArray state must not leak
    # Python objects (checked by the decorator).
    with context.eager_mode():
      cell = TensorArrayStateRNNCell()
      inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
      rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=[4])
  @test_util.run_in_graph_and_eager_modes
  def testTensorArrayStateIsAccepted(self):
    """dynamic_rnn must handle a cell whose state contains a TensorArray."""
    cell = TensorArrayStateRNNCell()
    in_eager_mode = context.executing_eagerly()
    if in_eager_mode:
      inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
    else:
      inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
    with self.cached_session(use_gpu=True) as sess:
      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=[4])
      # Stack the TensorArray into a dense tensor so it can be compared.
      state = (state[0], state[1].stack())
      if not in_eager_mode:
        outputs, state = sess.run(
            [outputs, state], feed_dict={
                inputs: [[[1], [2], [3], [4]]]
            })
    self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
    self.assertAllEqual(4, state[0])
    # The TensorArray recorded each per-step input in order.
    self.assertAllEqual([[[1]], [[2]], [[3]], [[4]]], state[1])
  def testCellGetInitialState(self):
    """get_initial_state: validation errors and inferred batch/dtype."""
    cell = rnn_cell_impl.BasicRNNCell(5)
    with self.assertRaisesRegexp(
        ValueError, "batch_size and dtype cannot be None"):
      cell.get_initial_state(None, None, None)
    inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 1))
    # Explicit batch_size/dtype must be consistent with the inputs tensor.
    with self.assertRaisesRegexp(
        ValueError, "batch size from input tensor is different from"):
      cell.get_initial_state(inputs=inputs, batch_size=50, dtype=None)
    with self.assertRaisesRegexp(
        ValueError, "batch size from input tensor is different from"):
      cell.get_initial_state(
          inputs=inputs, batch_size=constant_op.constant(50), dtype=None)
    with self.assertRaisesRegexp(
        ValueError, "dtype from input tensor is different from"):
      cell.get_initial_state(inputs=inputs, batch_size=None, dtype=dtypes.int16)
    # With no overrides, batch and dtype are inferred from `inputs`.
    initial_state = cell.get_initial_state(
        inputs=inputs, batch_size=None, dtype=None)
    self.assertEqual(initial_state.shape.as_list(), [None, 5])
    self.assertEqual(initial_state.dtype, inputs.dtype)
    # Passing batch/dtype explicitly (no inputs) gives the same result.
    batch = array_ops.shape(inputs)[0]
    dtype = inputs.dtype
    initial_state = cell.get_initial_state(None, batch, dtype)
    self.assertEqual(initial_state.shape.as_list(), [None, 5])
    self.assertEqual(initial_state.dtype, inputs.dtype)
  def _assert_cell_builds(self, cell_class, dtype, batch_size, in_size,
                          out_size):
    """Builds `cell_class` and runs one step, checking the output shape."""
    cell = cell_class(out_size, dtype=dtype)
    in_shape = tensor_shape.TensorShape((batch_size, in_size))
    cell.build(in_shape)
    state_output = cell.get_initial_state(
        inputs=None, batch_size=batch_size, dtype=dtype)
    cell_output, _ = cell(array_ops.zeros(in_shape, dtype), state_output)
    self.assertAllEqual([batch_size, out_size], cell_output.shape.as_list())
  @test_util.run_in_graph_and_eager_modes
  def testCellsBuild(self):
    """Every core and contrib cell must build in float32 and float64."""
    f32 = dtypes.float32
    f64 = dtypes.float64
    self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f32, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f64, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f32, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f64, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.GRUCell, f32, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.GRUCell, f64, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.LSTMCell, f32, 5, 7, 3)
    self._assert_cell_builds(rnn_cell_impl.LSTMCell, f64, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndRNNCell, f32, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndRNNCell, f64, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndyGRUCell, f32, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndyGRUCell, f64, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndyLSTMCell, f32, 5, 7, 3)
    self._assert_cell_builds(contrib_rnn.IndyLSTMCell, f64, 5, 7, 3)
  def testRNNWithKerasSimpleRNNCell(self):
    """dynamic_rnn with a Keras SimpleRNNCell: shapes and one train step."""
    with self.cached_session() as sess:
      input_shape = 10
      output_shape = 5
      timestep = 4
      batch = 100
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train)
      cell = keras.layers.SimpleRNNCell(output_shape)
      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape))
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape))
      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32)
      self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
      self.assertEqual(state.shape.as_list(), [None, output_shape])
      # One gradient-descent step on the final state must run end to end.
      loss = losses.softmax_cross_entropy(predict, state)
      train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
      sess.run([variables_lib.global_variables_initializer()])
      _, outputs, state = sess.run(
          [train_op, outputs, state], {inputs: x_train, predict: y_train})
      self.assertEqual(len(outputs), batch)
      self.assertEqual(len(state), batch)
  def testRNNWithKerasGRUCell(self):
    """dynamic_rnn with a Keras GRUCell: shapes and one train step."""
    with self.cached_session() as sess:
      input_shape = 10
      output_shape = 5
      timestep = 4
      batch = 100
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train)
      cell = keras.layers.GRUCell(output_shape)
      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape))
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape))
      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32)
      self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
      self.assertEqual(state.shape.as_list(), [None, output_shape])
      # One gradient-descent step on the final state must run end to end.
      loss = losses.softmax_cross_entropy(predict, state)
      train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
      sess.run([variables_lib.global_variables_initializer()])
      _, outputs, state = sess.run(
          [train_op, outputs, state], {inputs: x_train, predict: y_train})
      self.assertEqual(len(outputs), batch)
      self.assertEqual(len(state), batch)
def testRNNWithKerasLSTMCell(self):
  """Keras LSTMCell should plug into tf's dynamic_rnn and be trainable."""
  with self.cached_session() as sess:
    input_shape = 10
    output_shape = 5
    timestep = 4
    batch = 100
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train)
    cell = keras.layers.LSTMCell(output_shape)
    inputs = array_ops.placeholder(
        dtypes.float32, shape=(None, timestep, input_shape))
    predict = array_ops.placeholder(
        dtypes.float32, shape=(None, output_shape))
    outputs, state = rnn.dynamic_rnn(
        cell, inputs, dtype=dtypes.float32)
    self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
    # Keras LSTM state is a pair of tensors rather than a single tensor.
    self.assertEqual(len(state), 2)
    self.assertEqual(state[0].shape.as_list(), [None, output_shape])
    self.assertEqual(state[1].shape.as_list(), [None, output_shape])
    # Train on the first state tensor to check differentiability.
    loss = losses.softmax_cross_entropy(predict, state[0])
    train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
    sess.run([variables_lib.global_variables_initializer()])
    _, outputs, state = sess.run(
        [train_op, outputs, state], {inputs: x_train, predict: y_train})
    self.assertEqual(len(outputs), batch)
    self.assertEqual(len(state), 2)
    self.assertEqual(len(state[0]), batch)
    self.assertEqual(len(state[1]), batch)
def testRNNWithStackKerasCell(self):
  """StackedRNNCells of two LSTM cells should work with dynamic_rnn."""
  with self.cached_session() as sess:
    input_shape = 10
    output_shape = 5
    timestep = 4
    batch = 100
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train)
    # Two stacked LSTM layers; the first is twice as wide as the second.
    cell = keras.layers.StackedRNNCells(
        [keras.layers.LSTMCell(2 * output_shape),
         keras.layers.LSTMCell(output_shape)])
    inputs = array_ops.placeholder(
        dtypes.float32, shape=(None, timestep, input_shape))
    predict = array_ops.placeholder(
        dtypes.float32, shape=(None, output_shape))
    outputs, state = rnn.dynamic_rnn(
        cell, inputs, dtype=dtypes.float32)
    self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
    # Two state tensors per LSTM layer => four state tensors total.
    self.assertEqual(len(state), 4)
    self.assertEqual(state[0].shape.as_list(), [None, 2 * output_shape])
    self.assertEqual(state[1].shape.as_list(), [None, 2 * output_shape])
    self.assertEqual(state[2].shape.as_list(), [None, output_shape])
    self.assertEqual(state[3].shape.as_list(), [None, output_shape])
    loss = losses.softmax_cross_entropy(predict, state[2])
    train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
    sess.run([variables_lib.global_variables_initializer()])
    _, outputs, state = sess.run(
        [train_op, outputs, state], {inputs: x_train, predict: y_train})
    self.assertEqual(len(outputs), batch)
    self.assertEqual(len(state), 4)
    for s in state:
      self.assertEqual(len(s), batch)
def testStaticRNNWithKerasSimpleRNNCell(self):
  """Keras SimpleRNNCell should work with tf's static_rnn and be trainable."""
  with self.cached_session() as sess:
    input_shape = 10
    output_shape = 5
    timestep = 4
    batch = 100
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    # static_rnn consumes a time-major list of per-timestep tensors, so
    # move the time axis to the front before splitting per timestep.
    x_train = np.transpose(x_train, (1, 0, 2))
    y_train = keras.utils.to_categorical(y_train)
    cell = keras.layers.SimpleRNNCell(output_shape)
    # BUG FIX: the original built the list as `[placeholder(...)] * timestep`,
    # which repeats the *same* placeholder object.  The feed dict built below
    # then collapses to a single entry, so every timestep was silently fed
    # the final timestep's data.  Create one distinct placeholder per
    # timestep so each receives its own slice of x_train.
    inputs = [
        array_ops.placeholder(dtypes.float32, shape=(None, input_shape))
        for _ in range(timestep)
    ]
    predict = array_ops.placeholder(
        dtypes.float32, shape=(None, output_shape))
    outputs, state = rnn.static_rnn(
        cell, inputs, dtype=dtypes.float32)
    self.assertEqual(len(outputs), timestep)
    self.assertEqual(outputs[0].shape.as_list(), [None, output_shape])
    self.assertEqual(state.shape.as_list(), [None, output_shape])
    loss = losses.softmax_cross_entropy(predict, state)
    train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
    sess.run([variables_lib.global_variables_initializer()])
    # Feed each timestep's placeholder its own slice of the training data.
    feed_dict = {i: d for i, d in zip(inputs, x_train)}
    feed_dict[predict] = y_train
    _, outputs, state = sess.run(
        [train_op, outputs, state], feed_dict)
    self.assertEqual(len(outputs), timestep)
    self.assertEqual(len(outputs[0]), batch)
    self.assertEqual(len(state), batch)
def testKerasAndTFRNNLayerOutputComparison(self):
  """dynamic_rnn and keras.layers.RNN should agree given identical weights."""
  input_shape = 10
  output_shape = 5
  timestep = 4
  batch = 20
  (x_train, _), _ = testing_utils.get_test_data(
      train_samples=batch,
      test_samples=0,
      input_shape=(timestep, input_shape),
      num_classes=output_shape)
  # Build a throwaway cell only to generate one fixed weight set that both
  # execution paths below will share.
  fix_weights_generator = keras.layers.SimpleRNNCell(output_shape)
  fix_weights_generator.build((None, input_shape))
  weights = fix_weights_generator.get_weights()
  # Path 1: TF dynamic_rnn wrapping the Keras cell.
  with self.test_session(graph=ops_lib.Graph()) as sess:
    inputs = array_ops.placeholder(
        dtypes.float32, shape=(None, timestep, input_shape))
    cell = keras.layers.SimpleRNNCell(output_shape)
    tf_out, tf_state = rnn.dynamic_rnn(
        cell, inputs, dtype=dtypes.float32)
    # Weights can only be set after dynamic_rnn has built the cell.
    cell.set_weights(weights)
    [tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train})
  # Path 2: pure Keras RNN layer with the same weights and input.
  with self.test_session(graph=ops_lib.Graph()) as sess:
    k_input = keras.Input(shape=(timestep, input_shape),
                          dtype=dtypes.float32)
    cell = keras.layers.SimpleRNNCell(output_shape)
    layer = keras.layers.RNN(cell, return_sequences=True, return_state=True)
    keras_out = layer(k_input)
    cell.set_weights(weights)
    k_out, k_state = sess.run(keras_out, {k_input: x_train})
  # Both paths must produce (numerically close) identical results.
  self.assertAllClose(tf_out, k_out)
  self.assertAllClose(tf_state, k_state)
def testBasicLSTMCellInterchangeWithLSTMCell(self):
  """A BasicLSTMCell checkpoint should restore into an LSTMCell."""
  with self.test_session(graph=ops_lib.Graph()) as sess:
    basic_cell = rnn_cell_impl.BasicLSTMCell(1)
    # Call the cell once so its variables get built.
    basic_cell(array_ops.ones([1, 1]),
               state=basic_cell.get_initial_state(inputs=None,
                                                  batch_size=1,
                                                  dtype=dtypes.float32))
    self.evaluate([v.initializer for v in basic_cell.variables])
    # Give the bias a recognizable value so the restore can be verified.
    self.evaluate(basic_cell._bias.assign([10.] * 4))
    save = saver.Saver()
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    save_path = save.save(sess, prefix)
  with self.test_session(graph=ops_lib.Graph()) as sess:
    # Use the BasicLSTMCell's default scope name so checkpoint keys match.
    lstm_cell = rnn_cell_impl.LSTMCell(1, name="basic_lstm_cell")
    lstm_cell(array_ops.ones([1, 1]),
              state=lstm_cell.get_initial_state(inputs=None,
                                                batch_size=1,
                                                dtype=dtypes.float32))
    self.evaluate([v.initializer for v in lstm_cell.variables])
    save = saver.Saver()
    save.restore(sess, save_path)
    # The restored bias must carry the value saved by the BasicLSTMCell.
    self.assertAllEqual([10.] * 4, self.evaluate(lstm_cell._bias))
def testRNNCellSerialization(self):
  """TF RNN cells should round-trip through Keras get_config/from_config."""
  for cell in [
      # NOTE(review): cell_clip=True passes a bool where a numeric clip
      # value is expected — presumably relying on bool/number coercion;
      # confirm this is intentional.
      rnn_cell_impl.LSTMCell(32, use_peepholes=True, cell_clip=True),
      rnn_cell_impl.BasicLSTMCell(32, dtype=dtypes.float32),
      rnn_cell_impl.BasicRNNCell(32, activation="relu", dtype=dtypes.float32),
      rnn_cell_impl.GRUCell(
          32, kernel_initializer="ones", dtype=dtypes.float32)
  ]:
    with self.cached_session():
      x = keras.Input((None, 5))
      layer = keras.layers.RNN(cell)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.compile(optimizer="rmsprop", loss="mse")
      # Test basic case serialization.
      x_np = np.random.random((6, 5, 5))
      y_np = model.predict(x_np)
      weights = model.get_weights()
      config = layer.get_config()
      # The custom_objects is important here since rnn_cell_impl is
      # not visible as a Keras layer, and also has a name conflict with
      # keras.LSTMCell and GRUCell.
      layer = keras.layers.RNN.from_config(
          config,
          custom_objects={
              "BasicRNNCell": rnn_cell_impl.BasicRNNCell,
              "GRUCell": rnn_cell_impl.GRUCell,
              "LSTMCell": rnn_cell_impl.LSTMCell,
              "BasicLSTMCell": rnn_cell_impl.BasicLSTMCell
          })
      y = layer(x)
      model = keras.models.Model(x, y)
      model.set_weights(weights)
      y_np_2 = model.predict(x_np)
      # The deserialized model must reproduce the original predictions.
      self.assertAllClose(y_np, y_np_2, atol=1e-4)
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
  """Build a statically-unrolled LSTM plus its gradients as one group op."""
  num_units = inputs_list_t[0].get_shape().as_list()[1]
  uniform_init = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = contrib_rnn.LSTMCell(
      num_units=num_units,
      use_peepholes=True,
      initializer=uniform_init,
      state_is_tuple=False)
  outs, last_state = contrib_rnn.static_rnn(
      lstm,
      inputs_list_t,
      sequence_length=sequence_length,
      dtype=dtypes.float32)
  # Differentiate everything w.r.t. the trainable variables so the
  # benchmark exercises the backward pass too.
  train_vars = ops_lib.get_collection(ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  grads = gradients_impl.gradients(outs + [last_state], train_vars)
  return control_flow_ops.group(last_state, *(grads + outs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
  """Build a dynamic_rnn LSTM plus its gradients as one group op."""
  depth = inputs_t.get_shape().as_list()[2]
  uniform_init = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = contrib_rnn.LSTMCell(
      num_units=depth,
      use_peepholes=True,
      initializer=uniform_init,
      state_is_tuple=False)
  outs, last_state = rnn.dynamic_rnn(
      lstm, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
  # Include gradients so the benchmark covers the backward pass as well.
  train_vars = ops_lib.get_collection(ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  grads = gradients_impl.gradients([outs, last_state], train_vars)
  return control_flow_ops.group(last_state, outs, *grads)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
  """Time graph *construction* (not execution) of static vs. dynamic RNNs.

  Returns a (delta_static, delta_dynamic) pair of wall-clock times for
  building the two graphs 5 times each.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  # These parameters don't matter
  batch_size = 512
  num_units = 512
  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth
  def _create_static_rnn():
    # Fresh graph per call so construction cost is measured from scratch.
    with session.Session(config=config, graph=ops_lib.Graph()):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length)
  def _create_dynamic_rnn():
    with session.Session(config=config, graph=ops_lib.Graph()):
      inputs_t = variables_lib.Variable(inputs, trainable=False).value()
      _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
  delta_static = timeit.timeit(_create_static_rnn, number=5)
  delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
  print("%d \t %f \t %f \t %f" %
        (max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
  return delta_static, delta_dynamic
def _timer(sess, ops):
# Warm in
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start) / float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
  """Time *execution* of statically vs. dynamically unrolled LSTMs.

  Returns a (delta_static, delta_dynamic) pair of mean per-run times.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth
  # Using rnn()
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    # Pin to CPU unless the GPU variant was requested.
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
                                                    sequence_length)
    variables_lib.global_variables_initializer().run()
    delta_static = _timer(sess, ops)
  # Using dynamic_rnn()
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_t = variables_lib.Variable(inputs, trainable=False).value()
      ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
    variables_lib.global_variables_initializer().run()
    delta_dynamic = _timer(sess, ops)
  print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
        (batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
         delta_dynamic / delta_static))
  return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
  """Build a statically-unrolled LSTM plus gradients as a single group op."""
  num_units = inputs_list_t[0].get_shape().as_list()[1]
  uniform_init = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = contrib_rnn.LSTMCell(
      num_units=num_units,
      use_peepholes=True,
      initializer=uniform_init,
      state_is_tuple=False)
  outs, last_state = contrib_rnn.static_rnn(
      lstm,
      inputs_list_t,
      sequence_length=sequence_length,
      dtype=dtypes.float32)
  # Also build the backward pass so the comparison includes gradients.
  train_vars = ops_lib.get_collection(ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  grads = gradients_impl.gradients(outs + [last_state], train_vars)
  return control_flow_ops.group(last_state, *(grads + outs))
def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
                                              use_gpu):
  """Compare halving sequence_length against halving the unroll length.

  Returns a (delta_half_seq_len, delta_unroll_half) pair of mean run times.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = max_time * np.ones((batch_size,))
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]
  # Halve the sequence length, full static unroll
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
                                                       sequence_length / 2)
    variables_lib.global_variables_initializer().run()
    delta_half_seq_len = _timer(sess, ops)
  # Halve the unroll size, don't use sequence length
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      # Only the first half of the timesteps are unrolled here.
      ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
          inputs_list_t[:(max_time // 2)], sequence_length / 2)
    variables_lib.global_variables_initializer().run()
    delta_unroll_half = _timer(sess, ops)
  print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
        (batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
         delta_unroll_half, delta_half_seq_len / delta_unroll_half))
  return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
                                               state_is_tuple):
  """Static-unroll LSTM (concat or tuple state) plus gradients as one op."""
  num_units = inputs_list_t[0].get_shape().as_list()[1]
  uniform_init = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = contrib_rnn.LSTMCell(
      num_units=num_units,
      use_peepholes=True,
      initializer=uniform_init,
      state_is_tuple=state_is_tuple)
  outs, last_state = contrib_rnn.static_rnn(
      lstm,
      inputs_list_t,
      sequence_length=sequence_length,
      dtype=dtypes.float32)
  # Normalize the final state into a list so the two state representations
  # can be handled uniformly below.
  state_list = list(last_state) if state_is_tuple else [last_state]
  train_vars = ops_lib.get_collection(ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  grads = gradients_impl.gradients(outs + state_list, train_vars)
  return control_flow_ops.group(*(state_list + grads + outs))
def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
                                              use_gpu):
  """Compare concatenated-state LSTM against tuple-state LSTM run times.

  Returns a (delta_concat_state, delta_tuple_state) pair.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = max_time * np.ones((batch_size,))
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]
  # Run with concatenated states (default)
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      ops = _concat_state_vs_tuple_state_rnn_benchmark(
          inputs_list_t, sequence_length, state_is_tuple=False)
    variables_lib.global_variables_initializer().run()
    delta_concat_state = _timer(sess, ops)
  # Run with tuple states (new)
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    with ops_lib.device("/cpu:0" if not use_gpu else None):
      inputs_list_t = [
          variables_lib.Variable(
              x, trainable=False).value() for x in inputs_list
      ]
      ops = _concat_state_vs_tuple_state_rnn_benchmark(
          inputs_list_t, sequence_length, state_is_tuple=True)
    variables_lib.global_variables_initializer().run()
    delta_tuple_state = _timer(sess, ops)
  print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
        (batch_size, max_time, num_units, use_gpu, delta_concat_state,
         delta_tuple_state, delta_concat_state / delta_tuple_state))
  return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
  """dynamic_rnn LSTM with optional memory swapping, plus its gradients."""
  depth = inputs_t.get_shape().as_list()[2]
  uniform_init = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = contrib_rnn.LSTMCell(
      num_units=depth,
      use_peepholes=True,
      initializer=uniform_init,
      state_is_tuple=False)
  outs, last_state = rnn.dynamic_rnn(
      lstm,
      inputs_t,
      sequence_length=sequence_length,
      swap_memory=swap_memory,
      dtype=dtypes.float32)
  # Build the backward pass too, so swapping is exercised in both directions.
  train_vars = ops_lib.get_collection(ops_lib.GraphKeys.TRAINABLE_VARIABLES)
  grads = gradients_impl.gradients([outs, last_state], train_vars)
  return control_flow_ops.group(last_state, outs, *grads)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
  """Time dynamic_rnn with and without memory swapping.

  Returns a (no_swap, swap) pair of mean per-run times.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)
  ]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth
  # No memory swap
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    inputs_t = variables_lib.Variable(inputs, trainable=False).value()
    ops = _dynamic_rnn_swap_memory_benchmark(
        inputs_t, sequence_length, swap_memory=False)
    variables_lib.global_variables_initializer().run()
    no_swap = _timer(sess, ops)
  # Memory swap
  with session.Session(config=config, graph=ops_lib.Graph()) as sess:
    inputs_t = variables_lib.Variable(inputs, trainable=False).value()
    ops = _dynamic_rnn_swap_memory_benchmark(
        inputs_t, sequence_length, swap_memory=True)
    variables_lib.global_variables_initializer().run()
    swap = _timer(sess, ops)
  print("%d \t %d \t %d \t %f \t %f \t %f" %
        (batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
  return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
                                swap_memory, nn):
  """Benchmark an LSTM over a long sequence, repeated `nn` times.

  Prints per-iteration timings; does not return a value.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = [seqlen for _ in range(batch_size)]
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(seqlen)
  ]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth
  for _ in range(nn):
    if dynamic:
      with session.Session(config=config, graph=ops_lib.Graph()) as sess:
        inputs_t = variables_lib.Variable(inputs, trainable=False).value()
        ops = _dynamic_rnn_swap_memory_benchmark(
            inputs_t, sequence_length, swap_memory=swap_memory)
        variables_lib.global_variables_initializer().run()
        elapsed = _timer(sess, ops)
    else:
      # Static unroll variant (one op per timestep).
      with session.Session(config=config, graph=ops_lib.Graph()) as sess:
        inputs_list_t = [
            variables_lib.Variable(
                x, trainable=False).value() for x in inputs_list
        ]
        ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
                                                      sequence_length)
        variables_lib.global_variables_initializer().run()
        elapsed = _timer(sess, ops)
    print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
                                                dynamic, elapsed,
                                                elapsed / seqlen))
class BenchmarkRNN(test.Benchmark):
  """Benchmarks comparing static/dynamic RNN unrolling and memory options."""

  def benchmarkGraphCreationStaticVsDynamicLSTM(self):
    """Report graph-construction time for static vs. dynamic unrolling."""
    print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
    print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
    for max_time in (1, 25, 50):
      s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
      self.report_benchmark(
          name="graph_creation_time_static_T%02d" % max_time,
          iters=5,
          wall_time=s_dt)
      self.report_benchmark(
          name="graph_creation_time_dynamic_T%02d" % max_time,
          iters=5,
          wall_time=d_dt)

  def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
    """Report execution time of static unroll vs. dynamic_rnn."""
    print("Calculation: Static Unroll with Dynamic Flow LSTM "
          "vs. Dynamic Unroll LSTM")
    print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
          "\t dt(dynamic)/dt(static)")
    for batch_size in (256,):
      for max_time in (50,):
        for num_units in (512, 256, 128):
          for use_gpu in (False, True):
            s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
                                                         num_units, use_gpu)
            self.report_benchmark(
                name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=s_dt)
            self.report_benchmark(
                name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=d_dt)

  def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
    """Report dynamic_rnn time with swap_memory off vs. on."""
    print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
    print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
    for batch_size in (256, 512):
      for max_time in (100,):
        for num_units in (512, 256, 128):
          no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
                                                            max_time, num_units)
          self.report_benchmark(
              name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
              (max_time, batch_size, num_units),
              iters=20,
              wall_time=no_swap)
          self.report_benchmark(
              name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
              (max_time, batch_size, num_units),
              iters=20,
              wall_time=swap)

  def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
    """Report halved sequence_length vs. halved unroll length."""
    print("Calculation: Static Unroll with Halved Sequence Length "
          "vs. Half Static Unroll")
    print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
          "\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
    for batch_size in (128,):
      for max_time in (50,):
        for num_units in (256,):
          for use_gpu in (False, True):
            s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
                                                                   max_time,
                                                                   num_units,
                                                                   use_gpu)
            self.report_benchmark(
                name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=s_dt)
            self.report_benchmark(
                name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=d_dt)

  def benchmarkStaticUnrollStateConcatVsStateTuple(self):
    """Report concatenated-state vs. tuple-state LSTM run times."""
    print("Calculation: Static Unroll with Concatenated State "
          "vs. Tuple State")
    print("batch \t time \t units \t gpu \t dt(concat_state) "
          "\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
    for batch_size in (
        16,
        128,):
      for max_time in (50,):
        for num_units in (
            16,
            128,):
          for use_gpu in (False, True):
            c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
                                                                   max_time,
                                                                   num_units,
                                                                   use_gpu)
            self.report_benchmark(
                name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=c_dt)
            self.report_benchmark(
                name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
                (max_time, batch_size, num_units, use_gpu),
                iters=20,
                wall_time=t_dt)

  def _benchmarkDynamicLSTMMemorySwapLongSeq(self):
    """The memory swapping test for the SOSP submission."""
    # Leading underscore: not picked up automatically by the benchmark
    # runner; invoked manually.
    print("Calculation: Long LSTM Sequence")
    print("batch \t len \t units \t dynamic \t elapsed_t \t elapsed_t/len")
    batch_size = 512
    seqlen = 800
    num_units = 512
    dynamic = True
    swap_memory = True
    # Some warming up.
    if swap_memory:
      rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
                                  dynamic, swap_memory, 2)
    # Measure the performance.
    for slen in xrange(100, 1100, 100):
      rnn_long_sequence_benchmark(batch_size, slen, num_units, dynamic,
                                  swap_memory, 3)
# Run the unit tests (and any requested benchmarks) when executed directly.
if __name__ == "__main__":
  test.main()
|
|
"""
This module is for inspecting OGR data sources and generating either
models for GeoDjango and/or mapping dictionaries for use with the
`LayerMapping` utility.
Author: Travis Pinney, Dane Springmeyer, & Justin Bronn
"""
from itertools import izip
# Requires GDAL to use.
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
    """
    Given a DataSource, generates a dictionary that may be used
    for invoking the LayerMapping utility.

    Keyword Arguments:
     `geom_name` => The name of the geometry field to use for the model.

     `layer_key` => The key for specifying which layer in the DataSource to use;
       defaults to 0 (the first layer). May be an integer index or a string
       identifier for the layer.

     `multi_geom` => Boolean (default: False) - specify as multigeometry.
    """
    if isinstance(data_source, basestring):
        # A path string was given; open it as a DataSource.
        data_source = DataSource(data_source)
    elif not isinstance(data_source, DataSource):
        raise TypeError('Data source parameter must be a string or a DataSource object.')

    layer = data_source[layer_key]

    # Map each OGR field to a lowercased model field name; a trailing
    # underscore is not a valid ending for a Django field name, so pad it.
    _mapping = {}
    for fld in layer.fields:
        model_field = fld.lower()
        if model_field.endswith('_'):
            model_field += 'field'
        _mapping[model_field] = fld

    # Geometry type: prefix with MULTI for point/line/polygon if requested.
    gtype = layer.geom_type
    prefix = 'MULTI' if multi_geom and gtype.num in (1, 2, 3) else ''
    _mapping[geom_name] = prefix + str(gtype).upper()
    return _mapping
def ogrinspect(*args, **kwargs):
    """
    Given a data source (either a string or a DataSource object) and a string
    model name this function will generate a GeoDjango model.

    Usage:

    >>> from django.contrib.gis.utils import ogrinspect
    >>> ogrinspect('/path/to/shapefile.shp','NewModel')

    ...will print model definition to stdout

    or put this in a python script and use to redirect the output to a new
    model like:

    $ python generate_model.py > myapp/models.py

    # generate_model.py
    from django.contrib.gis.utils import ogrinspect
    shp_file = 'data/mapping_hacks/world_borders.shp'
    model_name = 'WorldBorders'

    print(ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
                     geom_name='shapes', blank=True))

    Required Arguments
     `datasource` => string or DataSource object to file pointer

     `model name` => string of name of new model class to create

    Optional Keyword Arguments
     `geom_name` => For specifying the model name for the Geometry Field.
       Otherwise will default to `geom`

     `layer_key` => The key for specifying which layer in the DataSource to use;
       defaults to 0 (the first layer). May be an integer index or a string
       identifier for the layer.

     `srid` => The SRID to use for the Geometry Field. If it can be determined,
       the SRID of the datasource is used.

     `multi_geom` => Boolean (default: False) - specify as multigeometry.

     `name_field` => String - specifies a field name to return for the
       `__unicode__` function (which will be generated if specified).

     `imports` => Boolean (default: True) - set to False to omit the
       `from django.contrib.gis.db import models` code from the
       autogenerated models thus avoiding duplicated imports when building
       more than one model by batching ogrinspect()

     `decimal` => Boolean or sequence (default: False). When set to True
       all generated model fields corresponding to the `OFTReal` type will
       be `DecimalField` instead of `FloatField`. A sequence of specific
       field names to generate as `DecimalField` may also be used.

     `blank` => Boolean or sequence (default: False). When set to True all
       generated model fields will have `blank=True`. If the user wants to
       give specific fields to have blank, then a list/tuple of OGR field
       names may be used.

     `null` => Boolean (default: False) - When set to True all generated
       model fields will have `null=True`. If the user wants to specify
       give specific fields to have null, then a list/tuple of OGR field
       names may be used.

    Note: This routine calls the _ogrinspect() helper to do the heavy lifting.
    """
    # _ogrinspect is already a generator of lines; join it directly rather
    # than wrapping it in a redundant `(s for s in ...)` expression.
    return '\n'.join(_ogrinspect(*args, **kwargs))
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
                multi_geom=False, name_field=None, imports=True,
                decimal=False, blank=False, null=False):
    """
    Helper routine for `ogrinspect` that generates GeoDjango models corresponding
    to the given data source.  See the `ogrinspect` docstring for more details.

    Yields the generated model definition one line at a time.
    """
    # Getting the DataSource.  CONSISTENCY FIX: accept both str and unicode
    # paths via `basestring`, matching the `mapping` utility in this module;
    # checking only `str` rejected unicode paths with a misleading error.
    if isinstance(data_source, basestring):
        data_source = DataSource(data_source)
    elif isinstance(data_source, DataSource):
        pass
    else:
        raise TypeError('Data source parameter must be a string or a DataSource object.')

    # Getting the layer corresponding to the layer key and getting
    # a string listing of all OGR fields in the Layer.
    layer = data_source[layer_key]
    ogr_fields = layer.fields

    # Creating lists from the `null`, `blank`, and `decimal`
    # keyword arguments.
    def process_kwarg(kwarg):
        # A sequence names specific fields; a truthy scalar means "all".
        if isinstance(kwarg, (list, tuple)):
            return [s.lower() for s in kwarg]
        elif kwarg:
            return [s.lower() for s in ogr_fields]
        else:
            return []
    null_fields = process_kwarg(null)
    blank_fields = process_kwarg(blank)
    decimal_fields = process_kwarg(decimal)

    # Gets the `null` and `blank` keywords for the given field name.
    def get_kwargs_str(field_name):
        kwlist = []
        if field_name.lower() in null_fields: kwlist.append('null=True')
        if field_name.lower() in blank_fields: kwlist.append('blank=True')
        # Leading ', ' so the result can be appended after a positional arg;
        # callers strip it with kwargs_str[2:] when it stands alone.
        if kwlist: return ', ' + ', '.join(kwlist)
        else: return ''

    # For those wishing to disable the imports.
    if imports:
        yield '# This is an auto-generated Django model module created by ogrinspect.'
        yield 'from django.contrib.gis.db import models'
        yield ''

    yield 'class %s(models.Model):' % model_name

    for field_name, width, precision, field_type in izip(ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
        # The model field name.
        mfield = field_name.lower()
        if mfield[-1:] == '_': mfield += 'field'

        # Getting the keyword args string.
        kwargs_str = get_kwargs_str(field_name)

        if field_type is OFTReal:
            # By default OFTReals are mapped to `FloatField`, however, they
            # may also be mapped to `DecimalField` if specified in the
            # `decimal` keyword.
            if field_name.lower() in decimal_fields:
                yield ' %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (mfield, width, precision, kwargs_str)
            else:
                yield ' %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTInteger:
            yield ' %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTString:
            yield ' %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
        elif field_type is OFTDate:
            yield ' %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTDateTime:
            yield ' %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTTime:
            yield ' %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
        else:
            raise TypeError('Unknown field type %s in %s' % (field_type, mfield))

    # TODO: Autodetection of multigeometry types (see #7218).
    gtype = layer.geom_type
    if multi_geom and gtype.num in (1, 2, 3):
        geom_field = 'Multi%s' % gtype.django
    else:
        geom_field = gtype.django

    # Setting up the SRID keyword string.
    if srid is None:
        if layer.srs is None:
            srid_str = 'srid=-1'
        else:
            srid = layer.srs.srid
            if srid is None:
                srid_str = 'srid=-1'
            elif srid == 4326:
                # WGS84 is already the default.
                srid_str = ''
            else:
                srid_str = 'srid=%s' % srid
    else:
        srid_str = 'srid=%s' % srid

    yield ' %s = models.%s(%s)' % (geom_name, geom_field, srid_str)
    yield ' objects = models.GeoManager()'

    if name_field:
        yield ''
        yield ' def __unicode__(self): return self.%s' % name_field
|
|
"""Sensitivity Analysis Tasks"""
import numpy as np
from caput import config
from ..core import task, io, containers
from ..util import tools
class ComputeSystemSensitivity(task.SingleTask):
"""Compute the sensitivity of beamformed visibilities.
Parameters
----------
exclude_intracyl : bool
Exclude the intracylinder baselines in the sensitivity estimate.
Default is to use all baselines. Note that a RuntimeError
will be raised if exclude_intracyl is True and the visibilities
have already been stacked over cylinder.
"""
exclude_intracyl = config.Property(proptype=bool, default=False)
def setup(self, telescope):
    """Save the telescope model.

    Parameters
    ----------
    telescope : TransitTelescope
        Object accepted by `io.get_telescope`; normalized to a telescope
        instance and stored for use in `process`.
    """
    self.telescope = io.get_telescope(telescope)
def process(self, data):
"""Estimate the sensitivity of the input data.
Parameters
----------
data : TODContainer
Must have a weight property that contains an
estimate of the inverse variance of the noise
in each visibility. The visibilities can be
stacked to any level of redundancy.
Returns
-------
metrics : SystemSensitivity
Contains the measured and radiometric estimates of
the noise in the beamformed visibilities.
"""
# Ensure we are distributed over frequency. Get shape of visibilities.
data.redistribute("freq")
nfreq, nstack, ntime = data.vis.local_shape
# Extract the input flags. If container has a gain dataset,
# then also check for the default gain 1.0 + 0.0j as this indicates
# that an input was masked for a particular time and frequency.
inpflg = data.input_flags[:].view(np.ndarray).astype(np.bool)
niff = 1
if "gain" in data.datasets:
# Derive frequency dependent flags from gains
gainflg = data.gain[:].view(np.ndarray) != (1.0 + 0.0j)
inpflg = np.swapaxes(inpflg[np.newaxis, :, :] & gainflg, 0, 1)
# Flatten frequency and time axis so we can use numpy's unique
inpflg = inpflg.reshape(inpflg.shape[0], -1)
niff = nfreq
# Find unique sets of input flags
uniq_inpflg, index_cnt = np.unique(inpflg, return_inverse=True, axis=1)
# Calculate redundancy for each unique set of input flags
cnt = tools.calculate_redundancy(
uniq_inpflg.astype(np.float32),
data.prod,
data.reverse_map["stack"]["stack"],
data.stack.size,
)
# Determine stack axis
stack_new, stack_flag = tools.redefine_stack_index_map(
self.telescope, data.input, data.prod, data.stack, data.reverse_map["stack"]
)
if not np.all(stack_flag):
self.log.warning(
"There are %d stacked baselines that are masked "
"in the telescope instance." % np.sum(~stack_flag)
)
ps = data.prod[stack_new["prod"]]
conj = stack_new["conjugate"]
prodstack = ps.copy()
prodstack["input_a"] = np.where(conj, ps["input_b"], ps["input_a"])
prodstack["input_b"] = np.where(conj, ps["input_a"], ps["input_b"])
# Figure out mapping between inputs in data file and inputs in telescope
tel_index = tools.find_inputs(
self.telescope.input_index, data.input, require_match=False
)
# Use the mapping to extract polarisation and EW position of each input
input_pol = np.array(
[
self.telescope.polarisation[ti] if ti is not None else "N"
for ti in tel_index
]
)
ew_position = np.array(
[
self.telescope.feedpositions[ti, 0] if ti is not None else 0.0
for ti in tel_index
]
)
# Next we determine indices into the stack axis for each polarisation product
# The next three lines result in XY and YX being
# combined into a single polarisation product
pa, pb = input_pol[prodstack["input_a"]], input_pol[prodstack["input_b"]]
pol_a = np.where(pa <= pb, pa, pb)
pol_b = np.where(pa <= pb, pb, pa)
baseline_pol = np.core.defchararray.add(pol_a, pol_b)
if self.exclude_intracyl:
baseline_flag = (
ew_position[prodstack["input_a"]] != ew_position[prodstack["input_b"]]
)
else:
baseline_flag = np.ones(prodstack.size, dtype=np.bool)
pol_uniq = [bp for bp in np.unique(baseline_pol) if "N" not in bp]
pol_index = [
np.flatnonzero((baseline_pol == up) & baseline_flag) for up in pol_uniq
]
npol = len(pol_uniq)
auto_flag = (prodstack["input_a"] == prodstack["input_b"]).astype(np.float32)
if self.exclude_intracyl and (np.sum(auto_flag) == npol):
raise ValueError(
"You have requested the exclusion of "
"intracylinder baselines, however it appears "
"that the visibilities have already been stacked "
"over cylinder, preventing calculation of the "
"radiometric estimate."
)
# Dereference the weight dataset
bweight = data.weight[:].view(np.ndarray)
bflag = bweight > 0.0
# Initialize arrays
var = np.zeros((nfreq, npol, ntime), dtype=np.float32)
counter = np.zeros((nfreq, npol, ntime), dtype=np.float32)
# Average over selected baseline per polarization
for pp, ipol in enumerate(pol_index):
pcnt = cnt[ipol, :]
pscale = 2.0 - auto_flag[ipol, np.newaxis]
# Loop over frequencies to reduce memory usage
for ff in range(nfreq):
fslc = slice((ff % niff) * ntime, ((ff % niff) + 1) * ntime)
pfcnt = pcnt[:, index_cnt[fslc]]
pvar = tools.invert_no_zero(bweight[ff, ipol, :])
pflag = bflag[ff, ipol, :].astype(np.float32)
var[ff, pp, :] = np.sum(pfcnt**2 * pscale * pflag * pvar, axis=0)
counter[ff, pp, :] = np.sum(pfcnt * pscale * pflag, axis=0)
# Normalize
var *= tools.invert_no_zero(counter**2)
# Determine which of the stack indices correspond to autocorrelations
auto_stack_id = np.flatnonzero(auto_flag)
auto_input = prodstack["input_a"][auto_stack_id]
auto_pol = input_pol[auto_input]
auto_cnt = cnt[auto_stack_id, :][:, index_cnt]
auto_cnt = np.swapaxes(auto_cnt.reshape(-1, niff, ntime), 0, 1)
num_feed = auto_cnt * bflag[:, auto_stack_id, :].astype(np.float32)
auto = data.vis[:, auto_stack_id, :].real
# Construct the radiometric estimate of the noise by taking the sum
# of the product of pairs of (possibly stacked) autocorrelations.
radiometer = np.zeros((nfreq, npol, ntime), dtype=np.float32)
radiometer_counter = np.zeros((nfreq, npol, ntime), dtype=np.float32)
for ii, (ai, pi) in enumerate(zip(auto_input, auto_pol)):
for jj, (aj, pj) in enumerate(zip(auto_input, auto_pol)):
if self.exclude_intracyl and (ew_position[ai] == ew_position[aj]):
# Exclude intracylinder baselines
continue
# Combine XY and YX into single polarisation product
pp = pol_uniq.index(pi + pj) if pi <= pj else pol_uniq.index(pj + pi)
# Weight by the number of feeds that were averaged
# together to obtain each stacked autocorrelation
nsq = num_feed[:, ii, :] * num_feed[:, jj, :]
radiometer[:, pp, :] += nsq * auto[:, ii, :] * auto[:, jj, :]
radiometer_counter[:, pp, :] += nsq
# Calculate number of independent samples from the
# integration time, frequency resolution, and fraction of packets lost
tint = np.median(np.abs(np.diff(data.time)))
dnu = np.median(data.index_map["freq"]["width"]) * 1e6
if ("flags" in data) and ("frac_lost" in data["flags"]):
frac_lost = data["flags"]["frac_lost"][:]
else:
frac_lost = np.zeros((1, 1), dtype=np.float32)
nint = dnu * tint * (1.0 - frac_lost[:, np.newaxis, :])
# Normalize by the number of independent samples
# and the total number of baselines squared
radiometer *= tools.invert_no_zero(nint * radiometer_counter**2)
# Create output container
metrics = containers.SystemSensitivity(
pol=np.array(pol_uniq, dtype="<U2"),
time=data.time[:],
axes_from=data,
attrs_from=data,
comm=data.comm,
distributed=data.distributed,
)
metrics.redistribute("freq")
# In order to write generic code for generating the radiometric
# estimate of the sensitivity, we had to sum over the upper and lower triangle
# of the visibility matrix. Below we multiply by sqrt(2) in order to
# obtain the sensitivity of the real component.
metrics.radiometer[:] = np.sqrt(2.0 * radiometer)
metrics.measured[:] = np.sqrt(2.0 * var)
# Save the total number of baselines that were averaged in the weight dataset
metrics.weight[:] = counter
# Save the fraction of missing samples
metrics.frac_lost[:] = frac_lost
return metrics
|
|
# -*- coding: utf-8 -*-
"""
amqp.five
~~~~~~~~~~~
Compatibility implementations of features
only available in newer Python versions.
"""
from __future__ import absolute_import
import io
import sys
# ``collections.Counter`` only exists from Python 2.7 on; on older
# interpreters fall back to a factory returning a ``defaultdict(int)``,
# which supports the increment-by-key usage pattern.
try:
    from collections import Counter
except ImportError:  # pragma: no cover
    from collections import defaultdict
    def Counter():  # noqa
        # Minimal stand-in: missing keys default to 0.
        return defaultdict(int)
# ``buffer`` is a Py2-only builtin; define a dummy class on Py3 so that
# ``isinstance(x, buffer_t)`` checks are always valid (and always False).
try:
    buffer_t = buffer
except NameError:  # pragma: no cover
    # Py3 does not have buffer, only use this for isa checks.
    class buffer_t(object):  # noqa
        pass
bytes_t = bytes
# Public API of this compatibility module.
__all__ = ['Counter', 'reload', 'UserList', 'UserDict',
           'Queue', 'Empty', 'Full', 'LifoQueue', 'builtins',
           'zip_longest', 'map', 'zip', 'string', 'string_t', 'bytes_t',
           'long_t', 'text_t', 'int_types', 'module_name_t',
           'range', 'items', 'keys', 'values', 'nextfun', 'reraise',
           'WhateverIO', 'with_metaclass', 'open_fqdn', 'StringIO',
           'THREAD_TIMEOUT_MAX', 'format_d', 'monotonic', 'buffer_t']
# ############# py3k ########################################################
PY3 = sys.version_info[0] == 3
# ``reload`` is a builtin on Py2 but lives in ``imp`` on Py3.
try:
    reload = reload  # noqa
except NameError:  # pragma: no cover
    from imp import reload  # noqa
# ``UserList``/``UserDict`` moved into ``collections`` on Py3; on Py2
# they are top-level modules of the same name.
try:
    from collections import UserList  # noqa
except ImportError:  # pragma: no cover
    from UserList import UserList  # noqa
try:
    from collections import UserDict  # noqa
except ImportError:  # pragma: no cover
    from UserDict import UserDict  # noqa
# ############# time.monotonic #############################################
# ``time.monotonic`` was added in Python 3.3.  On older interpreters we
# build a best-effort replacement via ctypes: mach_absolute_time on macOS,
# clock_gettime(CLOCK_MONOTONIC) on Linux, and plain ``time.time`` (NOT
# actually monotonic) everywhere else.
if sys.version_info < (3, 3):
    import platform
    SYSTEM = platform.system()
    try:
        import ctypes
    except ImportError:  # pragma: no cover
        ctypes = None  # noqa
    if SYSTEM == 'Darwin' and ctypes is not None:
        from ctypes.util import find_library
        libSystem = ctypes.CDLL(find_library('libSystem.dylib'))
        CoreServices = ctypes.CDLL(find_library('CoreServices'),
                                   use_errno=True)
        mach_absolute_time = libSystem.mach_absolute_time
        mach_absolute_time.restype = ctypes.c_uint64
        absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
        absolute_to_nanoseconds.restype = ctypes.c_uint64
        absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]
        def _monotonic():
            # Convert mach ticks to nanoseconds, then to seconds.
            return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9
    elif SYSTEM == 'Linux' and ctypes is not None:
        # from stackoverflow:
        # questions/1205722/how-do-i-get-monotonic-time-durations-in-python
        import os
        CLOCK_MONOTONIC = 1  # see <linux/time.h>
        class timespec(ctypes.Structure):
            # Mirrors the C ``struct timespec`` layout.
            _fields_ = [
                ('tv_sec', ctypes.c_long),
                ('tv_nsec', ctypes.c_long),
            ]
        librt = ctypes.CDLL('librt.so.1', use_errno=True)
        clock_gettime = librt.clock_gettime
        clock_gettime.argtypes = [
            ctypes.c_int, ctypes.POINTER(timespec),
        ]
        def _monotonic():  # noqa
            t = timespec()
            # clock_gettime returns non-zero on failure; surface errno.
            if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
                errno_ = ctypes.get_errno()
                raise OSError(errno_, os.strerror(errno_))
            return t.tv_sec + t.tv_nsec * 1e-9
    else:
        from time import time as _monotonic
# Prefer the stdlib implementation whenever it exists.
try:
    from time import monotonic
except ImportError:
    monotonic = _monotonic  # noqa
# ############# Py3 <-> Py2 #################################################
# Define a common vocabulary of names (string/int types, dict iteration
# helpers, ``exec``/``reraise``) that behaves the same on both majors.
if PY3:  # pragma: no cover
    import builtins
    from itertools import zip_longest
    map = map
    zip = zip
    string = str
    string_t = str
    long_t = int
    text_t = str
    range = range
    int_types = (int,)
    module_name_t = str
    open_fqdn = 'builtins.open'
    def items(d):
        """Return an iterable over ``d``'s (key, value) pairs."""
        return d.items()
    def keys(d):
        """Return an iterable over ``d``'s keys."""
        return d.keys()
    def values(d):
        """Return an iterable over ``d``'s values."""
        return d.values()
    def nextfun(it):
        """Return the bound "advance" method of iterator ``it``."""
        return it.__next__
    exec_ = getattr(builtins, 'exec')
    def reraise(tp, value, tb=None):
        """Re-raise ``value``, attaching traceback ``tb`` if it differs."""
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    import __builtin__ as builtins  # noqa
    from itertools import (  # noqa
        imap as map,
        izip as zip,
        izip_longest as zip_longest,
    )
    string = unicode  # noqa
    string_t = basestring  # noqa
    text_t = unicode
    long_t = long  # noqa
    range = xrange
    module_name_t = str
    int_types = (int, long)
    open_fqdn = '__builtin__.open'
    def items(d):  # noqa
        """Return an iterator over ``d``'s (key, value) pairs."""
        return d.iteritems()
    def keys(d):  # noqa
        """Return an iterator over ``d``'s keys."""
        return d.iterkeys()
    def values(d):  # noqa
        """Return an iterator over ``d``'s values."""
        return d.itervalues()
    def nextfun(it):  # noqa
        """Return the bound "advance" method of iterator ``it``."""
        return it.next
    def exec_(code, globs=None, locs=None):  # pragma: no cover
        """Execute code in a namespace."""
        if globs is None:
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
    # The three-argument raise is a syntax error on Py3, so the Py2
    # ``reraise`` must be defined through exec of a string.
    exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")
def with_metaclass(Type, skip_attrs=set(('__dict__', '__weakref__'))):
    """Class decorator that rebuilds the decorated class with metaclass ``Type``.

    Unlike ``six.with_metaclass``, no extra class is inserted into the
    lookup order: the decorated class's namespace (minus *skip_attrs*) is
    copied and handed directly to the metaclass, which works identically
    on Python 2 and Python 3.
    """
    def _recreate(Class):
        # Drop the auto-created descriptors named in skip_attrs; passing
        # them to the new type would be invalid.
        attrs = {key: value for key, value in items(vars(Class))
                 if key not in skip_attrs}
        return Type(Class.__name__, Class.__bases__, attrs)
    return _recreate
# ############# threading.TIMEOUT_MAX ########################################
# ``threading.TIMEOUT_MAX`` was added in Python 3.2; use a very large
# number of seconds as the fallback on older versions.
try:
    from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX
except ImportError:
    THREAD_TIMEOUT_MAX = 1e10  # noqa
# ############# format(int, ',d') ############################################
# ``format(i, ',d')`` (thousands separators) appeared in Python 2.7;
# emulate it by hand on 2.6.
if sys.version_info >= (2, 7):  # pragma: no cover
    def format_d(i):
        """Format integer ``i`` with comma thousands separators."""
        return format(i, ',d')
else:  # pragma: no cover
    def format_d(i):  # noqa
        """Format integer ``i`` with comma thousands separators."""
        s = '%d' % i
        groups = []
        # Peel off three digits at a time; any leading sign is left in
        # ``s`` and re-attached at the end.
        while s and s[-1].isdigit():
            groups.append(s[-3:])
            s = s[:-3]
        return s + ','.join(reversed(groups))
StringIO = io.StringIO
_SIO_write = StringIO.write
_SIO_init = StringIO.__init__
class WhateverIO(StringIO):
def __init__(self, v=None, *a, **kw):
_SIO_init(self, v.decode() if isinstance(v, bytes) else v, *a, **kw)
def write(self, data):
_SIO_write(self, data.decode() if isinstance(data, bytes) else data)
|
|
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.storage import (
default_storage as default_messages_storage)
from django.db import connection
from django.test import RequestFactory
from django.test.utils import CaptureQueriesContext
from django.utils.dateformat import DateFormat
from unittest import mock
import six
from pyquery import PyQuery as pq
from olympia import amo, core
from olympia.abuse.models import AbuseReport
from olympia.activity.models import ActivityLog
from olympia.addons.models import AddonUser
from olympia.amo.tests import (
addon_factory, collection_factory, TestCase, user_factory, version_factory)
from olympia.amo.urlresolvers import reverse
from olympia.bandwagon.models import Collection
from olympia.ratings.models import Rating
from olympia.reviewers.models import ReviewerScore
from olympia.users.admin import UserAdmin
from olympia.users.models import UserProfile
class TestUserAdmin(TestCase):
    """Integration tests for the custom ``UserAdmin`` admin pages.

    Covers changelist search, permission enforcement for edit/delete,
    the custom ban and delete-picture views/actions, and the read-only
    related-content helpers rendered on the change page.
    """
    def setUp(self):
        # A target user plus the admin URLs that operate on it.
        self.user = user_factory()
        self.list_url = reverse('admin:users_userprofile_changelist')
        self.detail_url = reverse(
            'admin:users_userprofile_change', args=(self.user.pk,)
        )
        self.delete_url = reverse(
            'admin:users_userprofile_delete', args=(self.user.pk, )
        )
    def test_search_for_multiple_users(self):
        # Comma-separated search terms should match several users;
        # unknown terms ('foobaa') are simply ignored.
        user = user_factory()
        another_user = user_factory()
        self.grant_permission(user, 'Admin:Tools')
        self.grant_permission(user, 'Users:Edit')
        self.client.login(email=user.email)
        response = self.client.get(
            self.list_url,
            {'q': '%s,%s,foobaa' % (self.user.pk, another_user.pk)},
            follow=True)
        assert response.status_code == 200
        doc = pq(response.content)
        assert str(self.user.pk) in doc('#result_list').text()
        assert str(another_user.pk) in doc('#result_list').text()
    def test_search_for_multiple_user_ids(self):
        """Test the optimization when just searching for matching ids."""
        user = user_factory()
        another_user = user_factory()
        self.grant_permission(user, 'Admin:Tools')
        self.grant_permission(user, 'Users:Edit')
        self.client.login(email=user.email)
        with CaptureQueriesContext(connection) as queries:
            response = self.client.get(
                self.list_url,
                {'q': '%s,%s' % (self.user.pk, another_user.pk)},
                follow=True)
        # All-numeric searches should collapse to a single IN() lookup.
        queries_str = '; '.join(q['sql'] for q in queries.captured_queries)
        in_sql = f'`users`.`id` IN ({self.user.pk}, {another_user.pk})'
        assert in_sql in queries_str
        assert len(queries.captured_queries) == 6
        assert response.status_code == 200
        doc = pq(response.content)
        assert str(self.user.pk) in doc('#result_list').text()
        assert str(another_user.pk) in doc('#result_list').text()
    def test_can_not_edit_without_users_edit_permission(self):
        # Admin:Tools alone (without Users:Edit) must not allow edits.
        user = user_factory()
        self.grant_permission(user, 'Admin:Tools')
        self.grant_permission(user, 'Addons:Edit')
        self.client.login(email=user.email)
        response = self.client.get(self.detail_url, follow=True)
        assert response.status_code == 403
        response = self.client.post(
            self.detail_url, {'username': 'foo', 'email': self.user.email},
            follow=True)
        assert response.status_code == 403
        assert self.user.reload().username != 'foo'
    def test_can_edit_with_users_edit_permission(self):
        old_username = self.user.username
        user = user_factory()
        self.grant_permission(user, 'Admin:Tools')
        self.grant_permission(user, 'Users:Edit')
        self.client.login(email=user.email)
        core.set_user(user)
        response = self.client.get(self.detail_url, follow=True)
        assert response.status_code == 200
        response = self.client.post(
            self.detail_url, {'username': 'foo', 'email': self.user.email},
            follow=True)
        assert response.status_code == 200
        assert self.user.reload().username == 'foo'
        # A successful edit should be recorded in the activity log with
        # the old and new values.
        alog = ActivityLog.objects.latest('pk')
        assert alog.action == amo.LOG.ADMIN_USER_EDITED.id
        assert alog.arguments == [self.user]
        assert alog.details == {'username': [old_username, 'foo']}
    @mock.patch.object(UserProfile, 'delete_or_disable_related_content')
    def test_can_not_delete_with_users_edit_permission(
            self, delete_or_disable_related_content_mock):
        # Deleting requires Admin:Advanced; Users:Edit is not enough.
        user = user_factory()
        assert not user.deleted
        self.grant_permission(user, 'Admin:Tools')
        self.grant_permission(user, 'Users:Edit')
        self.client.login(email=user.email)
        response = self.client.get(self.delete_url, follow=True)
        assert response.status_code == 403
        response = self.client.post(self.delete_url, {'post': 'yes'},
                                    follow=True)
        assert response.status_code == 403
        user.reload()
        assert not user.deleted
        assert user.email
        assert delete_or_disable_related_content_mock.call_count == 0
    @mock.patch.object(UserProfile, 'delete_or_disable_related_content')
    def test_can_delete_with_admin_advanced_permission(
            self, delete_or_disable_related_content_mock):
        user = user_factory()
        assert not self.user.deleted
        self.grant_permission(user, 'Admin:Tools')
        self.grant_permission(user, 'Admin:Advanced')
        self.client.login(email=user.email)
        core.set_user(user)
        response = self.client.get(self.delete_url, follow=True)
        assert response.status_code == 200
        assert b'Cannot delete user' not in response.content
        response = self.client.post(self.delete_url, {'post': 'yes'},
                                    follow=True)
        assert response.status_code == 200
        # Deletion is a soft-delete/anonymization: the flag is set and the
        # email cleared, and related content is handled via the mock.
        self.user.reload()
        assert self.user.deleted
        assert self.user.email is None
        assert delete_or_disable_related_content_mock.call_count == 1
        assert (
            delete_or_disable_related_content_mock.call_args[1] ==
            {'delete': True})
        alog = ActivityLog.objects.latest('pk')
        assert alog.action == amo.LOG.ADMIN_USER_ANONYMIZED.id
        assert alog.arguments == [self.user]
    def test_can_delete_with_related_objects_with_admin_advanced_permission(
            self):
        # Add related instances...
        addon = addon_factory()
        addon_with_other_authors = addon_factory()
        AddonUser.objects.create(
            addon=addon_with_other_authors, user=user_factory())
        relations_that_should_be_deleted = [
            AddonUser.objects.create(
                addon=addon_with_other_authors, user=self.user),
            Rating.objects.create(
                addon=addon_factory(), rating=5, user=self.user),
            addon,  # Has no other author, should be deleted.
            collection_factory(author=self.user)
        ]
        relations_that_should_survive = [
            AbuseReport.objects.create(reporter=self.user),
            AbuseReport.objects.create(user=self.user),
            ActivityLog.create(user=self.user, action=amo.LOG.USER_EDITED),
            ReviewerScore.objects.create(user=self.user, score=42),
            addon_with_other_authors,  # Has other authors, should be kept.
            # Bit of a weird case, but because the user was the only author of
            # this add-on, the addonuser relation is kept, and both the add-on
            # and the user are soft-deleted. This is in contrast with the case
            # where the user is *not* the only author, in which case the
            # addonuser relation is deleted, but the add-on is left intact.
            AddonUser.objects.create(addon=addon, user=self.user),
        ]
        # Now test as normal.
        user = user_factory()
        assert not self.user.deleted
        self.grant_permission(user, 'Admin:Tools')
        self.grant_permission(user, 'Admin:Advanced')
        self.client.login(email=user.email)
        core.set_user(user)
        response = self.client.get(self.delete_url, follow=True)
        assert response.status_code == 200
        assert b'Cannot delete user' not in response.content
        response = self.client.post(self.delete_url, {'post': 'yes'},
                                    follow=True)
        assert response.status_code == 200
        self.user.reload()
        assert self.user.deleted
        assert self.user.email is None
        alog = ActivityLog.objects.filter(
            action=amo.LOG.ADMIN_USER_ANONYMIZED.id).get()
        assert alog.arguments == [self.user]
        # Test the related instances we created earlier.
        for obj in relations_that_should_be_deleted:
            assert not obj.__class__.objects.filter(pk=obj.pk).exists()
        for obj in relations_that_should_survive:
            assert obj.__class__.objects.filter(pk=obj.pk).exists()
    def test_get_actions(self):
        # The ban action is only offered to users with Users:Edit.
        user_admin = UserAdmin(UserProfile, admin.site)
        request = RequestFactory().get('/')
        request.user = AnonymousUser()
        assert list(user_admin.get_actions(request).keys()) == []
        request.user = user_factory()
        self.grant_permission(request.user, 'Users:Edit')
        assert list(user_admin.get_actions(request).keys()) == ['ban_action']
    def test_ban_action(self):
        another_user = user_factory()
        a_third_user = user_factory()
        users = UserProfile.objects.filter(
            pk__in=(another_user.pk, self.user.pk))
        user_admin = UserAdmin(UserProfile, admin.site)
        request = RequestFactory().get('/')
        request.user = user_factory()
        core.set_user(request.user)
        # The admin action framework posts messages; give the bare
        # RequestFactory request a messages storage so that works.
        request._messages = default_messages_storage(request)
        user_admin.ban_action(request, users)
        # Both users should be banned.
        another_user.reload()
        self.user.reload()
        assert another_user.deleted
        assert another_user.email
        assert self.user.deleted
        assert self.user.email
        # The 3rd user should be unaffected.
        assert not a_third_user.reload().deleted
        # We should see 2 activity logs for banning.
        assert ActivityLog.objects.filter(
            action=amo.LOG.ADMIN_USER_BANNED.id).count() == 2
    def test_ban_button_in_change_view(self):
        ban_url = reverse('admin:users_userprofile_ban', args=(self.user.pk, ))
        user = user_factory()
        self.grant_permission(user, 'Admin:Tools')
        self.grant_permission(user, 'Users:Edit')
        self.client.login(email=user.email)
        response = self.client.get(self.detail_url, follow=True)
        assert response.status_code == 200
        assert ban_url in response.content.decode('utf-8')
    def test_delete_picture_button_in_change_view(self):
        delete_picture_url = reverse('admin:users_userprofile_delete_picture',
                                     args=(self.user.pk, ))
        user = user_factory()
        self.grant_permission(user, 'Admin:Tools')
        self.grant_permission(user, 'Users:Edit')
        self.client.login(email=user.email)
        response = self.client.get(self.detail_url, follow=True)
        assert response.status_code == 200
        assert delete_picture_url in response.content.decode('utf-8')
    def test_ban(self):
        # Exercise the dedicated ban view: permission check, POST-only,
        # 404 on unknown pk, then a successful ban + redirect + log entry.
        ban_url = reverse('admin:users_userprofile_ban', args=(self.user.pk, ))
        wrong_ban_url = reverse(
            'admin:users_userprofile_ban', args=(self.user.pk + 42, ))
        user = user_factory()
        self.grant_permission(user, 'Admin:Tools')
        self.client.login(email=user.email)
        core.set_user(user)
        response = self.client.post(ban_url, follow=True)
        assert response.status_code == 403
        self.grant_permission(user, 'Users:Edit')
        response = self.client.get(ban_url, follow=True)
        assert response.status_code == 405  # Wrong http method.
        response = self.client.post(wrong_ban_url, follow=True)
        assert response.status_code == 404  # Wrong pk.
        self.user.reload()
        assert not self.user.deleted
        response = self.client.post(ban_url, follow=True)
        assert response.status_code == 200
        assert response.redirect_chain[-1][0].endswith(self.detail_url)
        assert response.redirect_chain[-1][1] == 302
        self.user.reload()
        assert self.user.deleted
        assert self.user.email
        alog = ActivityLog.objects.latest('pk')
        assert alog.action == amo.LOG.ADMIN_USER_BANNED.id
        assert alog.arguments == [self.user]
    @mock.patch.object(UserProfile, 'delete_picture')
    def test_delete_picture(self, delete_picture_mock):
        # Same shape as test_ban but for the delete-picture view.
        delete_picture_url = reverse(
            'admin:users_userprofile_delete_picture', args=(self.user.pk, ))
        wrong_delete_picture_url = reverse(
            'admin:users_userprofile_delete_picture',
            args=(self.user.pk + 42, ))
        user = user_factory()
        self.grant_permission(user, 'Admin:Tools')
        self.client.login(email=user.email)
        core.set_user(user)
        response = self.client.post(delete_picture_url, follow=True)
        assert response.status_code == 403
        self.grant_permission(user, 'Users:Edit')
        response = self.client.get(delete_picture_url, follow=True)
        assert response.status_code == 405  # Wrong http method.
        response = self.client.post(wrong_delete_picture_url, follow=True)
        assert response.status_code == 404  # Wrong pk.
        assert delete_picture_mock.call_count == 0
        response = self.client.post(delete_picture_url, follow=True)
        assert response.status_code == 200
        assert response.redirect_chain[-1][0].endswith(self.detail_url)
        assert response.redirect_chain[-1][1] == 302
        assert delete_picture_mock.call_count == 1
        alog = ActivityLog.objects.latest('pk')
        assert alog.action == amo.LOG.ADMIN_USER_PICTURE_DELETED.id
        assert alog.arguments == [self.user]
    def test_picture_img(self):
        model_admin = UserAdmin(UserProfile, admin.site)
        assert self.user.picture_url.endswith('anon_user.png')
        assert (
            model_admin.picture_img(self.user) ==
            '<img src="%s" />' % self.user.picture_url)
        self.user.update(picture_type='image/png')
        assert (
            model_admin.picture_img(self.user) ==
            '<img src="%s" />' % self.user.picture_url)
    def test_known_ip_adresses(self):
        # NOTE: the spelling ('adresses') matches the admin method under
        # test. Duplicate IPs should be de-duplicated, and other users'
        # IPs excluded.
        self.user.update(last_login_ip='127.1.2.3')
        Rating.objects.create(
            addon=addon_factory(), user=self.user, ip_address='127.1.2.3')
        dummy_addon = addon_factory()
        Rating.objects.create(
            addon=dummy_addon, version=dummy_addon.current_version,
            user=self.user, ip_address='128.1.2.3')
        Rating.objects.create(
            addon=dummy_addon, version=version_factory(addon=dummy_addon),
            user=self.user, ip_address='129.1.2.4')
        Rating.objects.create(
            addon=addon_factory(), user=self.user, ip_address='130.1.2.4')
        Rating.objects.create(
            addon=addon_factory(), user=self.user, ip_address='130.1.2.4')
        Rating.objects.create(
            addon=dummy_addon,
            user=user_factory(), ip_address='255.255.0.0')
        model_admin = UserAdmin(UserProfile, admin.site)
        doc = pq(model_admin.known_ip_adresses(self.user))
        result = doc('ul li').text().split()
        assert len(result) == 4
        assert (set(result) ==
                set(['130.1.2.4', '128.1.2.3', '129.1.2.4', '127.1.2.3']))
    def test_last_known_activity_time(self):
        someone_else = user_factory(username='someone_else')
        addon = addon_factory()
        model_admin = UserAdmin(UserProfile, admin.site)
        assert six.text_type(
            model_admin.last_known_activity_time(self.user)) == ''
        # Add various activities. They will be attached to whatever user is
        # set in the thread global at the time, so set that in advance.
        core.set_user(self.user)
        expected_date = self.days_ago(1)
        activity = ActivityLog.create(amo.LOG.CREATE_ADDON, addon)
        activity.update(created=self.days_ago(2))
        activity.userlog_set.update(created=self.days_ago(2))
        activity = ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
        activity.update(created=expected_date)
        activity.userlog_set.update(created=expected_date)
        assert activity.reload().created == expected_date
        # Create another activity, more recent, attached to a different user.
        core.set_user(someone_else)
        activity = ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
        expected_result = DateFormat(expected_date).format(
            settings.DATETIME_FORMAT)
        assert (
            six.text_type(model_admin.last_known_activity_time(self.user)) ==
            expected_result)
    def _call_related_content_method(self, method):
        # Helper: invoke a related-content admin method and return the
        # (href, text) of the first link it renders.
        model_admin = UserAdmin(UserProfile, admin.site)
        result = getattr(model_admin, method)(self.user)
        link = pq(result)('a')[0]
        return link.attrib['href'], link.text
    def test_collections_created(self):
        Collection.objects.create()
        Collection.objects.create(author=self.user)
        Collection.objects.create(author=self.user, listed=False)
        url, text = self._call_related_content_method('collections_created')
        expected_url = (
            reverse('admin:bandwagon_collection_changelist') +
            '?author=%d' % self.user.pk)
        assert url == expected_url
        assert text == '2'
    def test_addons_created(self):
        addon_factory()
        another_user = user_factory()
        addon_factory(users=[self.user, another_user])
        addon_factory(users=[self.user], status=amo.STATUS_PENDING)
        addon_factory(users=[self.user], status=amo.STATUS_DELETED)
        addon_factory(users=[self.user],
                      version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
        url, text = self._call_related_content_method('addons_created')
        expected_url = (
            reverse('admin:addons_addon_changelist') +
            '?authors=%d' % self.user.pk)
        assert url == expected_url
        assert text == '4'
    def test_ratings_created(self):
        Rating.objects.create(addon=addon_factory(), user=self.user)
        dummy_addon = addon_factory()
        Rating.objects.create(
            addon=dummy_addon, version=dummy_addon.current_version,
            user=self.user)
        Rating.objects.create(
            addon=dummy_addon, version=version_factory(addon=dummy_addon),
            user=self.user)
        Rating.objects.create(
            addon=dummy_addon,
            user=user_factory(), ip_address='255.255.0.0')
        url, text = self._call_related_content_method('ratings_created')
        expected_url = (
            reverse('admin:ratings_rating_changelist') +
            '?user=%d' % self.user.pk)
        assert url == expected_url
        assert text == '3'
    def test_activity(self):
        addon = addon_factory()
        core.set_user(self.user)
        ActivityLog.create(amo.LOG.CREATE_ADDON, addon)
        ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
        # Create another activity attached to a different user.
        someone_else = user_factory()
        core.set_user(someone_else)
        ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
        url, text = self._call_related_content_method('activity')
        expected_url = (
            reverse('admin:activity_activitylog_changelist') +
            '?user=%d' % self.user.pk)
        assert url == expected_url
        assert text == '2'
    def test_abuse_reports_by_this_user(self):
        addon = addon_factory()
        AbuseReport.objects.create(user=self.user)
        AbuseReport.objects.create(user=self.user)
        AbuseReport.objects.create(addon=addon)
        AbuseReport.objects.create(addon=addon, reporter=self.user)
        AbuseReport.objects.create(user=user_factory(), reporter=self.user)
        url, text = self._call_related_content_method(
            'abuse_reports_by_this_user')
        expected_url = (
            reverse('admin:abuse_abusereport_changelist') +
            '?reporter=%d' % self.user.pk)
        assert url == expected_url
        assert text == '2'
    def test_abuse_reports_for_this_user(self):
        other_user = user_factory()
        addon = addon_factory()
        AbuseReport.objects.create(user=self.user)
        AbuseReport.objects.create(user=other_user)
        AbuseReport.objects.create(user=other_user, reporter=self.user)
        AbuseReport.objects.create(addon=addon, reporter=self.user)
        AbuseReport.objects.create(user=self.user, reporter=user_factory())
        url, text = self._call_related_content_method(
            'abuse_reports_for_this_user')
        expected_url = (
            reverse('admin:abuse_abusereport_changelist') +
            '?user=%d' % self.user.pk)
        assert url == expected_url
        assert text == '2'
|
|
"""
This module provides WSGI application to serve the Home Assistant API.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/http/
"""
import hmac
import json
import logging
import mimetypes
import threading
import re
import ssl
from ipaddress import ip_address, ip_network
import voluptuous as vol
import homeassistant.remote as rem
from homeassistant import util
from homeassistant.const import (
SERVER_PORT, HTTP_HEADER_HA_AUTH, HTTP_HEADER_CACHE_CONTROL,
HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN, CONTENT_TYPE_JSON,
HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS, ALLOWED_CORS_HEADERS,
EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START)
from homeassistant.core import split_entity_id
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
from homeassistant.components import persistent_notification
# Component domain and the third-party packages required at runtime.
DOMAIN = 'http'
REQUIREMENTS = ('cherrypy==8.1.2', 'static3==0.7.0', 'Werkzeug==0.11.11')
# Keys accepted under the `http:` section of the user's configuration.
CONF_API_PASSWORD = 'api_password'
CONF_SERVER_HOST = 'server_host'
CONF_SERVER_PORT = 'server_port'
CONF_DEVELOPMENT = 'development'
CONF_SSL_CERTIFICATE = 'ssl_certificate'
CONF_SSL_KEY = 'ssl_key'
CONF_CORS_ORIGINS = 'cors_allowed_origins'
CONF_TRUSTED_NETWORKS = 'trusted_networks'
# Query-string parameter that may carry the API password (see
# HomeAssistantView.handle_request).
DATA_API_PASSWORD = 'api_password'
NOTIFICATION_ID_LOGIN = 'http-login'
# TLS configuration follows the best-practice guidelines specified here:
# https://wiki.mozilla.org/Security/Server_Side_TLS
# Intermediate guidelines are followed.
# PROTOCOL_SSLv23 selects the highest protocol both sides support; the
# OP_NO_* options below then forbid the insecure SSLv2/SSLv3 variants.
SSL_VERSION = ssl.PROTOCOL_SSLv23
SSL_OPTS = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
if hasattr(ssl, 'OP_NO_COMPRESSION'):
    SSL_OPTS |= ssl.OP_NO_COMPRESSION
CIPHERS = "ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:" \
          "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:" \
          "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:" \
          "DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:" \
          "ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:" \
          "ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:" \
          "ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:" \
          "ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:" \
          "DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:" \
          "DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:" \
          "ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:" \
          "AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:" \
          "AES256-SHA:DES-CBC3-SHA:!DSS"
# Matches cache-busting asset paths like "name-<32 hex chars>.ext"; used in
# HomeAssistantWSGI.__call__ to strip the MD5 fingerprint before dispatch.
_FINGERPRINT = re.compile(r'^(.+)-[a-z0-9]{32}\.(\w+)$', re.IGNORECASE)
_LOGGER = logging.getLogger(__name__)
# Voluptuous schema validating the `http:` configuration section.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_API_PASSWORD): cv.string,
        vol.Optional(CONF_SERVER_HOST): cv.string,
        vol.Optional(CONF_SERVER_PORT, default=SERVER_PORT):
            vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
        vol.Optional(CONF_DEVELOPMENT): cv.string,
        vol.Optional(CONF_SSL_CERTIFICATE): cv.isfile,
        vol.Optional(CONF_SSL_KEY): cv.isfile,
        vol.Optional(CONF_CORS_ORIGINS): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_TRUSTED_NETWORKS):
            vol.All(cv.ensure_list, [ip_network])
    }),
}, extra=vol.ALLOW_EXTRA)
class HideSensitiveFilter(logging.Filter):
    """Logging filter that masks the API password in log messages."""
    # pylint: disable=too-few-public-methods
    def __init__(self, hass):
        """Remember the Home Assistant instance the password is read from."""
        super().__init__()
        self.hass = hass
    def filter(self, record):
        """Redact the configured API password; never drop the record."""
        password = self.hass.wsgi.api_password
        if password is not None:
            record.msg = record.msg.replace(password, '*******')
        return True
def setup(hass, config):
    """Set up the HTTP API and debug interface.

    Builds the HomeAssistantWSGI server from the `http:` configuration
    section, wires start/stop to the Home Assistant lifecycle events and
    publishes the API endpoint on `hass.config.api`.
    """
    _LOGGER.addFilter(HideSensitiveFilter(hass))
    conf = config.get(DOMAIN, {})
    api_password = util.convert(conf.get(CONF_API_PASSWORD), str)
    server_host = conf.get(CONF_SERVER_HOST, '0.0.0.0')
    server_port = conf.get(CONF_SERVER_PORT, SERVER_PORT)
    ssl_certificate = conf.get(CONF_SSL_CERTIFICATE)
    server = HomeAssistantWSGI(
        hass,
        development=str(conf.get(CONF_DEVELOPMENT, '')) == '1',
        server_host=server_host,
        server_port=server_port,
        api_password=api_password,
        ssl_certificate=ssl_certificate,
        ssl_key=conf.get(CONF_SSL_KEY),
        cors_origins=conf.get(CONF_CORS_ORIGINS, []),
        trusted_networks=[ip_network(network)
                          for network in
                          conf.get(CONF_TRUSTED_NETWORKS, [])],
    )
    # Start only once Home Assistant is fully up; stop on shutdown.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START,
                         lambda event: server.start())
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP,
                         lambda event: server.stop())
    hass.wsgi = server
    # Advertise a concrete address when bound to all interfaces.
    external_host = (server_host if server_host != '0.0.0.0'
                     else util.get_local_ip())
    hass.config.api = rem.API(external_host, api_password, server_port,
                              ssl_certificate is not None)
    return True
def request_class():
    """Generate request class.

    Built inside a function so that werkzeug is only imported on demand.
    """
    from werkzeug.exceptions import BadRequest
    from werkzeug.wrappers import BaseRequest, AcceptMixin
    from werkzeug.utils import cached_property
    class Request(BaseRequest, AcceptMixin):
        """Base class for incoming requests."""
        @cached_property
        def json(self):
            """Get the result of json.loads if possible."""
            if not self.data:
                return None
            try:
                decoded = self.data.decode(self.charset,
                                           self.encoding_errors)
                return json.loads(decoded)
            except (TypeError, ValueError):
                raise BadRequest('Unable to read JSON request')
    return Request
def routing_map(hass):
    """Generate empty routing map with HA validators.

    Registers the 'entity', 'date' and 'datetime' URL converters used by
    the registered views.
    """
    from werkzeug.routing import Map, BaseConverter, ValidationError
    class EntityValidator(BaseConverter):
        """Validate entity_id in urls."""
        regex = r"(\w+)\.(\w+)"
        def __init__(self, url_map, exist=True, domain=None):
            """Initialize entity validator."""
            super().__init__(url_map)
            self._exist = exist
            self._domain = domain
        def to_python(self, value):
            """Validate entity id."""
            unknown_entity = self._exist and hass.states.get(value) is None
            if unknown_entity:
                raise ValidationError()
            wrong_domain = (self._domain is not None and
                            split_entity_id(value)[0] != self._domain)
            if wrong_domain:
                raise ValidationError()
            return value
        def to_url(self, value):
            """Convert entity_id for a url."""
            return value
    class DateValidator(BaseConverter):
        """Validate dates in urls."""
        regex = r'\d{4}-\d{1,2}-\d{1,2}'
        def to_python(self, value):
            """Validate and convert date."""
            date_obj = dt_util.parse_date(value)
            if date_obj is None:
                raise ValidationError()
            return date_obj
        def to_url(self, value):
            """Convert date to url value."""
            return value.isoformat()
    class DateTimeValidator(BaseConverter):
        """Validate datetimes in urls formatted per ISO 8601."""
        regex = r'\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d' \
            r'\.\d+([+-][0-2]\d:[0-5]\d|Z)'
        def to_python(self, value):
            """Validate and convert datetime."""
            moment = dt_util.parse_datetime(value)
            if moment is None:
                raise ValidationError()
            return moment
        def to_url(self, value):
            """Convert datetime to url value."""
            return value.isoformat()
    return Map(converters={
        'entity': EntityValidator,
        'date': DateValidator,
        'datetime': DateTimeValidator,
    })
class HomeAssistantWSGI(object):
    """WSGI server for Home Assistant.

    Holds the werkzeug routing map, the registered views and any extra
    mounted WSGI apps, and serves them via CherryPy (see `start`).
    """
    # pylint: disable=too-many-instance-attributes, too-many-locals
    # pylint: disable=too-many-arguments
    def __init__(self, hass, development, api_password, ssl_certificate,
                 ssl_key, server_host, server_port, cors_origins,
                 trusted_networks):
        """Initialize the WSGI Home Assistant server."""
        from werkzeug.wrappers import Response
        # Sets the *class-level* default mimetype, affecting every
        # werkzeug Response created in this process.
        Response.mimetype = 'text/html'
        # pylint: disable=invalid-name
        self.Request = request_class()
        self.url_map = routing_map(hass)
        self.views = {}
        self.hass = hass
        # Extra WSGI apps mounted under url prefixes (see register_wsgi_app).
        self.extra_apps = {}
        self.development = development
        self.api_password = api_password
        self.ssl_certificate = ssl_certificate
        self.ssl_key = ssl_key
        self.server_host = server_host
        self.server_port = server_port
        self.cors_origins = cors_origins
        self.trusted_networks = trusted_networks
        self.event_forwarder = None
        # CherryPy server instance; created lazily in start().
        self.server = None
    def register_view(self, view):
        """Register a view with the WSGI server.
        The view argument must be a class that inherits from HomeAssistantView.
        It is optional to instantiate it before registering; this method will
        handle it either way.
        """
        from werkzeug.routing import Rule
        if view.name in self.views:
            _LOGGER.warning("View '%s' is being overwritten", view.name)
        if isinstance(view, type):
            # Instantiate the view, if needed
            view = view(self.hass)
        self.views[view.name] = view
        # The primary url plus any extra urls all map to the same endpoint
        # name, i.e. the same view instance.
        rule = Rule(view.url, endpoint=view.name)
        self.url_map.add(rule)
        for url in view.extra_urls:
            rule = Rule(url, endpoint=view.name)
            self.url_map.add(rule)
    def register_redirect(self, url, redirect_to):
        """Register a redirect with the server.
        If given this must be either a string or callable. In case of a
        callable it's called with the url adapter that triggered the match and
        the values of the URL as keyword arguments and has to return the target
        for the redirect, otherwise it has to be a string with placeholders in
        rule syntax.
        """
        from werkzeug.routing import Rule
        self.url_map.add(Rule(url, redirect_to=redirect_to))
    def register_static_path(self, url_root, path, cache_length=31):
        """Register a folder to serve as a static path.
        Specify optional cache length of asset in days.
        """
        from static import Cling
        headers = []
        # Caching headers are skipped in development mode so that edited
        # assets are always re-fetched.
        if cache_length and not self.development:
            # Convert the cache length from days to seconds for the
            # Cache-Control max-age directive.
            cache_time = cache_length * 86400
            headers.append({
                'prefix': '',
                HTTP_HEADER_CACHE_CONTROL:
                "public, max-age={}".format(cache_time)
            })
        self.register_wsgi_app(url_root, Cling(path, headers=headers))
    def register_wsgi_app(self, url_root, app):
        """Register a path to serve a WSGI app."""
        if url_root in self.extra_apps:
            _LOGGER.warning("Url root '%s' is being overwritten", url_root)
        self.extra_apps[url_root] = app
    def start(self):
        """Start the wsgi server."""
        from cherrypy import wsgiserver
        from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
        # pylint: disable=too-few-public-methods,super-init-not-called
        class ContextSSLAdapter(BuiltinSSLAdapter):
            """SSL Adapter that takes in an SSL context."""
            def __init__(self, context):
                # Deliberately skips BuiltinSSLAdapter.__init__ (which
                # builds a context from certificate file paths) and uses
                # the fully configured SSLContext instead.
                self.context = context
        # pylint: disable=no-member
        self.server = wsgiserver.CherryPyWSGIServer(
            (self.server_host, self.server_port), self,
            server_name='Home Assistant')
        if self.ssl_certificate:
            # Harden TLS per the module-level settings: no SSLv2/SSLv3,
            # no compression, restricted cipher list.
            context = ssl.SSLContext(SSL_VERSION)
            context.options |= SSL_OPTS
            context.set_ciphers(CIPHERS)
            context.load_cert_chain(self.ssl_certificate, self.ssl_key)
            self.server.ssl_adapter = ContextSSLAdapter(context)
        # Daemon thread so a running server never blocks interpreter exit.
        threading.Thread(
            target=self.server.start, daemon=True, name='WSGI-server').start()
    def stop(self):
        """Stop the wsgi server."""
        self.server.stop()
    def dispatch_request(self, request):
        """Handle incoming request.

        Matches the url against the routing map and delegates to the
        registered view; HTTP errors are converted to JSON bodies when the
        client accepts JSON.
        """
        from werkzeug.exceptions import (
            MethodNotAllowed, NotFound, BadRequest, Unauthorized,
        )
        from werkzeug.routing import RequestRedirect
        with request:
            adapter = self.url_map.bind_to_environ(request.environ)
            try:
                endpoint, values = adapter.match()
                return self.views[endpoint].handle_request(request, **values)
            except RequestRedirect as ex:
                # RequestRedirect is itself a valid WSGI response.
                return ex
            except (BadRequest, NotFound, MethodNotAllowed,
                    Unauthorized) as ex:
                resp = ex.get_response(request.environ)
                if request.accept_mimetypes.accept_json:
                    resp.data = json.dumps({
                        'result': 'error',
                        'message': str(ex),
                    })
                    resp.mimetype = CONTENT_TYPE_JSON
                return resp
    def base_app(self, environ, start_response):
        """WSGI Handler of requests to base app."""
        request = self.Request(environ)
        response = self.dispatch_request(request)
        if self.cors_origins:
            # Only echo the Origin back when it is explicitly allowed.
            cors_check = (environ.get('HTTP_ORIGIN') in self.cors_origins)
            cors_headers = ", ".join(ALLOWED_CORS_HEADERS)
            if cors_check:
                response.headers[HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN] = \
                    environ.get('HTTP_ORIGIN')
                response.headers[HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS] = \
                    cors_headers
        return response(environ, start_response)
    def __call__(self, environ, start_response):
        """Handle a request for base app + extra apps."""
        from werkzeug.wsgi import DispatcherMiddleware
        # Reject all traffic until Home Assistant has finished starting.
        if not self.hass.is_running:
            from werkzeug.exceptions import BadRequest
            return BadRequest()(environ, start_response)
        # NOTE(review): a new DispatcherMiddleware is constructed on every
        # request; extra_apps registered later are therefore picked up
        # immediately.
        app = DispatcherMiddleware(self.base_app, self.extra_apps)
        # Strip out any cachebusting MD5 fingerprints
        fingerprinted = _FINGERPRINT.match(environ.get('PATH_INFO', ''))
        if fingerprinted:
            environ['PATH_INFO'] = '{}.{}'.format(*fingerprinted.groups())
        return app(environ, start_response)
    @staticmethod
    def get_real_ip(request):
        """Return the clients correct ip address, even in proxied setups."""
        # NOTE(review): access_route[-1] is the entry closest to the server
        # in the forwarded chain — confirm this (rather than [0], the
        # originating client) is the intended "real" address.
        if request.access_route:
            return request.access_route[-1]
        else:
            return request.remote_addr
    def is_trusted_ip(self, remote_addr):
        """Match an ip address against trusted CIDR networks."""
        # Reads the networks via hass.wsgi (equivalent to
        # self.trusted_networks once setup() has assigned hass.wsgi).
        return any(ip_address(remote_addr) in trusted_network
                   for trusted_network in self.hass.wsgi.trusted_networks)
class HomeAssistantView(object):
    """Base view for all views.

    Subclasses must define `url` and `name` class attributes and provide
    lower-cased HTTP-method handlers (`get`, `post`, ...).
    """
    extra_urls = []
    requires_auth = True  # Views inheriting from this class can override this
    def __init__(self, hass):
        """Initialize the base view."""
        from werkzeug.wrappers import Response
        # Fail fast on misconfigured subclasses.
        if not hasattr(self, 'url'):
            class_name = self.__class__.__name__
            raise AttributeError(
                '{0} missing required attribute "url"'.format(class_name)
            )
        if not hasattr(self, 'name'):
            class_name = self.__class__.__name__
            raise AttributeError(
                '{0} missing required attribute "name"'.format(class_name)
            )
        self.hass = hass
        # pylint: disable=invalid-name
        self.Response = Response
    def handle_request(self, request, **values):
        """Handle request to url.

        Dispatches to the handler matching the HTTP method after checking
        authentication. Raises MethodNotAllowed when no handler exists and
        Unauthorized when auth is required but fails.
        """
        from werkzeug.exceptions import MethodNotAllowed, Unauthorized
        if request.method == "OPTIONS":
            # For CORS preflight requests.
            return self.options(request)
        try:
            handler = getattr(self, request.method.lower())
        except AttributeError:
            raise MethodNotAllowed
        remote_addr = HomeAssistantWSGI.get_real_ip(request)
        # Auth code verbose on purpose
        # Accept, in order: no password configured, trusted network,
        # auth header, or password passed as a query parameter.
        # hmac.compare_digest gives constant-time comparison.
        authenticated = False
        if self.hass.wsgi.api_password is None:
            authenticated = True
        elif self.hass.wsgi.is_trusted_ip(remote_addr):
            authenticated = True
        elif hmac.compare_digest(request.headers.get(HTTP_HEADER_HA_AUTH, ''),
                                 self.hass.wsgi.api_password):
            # A valid auth header has been set
            authenticated = True
        elif hmac.compare_digest(request.args.get(DATA_API_PASSWORD, ''),
                                 self.hass.wsgi.api_password):
            authenticated = True
        if self.requires_auth and not authenticated:
            _LOGGER.warning('Login attempt or request with an invalid '
                            'password from %s', remote_addr)
            # Surface the failed attempt to the user via a persistent
            # notification in the frontend.
            persistent_notification.create(
                self.hass,
                'Invalid password used from {}'.format(remote_addr),
                'Login attempt failed', NOTIFICATION_ID_LOGIN)
            raise Unauthorized()
        request.authenticated = authenticated
        _LOGGER.info('Serving %s to %s (auth: %s)',
                     request.path, remote_addr, authenticated)
        result = handler(request, **values)
        if isinstance(result, self.Response):
            # The method handler returned a ready-made Response, how nice of it
            return result
        status_code = 200
        # Handlers may return (body, status_code) tuples.
        if isinstance(result, tuple):
            result, status_code = result
        return self.Response(result, status=status_code)
    def json(self, result, status_code=200):
        """Return a JSON response."""
        msg = json.dumps(
            result, sort_keys=True, cls=rem.JSONEncoder).encode('UTF-8')
        return self.Response(
            msg, mimetype=CONTENT_TYPE_JSON, status=status_code)
    def json_message(self, error, status_code=200):
        """Return a JSON message response."""
        return self.json({'message': error}, status_code)
    def file(self, request, fil, mimetype=None):
        """Return a file.

        `fil` may be a path (opened in binary mode, mimetype guessed from
        the extension) or an already-open file object.
        """
        from werkzeug.wsgi import wrap_file
        from werkzeug.exceptions import NotFound
        if isinstance(fil, str):
            if mimetype is None:
                mimetype = mimetypes.guess_type(fil)[0]
            try:
                fil = open(fil, mode='br')
            except IOError:
                raise NotFound()
        return self.Response(wrap_file(request.environ, fil),
                             mimetype=mimetype, direct_passthrough=True)
    def options(self, request):
        """Default handler for OPTIONS (necessary for CORS preflight)."""
        return self.Response('', status=200)
|
|
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import requests
from tempest import config
from tempest.lib.common import rest_client
from murano_tempest_tests import utils
CONF = config.CONF
class ApplicationCatalogClient(rest_client.RestClient):
    """Tempest REST client for Murano Application Catalog"""

    def __init__(self, auth_provider):
        """Initialize the client with endpoint settings from tempest CONF."""
        super(ApplicationCatalogClient, self).__init__(
            auth_provider,
            CONF.application_catalog.catalog_type,
            CONF.identity.region,
            endpoint_type=CONF.application_catalog.endpoint_type)
        self.build_interval = CONF.application_catalog.build_interval
        self.build_timeout = CONF.application_catalog.build_timeout

# -----------------------------Packages methods--------------------------------
    def upload_package(self, package_name, package_path, top_dir, body):
        """Upload a Murano package into Murano repository

        :param package_name: Package name
        :param package_path: Path with .zip relatively top_dir
        :param top_dir: Top directory with tests
        :param body: dict of tags, parameters, etc
        :return: parsed response body
        """
        headers = {'X-Auth-Token': self.auth_provider.get_token()}
        uri = "/v1/catalog/packages"
        post_body = {'JsonString': json.dumps(body)}
        url = self.base_url + uri
        # Use a context manager so the package file is closed even when the
        # POST raises (the previous implementation leaked the file handle).
        with open(os.path.join(top_dir, package_path), 'rb') as package_file:
            resp = requests.post(url, files={package_name: package_file},
                                 data=post_body, headers=headers)
        self.expected_success(200, resp.status_code)
        return self._parse_resp(resp.text)

    def update_package(self, package_id, post_body):
        """Apply a json-patch to package attributes (tags, enabled, etc.)."""
        headers = {
            'content-type': 'application/murano-packages-json-patch'
        }
        uri = 'v1/catalog/packages/{0}'.format(package_id)
        resp, body = self.patch(uri, json.dumps(post_body), headers=headers)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def delete_package(self, package_id):
        """Removes a package from a repository
        :param package_id: Package ID
        """
        uri = 'v1/catalog/packages/{0}'.format(package_id)
        resp, body = self.delete(uri)
        self.expected_success(200, resp.status)

    def get_package(self, package_id):
        """Fetch a single package's attributes."""
        uri = 'v1/catalog/packages/{0}'.format(package_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_list_packages(self):
        """List all packages visible to the current tenant."""
        uri = 'v1/catalog/packages'
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def download_package(self, package_id):
        """Download the package zip archive."""
        headers = {
            'content-type': 'application/octet-stream'
        }
        uri = 'v1/catalog/packages/{0}/download'.format(package_id)
        resp, body = self.get(uri, headers=headers)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_ui_definition(self, package_id):
        """Download the package's dynamic UI definition."""
        headers = {
            'content-type': 'application/octet-stream'
        }
        uri = 'v1/catalog/packages/{0}/ui'.format(package_id)
        resp, body = self.get(uri, headers=headers)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_logo(self, package_id):
        """Download the package's logo image.

        Bug fix: this previously requested the ``/ui`` endpoint (a
        copy-paste of get_ui_definition); the logo is served at ``/logo``.
        """
        headers = {
            'content-type': 'application/octet-stream'
        }
        uri = 'v1/catalog/packages/{0}/logo'.format(package_id)
        resp, body = self.get(uri, headers=headers)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

# -----------------------Methods for environment CRUD--------------------------
    def get_environments_list(self):
        """List all environments for the current tenant."""
        uri = 'v1/environments'
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def create_environment(self, name):
        """Create an environment with the given name."""
        uri = 'v1/environments'
        post_body = {'name': name}
        resp, body = self.post(uri, json.dumps(post_body))
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def delete_environment(self, environment_id):
        """Delete an environment (regular, non-abandoning delete)."""
        uri = 'v1/environments/{0}'.format(environment_id)
        resp, body = self.delete(uri)
        self.expected_success(200, resp.status)

    def abandon_environment(self, environment_id):
        """Delete an environment without deprovisioning its resources."""
        uri = 'v1/environments/{0}?abandon=True'.format(environment_id)
        resp, body = self.delete(uri)
        self.expected_success(200, resp.status)

    def update_environment(self, environment_id):
        """Rename an environment to a freshly generated name."""
        uri = 'v1/environments/{0}'.format(environment_id)
        name = utils.generate_name("updated_env")
        post_body = {"name": name}
        resp, body = self.put(uri, json.dumps(post_body))
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_environment(self, environment_id):
        """Fetch a single environment."""
        uri = 'v1/environments/{0}'.format(environment_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

# -----------------------Methods for session manage ---------------------------
    def create_session(self, environment_id):
        """Open a configuration session for an environment."""
        body = None
        uri = 'v1/environments/{0}/configure'.format(environment_id)
        resp, body = self.post(uri, body)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def delete_session(self, environment_id, session_id):
        """Discard a configuration session."""
        uri = 'v1/environments/{0}/sessions/{1}'.format(environment_id,
                                                        session_id)
        resp, body = self.delete(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_session(self, environment_id, session_id):
        """Fetch a configuration session."""
        uri = 'v1/environments/{0}/sessions/{1}'.format(environment_id,
                                                        session_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def deploy_session(self, environment_id, session_id):
        """Start deployment of a configuration session."""
        body = None
        url = 'v1/environments/{0}/sessions/{1}/deploy'.format(environment_id,
                                                               session_id)
        resp, body = self.post(url, body)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

# -----------------------------Service methods---------------------------------
    def create_service(self, environment_id, session_id, post_body):
        """Add a service to an environment within a session."""
        headers = self.get_headers()
        headers.update(
            {'X-Configuration-Session': session_id}
        )
        uri = 'v1/environments/{0}/services'.format(environment_id)
        resp, body = self.post(uri, json.dumps(post_body), headers)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def delete_service(self, environment_id, session_id, service_id):
        """Remove a service from an environment within a session."""
        headers = self.get_headers()
        headers.update(
            {'X-Configuration-Session': session_id}
        )
        uri = 'v1/environments/{0}/services/{1}'.format(environment_id,
                                                        service_id)
        resp, body = self.delete(uri, headers)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_services_list(self, environment_id, session_id=None):
        """List services of an environment, optionally inside a session."""
        headers = self.get_headers()
        if session_id:
            headers.update(
                {'X-Configuration-Session': session_id}
            )
        uri = 'v1/environments/{0}/services'.format(environment_id)
        resp, body = self.get(uri, headers)
        self.expected_success(200, resp.status)
        # TODO(freerunner): Need to replace json.loads() to _parse_resp
        # method, when fix for https://bugs.launchpad.net/tempest/+bug/1539927
        # will resolved and new version of tempest-lib released.
        return json.loads(body)

    def get_service(self, environment_id, service_id, session_id=None):
        """Fetch one service of an environment, optionally inside a session."""
        headers = self.get_headers()
        if session_id:
            headers.update(
                {'X-Configuration-Session': session_id}
            )
        uri = 'v1/environments/{0}/services/{1}'.format(environment_id,
                                                        service_id)
        resp, body = self.get(uri, headers)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

# -----------------------------Category methods--------------------------------
    def list_categories(self):
        """List package categories."""
        uri = 'v1/catalog/packages/categories'
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def create_category(self, name):
        """Create a package category."""
        body = {'name': name}
        uri = 'v1/catalog/categories'
        resp, body = self.post(uri, json.dumps(body))
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def delete_category(self, category_id):
        """Delete a package category."""
        uri = 'v1/catalog/categories/{0}'.format(category_id)
        resp, body = self.delete(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_category(self, category_id):
        """Fetch a package category."""
        uri = 'v1/catalog/categories/{0}'.format(category_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

# ----------------------Environment templates methods--------------------------
    def get_env_templates_list(self):
        """List all environment templates."""
        uri = 'v1/templates'
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_public_env_templates_list(self):
        """List public environment templates only."""
        uri = 'v1/templates?is_public=true'
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_private_env_templates_list(self):
        """List private environment templates only."""
        uri = 'v1/templates?is_public=false'
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def create_env_template(self, env_template_name):
        """Create a private environment template."""
        body = {'name': env_template_name, "is_public": False}
        uri = 'v1/templates'
        resp, body = self.post(uri, json.dumps(body))
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def clone_env_template(self, env_template_id, cloned_env_template_name):
        """Clone an environment template under a new name."""
        body = {'name': cloned_env_template_name}
        uri = 'v1/templates/{0}/clone'.format(env_template_id)
        resp, body = self.post(uri, json.dumps(body))
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def create_public_env_template(self, env_template_name):
        """Create a public environment template."""
        body = {'name': env_template_name, "is_public": True}
        uri = 'v1/templates'
        resp, body = self.post(uri, json.dumps(body))
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def create_env_template_with_services(self, env_template_name, post_body):
        """Create an environment template pre-populated with one service."""
        body = {
            'name': env_template_name,
            'services': [post_body]
        }
        uri = 'v1/templates'
        resp, body = self.post(uri, json.dumps(body))
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def create_service_in_env_template(self, env_template_id, post_body):
        """Add a service to an existing environment template."""
        uri = 'v1/templates/{0}/services'.format(env_template_id)
        resp, body = self.post(uri, json.dumps(post_body))
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_services_list_in_env_template(self, env_template_id):
        """List services declared in an environment template."""
        uri = 'v1/templates/{0}/services'.format(env_template_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        # TODO(freerunner): Need to replace json.loads() to _parse_resp
        # method, when fix for https://bugs.launchpad.net/tempest/+bug/1539927
        # will resolved and new version of tempest-lib released.
        return json.loads(body)

    def get_service_in_env_template(self, env_template_name, service_id):
        """Fetch one service declared in an environment template."""
        uri = 'v1/templates/{0}/services/{1}'.format(env_template_name,
                                                     service_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return json.loads(body)

    def delete_service_from_env_template(self, env_template_name, service_id):
        """Remove a service from an environment template."""
        uri = 'v1/templates/{0}/services/{1}'.format(env_template_name,
                                                     service_id)
        resp, body = self.delete(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def delete_env_template(self, env_template_id):
        """Delete an environment template."""
        uri = 'v1/templates/{0}'.format(env_template_id)
        resp, body = self.delete(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def get_env_template(self, env_template_id):
        """Fetch an environment template."""
        uri = 'v1/templates/{0}'.format(env_template_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)

    def create_env_from_template(self, env_template_id, env_name):
        """Materialize an environment from an environment template."""
        body = {'name': env_name}
        uri = 'v1/templates/{0}/create-environment'.format(env_template_id)
        resp, body = self.post(uri, json.dumps(body))
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
|
|
from datetime import datetime
from django.contrib.auth.models import User
from django.db import models
from jsonfield import JSONField
from orchestra.core.errors import ModelSaveError
from orchestra.workflow import get_workflow_choices
from orchestra.workflow import get_step_choices
from orchestra.workflow import get_workflow_by_slug
from orchestra.workflow import Step
from orchestra.utils.assignment_snapshots import load_snapshots
# TODO(marcua): Convert ManyToManyFields to django-hstore referencefields or
# wait for django-postgres ArrayFields in Django 1.8.
class Certification(models.Model):
    """
    Certifications allow workers to perform different types of tasks.
    Attributes:
        slug (str):
            Unique identifier for the certification.
        name (str):
            Human-readable name for the certification.
        description (str):
            A longer description of the certification.
        required_certifications ([orchestra.models.Certification]):
            Prerequisite certifications for possessing this one.
    """
    # Stable machine-readable key; uniqueness enforced at the DB level.
    slug = models.CharField(max_length=200, unique=True)
    name = models.CharField(max_length=200)
    description = models.TextField()
    # Self-referential many-to-many: a certification may list other
    # certifications as prerequisites.
    required_certifications = models.ManyToManyField('self',
                                                     blank=True)
    def __str__(self):
        return '{}'.format(self.slug)
class Worker(models.Model):
    """
    Workers are human experts within the Orchestra ecosystem.
    Attributes:
        user (django.contrib.auth.models.User):
            Django user whom the worker represents.
        start_datetime (datetime.datetime):
            The time the worker was created.
        slack_username (str):
            The worker's Slack username if Slack integration is enabled.
    """
    user = models.OneToOneField(User)
    # NOTE(review): `datetime.now` yields a naive local timestamp; if the
    # project runs with timezone support (USE_TZ), `django.utils.timezone.now`
    # would be expected here — confirm before changing.
    start_datetime = models.DateTimeField(default=datetime.now)
    slack_username = models.CharField(max_length=200, blank=True, null=True)
    def __str__(self):
        return '{}'.format(self.user.username)
class WorkerCertification(models.Model):
    """
    A WorkerCertification maps a worker to a certification they possess.
    Attributes:
        certification (orchestra.models.Certification):
            Certification belonging to the corresponding worker.
        worker (orchestra.models.Worker):
            Worker possessing the given certification.
        task_class (orchestra.models.WorkerCertification.TaskClass):
            Represents whether the worker is in training for the given
            certification or prepared to work on real tasks.
        role (orchestra.models.WorkerCertification.Role):
            Represents whether the worker is an entry-level or review
            worker for the given certification.
    Constraints:
        `certification`, `worker`, `task_class`, and `role` are taken
        to be unique_together.
        Worker must possess an entry-level WorkerCertification before
        obtaining a reviewer one.
    """
    class Meta:
        unique_together = ('certification', 'worker', 'task_class', 'role')
    # Distinguishes practice work from real deliverables.
    class TaskClass:
        TRAINING = 0
        REAL = 1
    TASK_CLASS_CHOICES = (
        (TaskClass.TRAINING, 'Training tasks'),
        (TaskClass.REAL, 'A real task'))
    # If a worker has a REVIEWER certification, then they must have
    # an ENTRY_LEVEL certification
    class Role:
        ENTRY_LEVEL = 0
        REVIEWER = 1
    ROLE_CHOICES = (
        (Role.ENTRY_LEVEL, 'Entry-level'),
        (Role.REVIEWER, 'Reviewer'))
    certification = models.ForeignKey(Certification)
    worker = models.ForeignKey(Worker, related_name='certifications')
    task_class = models.IntegerField(choices=TASK_CLASS_CHOICES)
    role = models.IntegerField(choices=ROLE_CHOICES)
    def __str__(self):
        return '{} - {} - {} - {}'.format(
            self.worker.user.username, self.certification.slug,
            dict(WorkerCertification.TASK_CLASS_CHOICES)[self.task_class],
            dict(WorkerCertification.ROLE_CHOICES)[self.role])
    def save(self, *args, **kwargs):
        """Enforce that reviewer certifications require a matching
        entry-level certification for the same worker/certification/
        task_class; raises ModelSaveError otherwise."""
        if self.role == WorkerCertification.Role.REVIEWER:
            if not (WorkerCertification.objects
                    .filter(worker=self.worker, task_class=self.task_class,
                            certification=self.certification,
                            role=WorkerCertification.Role.ENTRY_LEVEL)
                    .exists()):
                raise ModelSaveError('You are trying to add a reviewer '
                                     'certification ({}) for a worker without '
                                     'an entry-level certification'
                                     .format(self))
        super(WorkerCertification, self).save(*args, **kwargs)
class Project(models.Model):
    """
    A project is a collection of tasks representing a workflow.
    Attributes:
        status (orchestra.models.Project.Status):
            Represents whether the project is being actively worked on.
        workflow_slug (str):
            Identifies the workflow that the project represents.
        short_description (str):
            A short human-readable description of the project.
        start_datetime (datetime.datetime):
            The time the project was created.
        priority (int):
            Represents the relative priority of the project.
        project_data (dict):
            Arbitrary JSON data associated with the project.
        task_class (int):
            Represents whether the project is a worker training exercise
            or a deliverable project.
        review_document_url (str):
            The URL for the review document to be passed between workers
            and reviewers for the project's tasks.
        slack_group_id (str):
            The project's internal Slack group ID if Slack integration
            is enabled.
    """
    class Status:
        ACTIVE = 0
        ABORTED = 2
    STATUS_CHOICES = (
        (Status.ACTIVE, 'Active'),
        (Status.ABORTED, 'Aborted'))
    status = models.IntegerField(choices=STATUS_CHOICES,
                                 default=Status.ACTIVE)
    workflow_slug = models.CharField(max_length=200,
                                     choices=get_workflow_choices())
    short_description = models.TextField()
    start_datetime = models.DateTimeField(auto_now_add=True)
    priority = models.IntegerField()
    # Use the `dict` callable rather than a `{}` literal: a literal default
    # is a single shared dict object, so mutating one unsaved project's data
    # would leak into every other instance (classic mutable-default bug).
    project_data = JSONField(default=dict)
    task_class = models.IntegerField(
        choices=WorkerCertification.TASK_CLASS_CHOICES)
    review_document_url = models.URLField(null=True, blank=True)
    slack_group_id = models.CharField(max_length=200, null=True, blank=True)
    def __str__(self):
        return '{} ({})'.format(str(self.workflow_slug),
                                self.short_description)
class Task(models.Model):
    """
    A task is a cohesive unit of work representing a workflow step.

    Attributes:
        step_slug (str):
            Identifies the workflow step that the task represents.
        project (orchestra.models.Project):
            The project to which the task belongs.
        status (orchestra.models.Task.Status):
            Represents the task's stage within its lifecycle.
    """
    class Status:
        AWAITING_PROCESSING = 0
        PROCESSING = 1
        PENDING_REVIEW = 2
        REVIEWING = 3
        POST_REVIEW_PROCESSING = 4
        COMPLETE = 5
        ABORTED = 6
    # Choice order drives display order in forms/admin; note ABORTED is
    # listed before COMPLETE here.
    STATUS_CHOICES = (
        (Status.AWAITING_PROCESSING, 'Awaiting Processing'),
        (Status.PROCESSING, 'Processing'),
        (Status.PENDING_REVIEW, 'Pending Review'),
        (Status.REVIEWING, 'Reviewing'),
        (Status.POST_REVIEW_PROCESSING, 'Post-review Processing'),
        (Status.ABORTED, 'Aborted'),
        (Status.COMPLETE, 'Complete'))
    step_slug = models.CharField(max_length=200,
                                 choices=get_step_choices())
    project = models.ForeignKey(Project, related_name='tasks')
    status = models.IntegerField(choices=STATUS_CHOICES)
    def __str__(self):
        return '{} - {}'.format(str(self.project), str(self.step_slug))
class TaskAssignment(models.Model):
    """
    A task assignment is a worker's assignment for a given task.

    Attributes:
        start_datetime (datetime.datetime):
            The time the assignment was created.
        worker (orchestra.models.Worker):
            The worker to whom the given task is assigned.
        task (orchestra.models.Task):
            The given task for the task assignment.
        status (orchestra.models.TaskAssignment.Status):
            Represents whether the assignment is currently being worked
            on.
        assignment_counter (int):
            Identifies the level of the assignment in the given task's
            review hierarchy (i.e., 0 represents an entry-level worker,
            1 represents the task's first reviewer, etc.).
        in_progress_task_data (str):
            A JSON blob containing the worker's input data for the task
            assignment.
        snapshots (str):
            A JSON blob containing saved snapshots of previous data from
            the task assignment.

    Constraints:
        `task` and `assignment_counter` are taken to be unique_together.
        Task assignments for machine-type tasks cannot have a `worker`,
        while those for human-type tasks must have one (enforced in
        `save()`).
    """
    class Meta:
        # At most one assignment per (task, review level).
        unique_together = ('task', 'assignment_counter')
    class SnapshotType:
        SUBMIT = 0
        ACCEPT = 1
        REJECT = 2
    class Status:
        PROCESSING = 0
        SUBMITTED = 1
    STATUS_CHOICES = (
        (Status.PROCESSING, 'Processing'),
        (Status.SUBMITTED, 'Submitted'))
    start_datetime = models.DateTimeField(auto_now_add=True)
    # Nullable because machine-type steps have no worker (see save()).
    worker = models.ForeignKey(Worker,
                               null=True,
                               blank=True)
    task = models.ForeignKey(Task, related_name='assignments')
    status = models.IntegerField(choices=STATUS_CHOICES)
    # Counter of a worker assigned to the task (level in the review
    # hierarchy; 0 is the entry-level worker).
    assignment_counter = models.IntegerField(default=0)
    # Opaque field that stores current state of task as per the Step's
    # description
    in_progress_task_data = JSONField()
    # When a worker submits, accepts, or rejects a task, we snapshot their
    # in_workflow_task_data along with the date in the following format:
    # {'snapshots': [
    #    {'data': snapshotted_task_data,
    #     'datetime': ISO 8601 datetime in UTC time,
    #     'work_time_seconds': integer seconds,
    #     'type': value from SnapshotType}]
    #  '__version': 1}
    snapshots = JSONField()
    def save(self, *args, **kwargs):
        """Validate the worker against the step's worker type before saving.

        Raises:
            ModelSaveError: if a human step has no worker, or a machine
                step has one.
        """
        workflow = get_workflow_by_slug(self.task.project.workflow_slug)
        step = workflow.get_step(self.task.step_slug)
        if step.worker_type == Step.WorkerType.HUMAN:
            if self.worker is None:
                raise ModelSaveError('Worker has to be present '
                                     'if worker type is Human')
        else:
            if self.worker is not None:
                raise ModelSaveError('Worker should not be assigned '
                                     'if worker type is Machine')
        super(TaskAssignment, self).save(*args, **kwargs)
# Attach a post-init signal to TaskAssigment. Every
# TaskAssignment that gets constructed will now call
# this post-init signal after loading from the database
# (or memory). We run `load_snapshots` after loading from
# the database so that we can migrate old JSON task assignment
# snapshots.
def task_assignment_post_init(sender, instance, **kwargs):
    """Migrate legacy snapshot JSON every time a TaskAssignment is built."""
    instance.snapshots = load_snapshots(instance.snapshots)
# post_init fires after __init__ on every construction (including loads
# from the database), so old-format snapshots are normalized transparently.
models.signals.post_init.connect(
    task_assignment_post_init, sender=TaskAssignment)
|
|
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler Service
"""
from oslo.config import cfg
from oslo import messaging
from cinder import context
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder import quota
from cinder import rpc
from cinder.scheduler.flows import create_volume
from cinder.volume import rpcapi as volume_rpcapi
# Config option: import path of the scheduler driver to instantiate when
# SchedulerManager is created without an explicit driver.
scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
                                  default='cinder.scheduler.filter_scheduler.'
                                          'FilterScheduler',
                                  help='Default scheduler driver to use')
CONF = cfg.CONF
CONF.register_opt(scheduler_driver_opt)
# Global quota engine (used to roll back reservations on retype failure).
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
class SchedulerManager(manager.Manager):
    """Chooses a host to create volumes.

    Receives scheduling requests over RPC, delegates host selection to the
    configured scheduler driver, and on failure records an error state on
    the affected resource and emits an error notification.
    """

    RPC_API_VERSION = '1.7'

    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, scheduler_driver=None, service_name=None,
                 *args, **kwargs):
        """Load the scheduler driver (defaults to CONF.scheduler_driver).

        The legacy Chance/Simple schedulers are transparently replaced by
        FilterScheduler with a deprecation warning.
        """
        if not scheduler_driver:
            scheduler_driver = CONF.scheduler_driver
        if scheduler_driver in ['cinder.scheduler.chance.ChanceScheduler',
                                'cinder.scheduler.simple.SimpleScheduler']:
            scheduler_driver = ('cinder.scheduler.filter_scheduler.'
                                'FilterScheduler')
            LOG.deprecated(_('ChanceScheduler and SimpleScheduler have been '
                             'deprecated due to lack of support for advanced '
                             'features like: volume types, volume encryption,'
                             ' QoS etc. These two schedulers can be fully '
                             'replaced by FilterScheduler with certain '
                             'combination of filters and weighers.'))
        self.driver = importutils.import_object(scheduler_driver)
        super(SchedulerManager, self).__init__(*args, **kwargs)

    def init_host(self):
        """Ask all volume services to republish their capabilities."""
        ctxt = context.get_admin_context()
        self.request_service_capabilities(ctxt)

    def update_service_capabilities(self, context, service_name=None,
                                    host=None, capabilities=None, **kwargs):
        """Process a capability update from a service node."""
        if capabilities is None:
            capabilities = {}
        self.driver.update_service_capabilities(service_name,
                                                host,
                                                capabilities)

    def create_consistencygroup(self, context, topic,
                                group_id,
                                request_spec_list=None,
                                filter_properties_list=None):
        """Schedule a consistency group; mark it 'error' on failure."""
        try:
            self.driver.schedule_create_consistencygroup(
                context, group_id,
                request_spec_list,
                filter_properties_list)
        except exception.NoValidHost:
            msg = (_("Could not find a host for consistency group "
                     "%(group_id)s.") %
                   {'group_id': group_id})
            LOG.error(msg)
            db.consistencygroup_update(context, group_id,
                                       {'status': 'error'})
        except Exception:
            # Unexpected errors are re-raised after recording the failure.
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create consistency group "
                                "%(group_id)s."),
                              {'group_id': group_id})
                db.consistencygroup_update(context, group_id,
                                           {'status': 'error'})

    def create_volume(self, context, topic, volume_id, snapshot_id=None,
                      image_id=None, request_spec=None,
                      filter_properties=None):
        """Build and run the volume-creation taskflow for ``volume_id``."""
        try:
            flow_engine = create_volume.get_flow(context,
                                                 db, self.driver,
                                                 request_spec,
                                                 filter_properties,
                                                 volume_id,
                                                 snapshot_id,
                                                 image_id)
        except Exception:
            LOG.exception(_("Failed to create scheduler manager volume flow"))
            raise exception.CinderException(
                _("Failed to create scheduler manager volume flow"))
        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()

    def request_service_capabilities(self, context):
        """Fan out a capability-report request to all volume services."""
        volume_rpcapi.VolumeAPI().publish_service_capabilities(context)

    def migrate_volume_to_host(self, context, topic, volume_id, host,
                               force_host_copy, request_spec,
                               filter_properties=None):
        """Ensure that the host exists and can accept the volume."""
        def _migrate_volume_set_error(self, context, ex, request_spec):
            # Reset migration status and emit an error notification.
            volume_state = {'volume_state': {'migration_status': None}}
            self._set_volume_state_and_notify('migrate_volume_to_host',
                                              volume_state,
                                              context, ex, request_spec)

        try:
            tgt_host = self.driver.host_passes_filters(context, host,
                                                       request_spec,
                                                       filter_properties)
        except exception.NoValidHost as ex:
            _migrate_volume_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _migrate_volume_set_error(self, context, ex, request_spec)
        else:
            volume_ref = db.volume_get(context, volume_id)
            volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref,
                                                     tgt_host,
                                                     force_host_copy)

    def retype(self, context, topic, volume_id,
               request_spec, filter_properties=None):
        """Schedule the modification of a volume's type.

        :param context: the request context
        :param topic: the topic listened on
        :param volume_id: the ID of the volume to retype
        :param request_spec: parameters for this retype request
        :param filter_properties: parameters to filter by
        """
        def _retype_volume_set_error(self, context, ex, request_spec,
                                     volume_ref, msg, reservations):
            # Roll back quota reservations, restore the volume's status
            # and emit an error notification.
            if reservations:
                QUOTAS.rollback(context, reservations)
            if not volume_ref['volume_attachment']:
                orig_status = 'available'
            else:
                orig_status = 'in-use'
            volume_state = {'volume_state': {'status': orig_status}}
            self._set_volume_state_and_notify('retype', volume_state,
                                              context, ex, request_spec, msg)

        volume_ref = db.volume_get(context, volume_id)
        reservations = request_spec.get('quota_reservations')
        new_type = request_spec.get('volume_type')
        if new_type is None:
            msg = _('New volume type not specified in request_spec.')
            ex = exception.ParameterNotFound(param='volume_type')
            _retype_volume_set_error(self, context, ex, request_spec,
                                     volume_ref, msg, reservations)
            # Bug fix: without this return, scheduling continued with
            # new_type == None and later dereferenced new_type['id'],
            # raising TypeError after the error had already been recorded.
            return

        # Default migration policy is 'never'
        migration_policy = request_spec.get('migration_policy')
        if not migration_policy:
            migration_policy = 'never'

        try:
            tgt_host = self.driver.find_retype_host(context, request_spec,
                                                    filter_properties,
                                                    migration_policy)
        except exception.NoValidHost as ex:
            msg = (_("Could not find a host for volume %(volume_id)s with "
                     "type %(type_id)s.") %
                   {'type_id': new_type['id'], 'volume_id': volume_id})
            _retype_volume_set_error(self, context, ex, request_spec,
                                     volume_ref, msg, reservations)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _retype_volume_set_error(self, context, ex, request_spec,
                                         volume_ref, None, reservations)
        else:
            volume_rpcapi.VolumeAPI().retype(context, volume_ref,
                                             new_type['id'], tgt_host,
                                             migration_policy, reservations)

    def manage_existing(self, context, topic, volume_id,
                        request_spec, filter_properties=None):
        """Ensure that the host exists and can accept the volume."""
        def _manage_existing_set_error(self, context, ex, request_spec):
            # Mark the volume 'error' and emit an error notification.
            volume_state = {'volume_state': {'status': 'error'}}
            self._set_volume_state_and_notify('manage_existing', volume_state,
                                              context, ex, request_spec)

        volume_ref = db.volume_get(context, volume_id)
        try:
            self.driver.host_passes_filters(context,
                                            volume_ref['host'],
                                            request_spec,
                                            filter_properties)
        except exception.NoValidHost as ex:
            _manage_existing_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _manage_existing_set_error(self, context, ex, request_spec)
        else:
            volume_rpcapi.VolumeAPI().manage_existing(context, volume_ref,
                                                      request_spec.get('ref'))

    def get_pools(self, context, filters=None):
        """Get active pools from scheduler's cache."""
        return self.driver.get_pools(context, filters)

    def _set_volume_state_and_notify(self, method, updates, context, ex,
                                     request_spec, msg=None):
        """Persist an error state for the volume and emit a notification.

        ``updates`` must contain a 'volume_state' dict which is written to
        the volume row (when a volume_id is present) and included in the
        'scheduler.<method>' error notification payload.
        """
        # TODO(harlowja): move into a task that just does this later.
        if not msg:
            msg = (_("Failed to schedule_%(method)s: %(ex)s") %
                   {'method': method, 'ex': ex})
        LOG.error(msg)

        volume_state = updates['volume_state']
        properties = request_spec.get('volume_properties', {})

        volume_id = request_spec.get('volume_id', None)

        if volume_id:
            db.volume_update(context, volume_id, volume_state)

        payload = dict(request_spec=request_spec,
                       volume_properties=properties,
                       volume_id=volume_id,
                       state=volume_state,
                       method=method,
                       reason=ex)

        rpc.get_notifier("scheduler").error(context,
                                            'scheduler.' + method,
                                            payload)
|
|
import logging
import traceback
import ujson as json
from collections import *
from itertools import chain
from bs4 import BeautifulSoup, SoupStrainer
from openvenues.extract.util import *
logger = logging.getLogger('extract.soup')
def tag_value_and_attr(tag):
    """Return (value, value_attr) extracted from a bs4 tag.

    ``value_attr`` is the attribute name configured for this tag type in
    ``property_values``; when the tag actually carries that attribute its
    value is returned, otherwise the tag's stripped text is used.

    NOTE(review): when the configured attribute is absent, value_attr is
    still returned non-None even though the value came from the text;
    callers key off value_attr's truthiness, so this quirk is preserved.
    """
    # The original assigned value_attr = None and immediately overwrote it;
    # the dead store is removed.
    value_attr = property_values.get(tag.name.lower())
    if value_attr and value_attr in tag.attrs:
        value = tag.attrs[value_attr]
    else:
        value = tag.text.strip()
    return value, value_attr
def extract_links(soup, base_url=None):
    """Yield the href of every anchor that is not marked rel="nofollow".

    base_url: optional page URL used to absolutize relative hrefs. The
    original referenced an undefined global ``url`` here, which raised
    NameError for any page containing a matching link; pass the page URL
    explicitly instead (omit it to yield hrefs unchanged).
    """
    def not_nofollow(rel):
        # bs4 passes the attribute value to the filter: None when the
        # attribute is absent, and a list for multi-valued attributes
        # like rel. The original compared the list against the string
        # 'nofollow' (always unequal), so nofollow links were never
        # actually excluded.
        return rel is None or 'nofollow' not in rel
    for tag in soup.find_all('a', attrs={'href': True,
                                         'rel': not_nofollow}):
        link = tag['href']
        if base_url:
            # Make link absolute
            link = urlparse.urljoin(base_url, link)
        yield link
def extract_basic_metadata(soup):
    """Extract page-level metadata from a parsed document.

    Returns a dict containing any of: title, description, canonical,
    alternates, other_meta (named meta tags outside the og:/place:/
    business: namespaces) and tags (rel="tag" anchors).
    """
    # Title preference order: og:title, <meta name="title">, <title>.
    title_tags = soup.select('meta[property="og:title"]') + soup.select('meta[name="title"]') + soup.find_all('title')
    title = None
    for t in title_tags:
        value, value_attr = tag_value_and_attr(t)
        if value and value.strip():
            title = value.strip()
            break
    ret = {}
    if title:
        ret['title'] = title
    description_tags = soup.select('meta[property="og:description"]') or soup.select('meta[name="description"]')
    if description_tags:
        for d in description_tags:
            value, value_attr = tag_value_and_attr(d)
            if value and value.strip():
                description = value.strip()
                ret['description'] = description
                break
    canonical = soup.select('link[rel="canonical"]')
    if canonical and canonical[0].get('href'):
        ret['canonical'] = canonical[0]['href']
    alternates = soup.select('link[rel="alternate"]')
    if alternates:
        ret['alternates'] = [{'link': tag['href'],
                              'lang': tag.get('hreflang')
                              } for tag in alternates if tag.get('href')]
    # Every named/propertied meta tag, excluding the OpenGraph namespaces
    # which are handled by the dedicated extractors.
    meta_tags = set(soup.select('meta[property]')) | set(soup.select('meta[name]'))
    meta_dict = defaultdict(list)
    for t in meta_tags:
        name = t.get('property', t.get('name', '')).strip().lower()
        value, value_attr = tag_value_and_attr(t)
        if value and value.strip() and not name.startswith('og:') and not name.startswith('place:') and not name.startswith('business:'):
            meta_dict[name].append(value)
    if meta_dict:
        ret['other_meta'] = dict(meta_dict)
    rel_tag = soup.select('[rel="tag"]')
    if rel_tag:
        all_tags = []
        for t in rel_tag:
            tag = {}
            value = t.text.strip()
            if value:
                tag['value'] = value
            link, link_attr = tag_value_and_attr(t)
            if link_attr and value:
                # NOTE(review): 'link' stores the attribute *name*
                # (link_attr) while 'link_value' stores the extracted
                # value — these look swapped; confirm intent before
                # relying on either key.
                tag['link'] = link_attr
                tag['link_value'] = link
            elif link_attr:
                tag['value'] = link
                tag['attr'] = link_attr
            else:
                continue
            all_tags.append(tag)
        ret['tags'] = all_tags
    return ret
def extract_schema_dot_org(soup, use_rdfa=False):
    """Extract schema.org microdata (or data-vocabulary RDFa) place items.

    Walks the whole tag tree breadth-first, building nested item/property
    dicts from itemscope/itemprop (or typeof/property/rel when use_rdfa
    is True), then keeps only items that carry a street address or a
    lat/lon pair.
    """
    items = []
    scope_attr = 'itemtype'
    prop_attr = 'itemprop'
    schema_type = SCHEMA_DOT_ORG_TYPE if not use_rdfa else RDFA_TYPE
    xmlns = None
    if use_rdfa:
        data_vocabulary = None
        # Verify that we have xmlns defined
        for tag in soup.find_all(True):
            data_vocabulary = [k for k, v in tag.attrs.iteritems()
                               if k.startswith('xmlns:') and 'data-vocabulary' in v]
            if data_vocabulary:
                data_vocabulary = data_vocabulary[0]
                break
        if not data_vocabulary:
            return items
        else:
            # e.g. xmlns:v="http://rdf.data-vocabulary.org/#" -> prefix "v"
            xmlns = data_vocabulary.split(':', 1)[-1]
    # Breadth-first traversal; each entry pairs a tag with the innermost
    # enclosing item (None at the top level).
    queue = deque([(None, tag) for tag in soup.find_all(True, recursive=False)])
    # NOTE(review): these two initializations are shadowed by the
    # per-item re-initialization in the filter loop below; they appear
    # to be dead.
    have_street = False
    have_latlon = False
    while queue:
        parent_item, tag = queue.popleft()
        if not tag.name:
            continue
        current_item = parent_item
        item = None
        prop = None
        has_vocab = False
        item_scope = tag.get(scope_attr)
        if not item_scope and use_rdfa:
            item_scope = tag.get('typeof', tag.get('vocab'))
            # Only accept types in the detected data-vocabulary namespace.
            if not item_scope or not item_scope.startswith('{}:'.format(xmlns)):
                item_scope = None
        item_prop = tag.get(prop_attr)
        item_type = item_scope
        if not item_prop and use_rdfa:
            item_prop = tag.get('property')
            if not item_prop or not item_prop.startswith('{}:'.format(xmlns)):
                # Fall back to rel attributes in the same namespace.
                item_prop = tag.get('rel', [])
                item_prop = [p for p in item_prop if p.startswith('{}:'.format(xmlns))]
                if not item_prop:
                    item_prop = None
                else:
                    item_prop = item_prop[0]
        if item_prop:
            prop_name = item_prop
            if use_rdfa:
                prop_name = prop_name.split(':', 1)[-1]
            prop_name = prop_name.replace('-', '_')
            prop = {'name': prop_name}
            value_attr = None
            # A tag that opens a new scope holds nested properties, not a
            # scalar value of its own.
            if not item_scope:
                value, value_attr = tag_value_and_attr(tag)
                if use_rdfa and not value and tag.get('content'):
                    value, value_attr = tag['content'], 'content'
                prop['value'] = value
            attributes = {k: v for k, v in tag.attrs.iteritems() if k not in (scope_attr, prop_attr)}
            if value_attr:
                prop['text'] = tag.text.strip()
                prop['value_attr'] = value_attr
            if attributes:
                prop['attributes'] = attributes
            if current_item is not None:
                current_item['properties'] = current_item['properties'] or []
                current_item['properties'].append(prop)
        if item_scope:
            # A tag can be both a property of its parent and a new item.
            if prop is not None:
                item = prop
            else:
                item = {}
            is_place_item = False
            if item_type:
                if not use_rdfa:
                    item_type = item_type.split('/')[-1]
                elif use_rdfa and xmlns and item_type.startswith('{}:'.format(xmlns)):
                    item_type = item_type.split(':', 1)[-1]
                is_place_item = item_type.lower() in PLACE_SCHEMA_TYPES
                item.update({
                    'item_type': schema_type,
                    'type': item_type,
                })
            item['properties'] = []
            # Only place-typed items are collected as top-level results.
            if is_place_item:
                items.append(item)
            current_item = item
        queue.extend([(current_item, child) for child in tag.find_all(True, recursive=False)])
    # Keep only items that look like real places: a street address or a
    # latitude/longitude pair somewhere in their properties.
    ret = []
    for item in items:
        have_street = False
        have_latlon = False
        item_type = item.get('item_type')
        if item_type == 'schema.org':
            for prop in item.get('properties', []):
                name = prop.get('name', '').lower()
                if name == 'address':
                    props = set([p.get('name', '').lower() for p in prop.get('properties', [])])
                    if props & street_props:
                        have_street = True
                    if len(latlon_props & props) >= 2:
                        have_latlon = True
                if name == 'geo':
                    props = set([p.get('name') for p in prop.get('properties', [])])
                    if len(latlon_props & props) >= 2:
                        have_latlon = True
                if name in latlon_props:
                    have_latlon = True
                if name in street_props:
                    have_street = True
        elif item_type == 'rdfa':
            props = set([p.get('name', '').lower() for p in item.get('properties', [])])
            have_street = props & street_props
            have_latlon = len(props & latlon_props) >= 2
        if have_street or have_latlon:
            ret.append(item)
    return ret
# Canonical site identifiers used as keys in extract_social_handles output.
FACEBOOK = 'facebook'
TWITTER = 'twitter'
INSTAGRAM = 'instagram'
PINTEREST = 'pinterest'
YELP = 'yelp'
FOURSQUARE = 'foursquare'
GOOGLE_PLUS = 'google_plus'
YOUTUBE = 'youtube'
VIMEO = 'vimeo'
# href substring -> site identifier. Matched with a CSS [href*=...]
# substring selector, so partial patterns like 'yelp.' work across TLDs.
social_href_patterns = {
    'facebook.com': FACEBOOK,
    'twitter.com': TWITTER,
    'instagram.com': INSTAGRAM,
    'pinterest.com': PINTEREST,
    'yelp.': YELP,
    'foursquare': FOURSQUARE,
    'plus.google': GOOGLE_PLUS,
    'youtube': YOUTUBE,
    'youtu.be': YOUTUBE,
    'vimeo.com': VIMEO,
}
def extract_social_handles(soup):
    """Collect links to known social-media sites.

    Returns a dict mapping a site identifier from ``social_href_patterns``
    (e.g. 'facebook') to the list of values extracted from each matching
    anchor. The original also tracked a ``max_matches`` counter that was
    never read; that dead code is removed.
    """
    ids = defaultdict(list)
    for pattern, site in social_href_patterns.iteritems():
        for match in soup.select(u'a[href*="{}"]'.format(pattern)):
            value, _ = tag_value_and_attr(match)
            ids[site].append(value)
    return dict(ids)
value_attr_regex = re.compile("value-.*")
def extract_vcards(soup):
    """Extract hCard (.vcard / .adr) items that carry an address or
    coordinates.

    Each returned item has item_type VCARD_TYPE and a list of property
    dicts (street_address, locality, latitude, org_name, ...).
    """
    items = []

    def gen_prop(name, selector):
        # Build one property dict from the first element matched by
        # `selector`, honoring the microformat "value"/"value-*" class
        # conventions; returns None when nothing matched.
        prop = None
        if selector:
            result = selector[0]
            prop = {'name': name}
            val_select = result.select('.value')
            if val_select:
                value, value_attr = tag_value_and_attr(val_select[0])
            else:
                val_select = result.find_all(class_=value_attr_regex)
                if not val_select:
                    value, value_attr = tag_value_and_attr(result)
                else:
                    # "value-foo" means the value lives in attribute "foo".
                    value_attr = val_select[0].attrs['class'][0].split('-', 1)[-1]
                    value = val_select[0].attrs.get(value_attr)
                    if not value:
                        value, value_attr = tag_value_and_attr(result)
            text = (result.text or u'').strip()
            if not value_attr:
                prop['value'] = text
            else:
                prop['text'] = text
                prop['value'] = value
                prop['value_attr'] = value_attr
            attributes = {k: v for k, v in result.attrs.iteritems() if k not in ('class', value_attr)}
            if attributes:
                prop['attributes'] = attributes
            # NOTE(review): 'value' is always set above, so this guard
            # never fires; kept for fidelity with the original.
            if 'text' not in prop and 'value' not in prop and 'attributes' not in prop:
                prop = None
        return prop

    vcards = soup.select('.vcard')
    if not vcards:
        vcards = soup.select('.adr')
    for vcard in vcards:
        item = {}
        properties = []
        have_address = False
        street = gen_prop('street_address', vcard.select('.street-address'))
        if street:
            properties.append(street)
            have_address = True
        locality = gen_prop('locality', vcard.select('.locality'))
        if locality:
            properties.append(locality)
        region = gen_prop('region', vcard.select('.region'))
        if region:
            properties.append(region)
        postal_code = gen_prop('postal_code', vcard.select('.postal-code'))
        if postal_code:
            properties.append(postal_code)
        country = gen_prop('country', vcard.select('.country-name'))
        if country:
            properties.append(country)
        have_latlon = False
        latitude = gen_prop('latitude', vcard.select('.latitude'))
        longitude = gen_prop('longitude', vcard.select('.longitude'))
        # Fall back to microformats2 classes for whichever coordinate is
        # missing. Bug fix: the original guard `not latitude and longitude`
        # was mis-parenthesized, and a typo assigned the retried longitude
        # to `longtitude`, silently discarding it.
        if not latitude or not longitude:
            latitude = latitude or gen_prop('latitude',
                                            vcard.select('.p-latitude'))
            longitude = longitude or gen_prop('longitude',
                                              vcard.select('.p-longitude'))
        if latitude and longitude:
            properties.append(latitude)
            properties.append(longitude)
            have_latlon = True
        if have_address or have_latlon:
            # Only bother collecting the descriptive properties when the
            # card actually locates a place.
            org_name = gen_prop('org_name', vcard.select('.org'))
            if org_name:
                properties.append(org_name)
            name = gen_prop('name', vcard.select('.fn'))
            if name:
                properties.append(name)
            photo = gen_prop('photo', vcard.select('.photo'))
            if photo:
                properties.append(photo)
            vcard_url = gen_prop('url', vcard.select('.url a'))
            if not vcard_url:
                vcard_url = gen_prop('url', vcard.select('a.url'))
            if vcard_url:
                properties.append(vcard_url)
            telephone = gen_prop('telephone', vcard.select('.tel'))
            if telephone:
                properties.append(telephone)
            category = gen_prop('category', vcard.select('.category'))
            if category:
                properties.append(category)
        else:
            continue
        if properties:
            item['item_type'] = VCARD_TYPE
            item['properties'] = properties
            items.append(item)
    return items
def extract_address_elements(soup):
    """Collect every <address> element as an ADDRESS_ELEMENT_TYPE item,
    keeping both the plain text and the original markup."""
    results = []
    for addr in soup.select('address'):
        markup = unicode(addr)
        results.append({'item_type': ADDRESS_ELEMENT_TYPE,
                        'address': BeautifulSoup(markup).text.strip(),
                        'original_html': markup})
    return results
def extract_geotags(soup):
    """Read geotagging meta tags (geo.*, ICBM, DC.title).

    Returns a single dict with item_type GEOTAG_TYPE and any of the
    geotags.* keys that were present, or None when nothing was found.
    The legacy ICBM tag is only consulted when no geo.position tag
    exists at all.
    """
    def _first(selector):
        matched = soup.select(selector)
        return matched[0] if matched else None

    item = {}
    position = _first('meta[name="geo.position"]')
    if position is not None:
        value, _ = tag_value_and_attr(position)
        if value and value.strip():
            item['geotags.position'] = value.strip()
    else:
        icbm = _first('meta[name="ICBM"]')
        if icbm is not None:
            value, _ = tag_value_and_attr(icbm)
            if value and value.strip():
                item['geotags.icbm'] = value.strip()
    for key, selector in (('geotags.placename', 'meta[name="geo.placename"]'),
                          ('geotags.region', 'meta[name="geo.region"]'),
                          ('geotags.title', 'meta[name="DC.title"]')):
        tag = _first(selector)
        if tag is not None:
            value, _ = tag_value_and_attr(tag)
            if value:
                item[key] = value.strip()
    if item:
        item['item_type'] = GEOTAG_TYPE
    return item or None
def extract_opengraph_tags(soup):
    """Collect og:* meta properties into a dict.

    The first occurrence of each property wins; empty values are
    skipped. Returns None when no og: tags are present.
    """
    collected = {}
    for meta in soup.select('meta[property]'):
        key = meta['property'].strip().lower()
        content = meta.get('content', '').strip()
        if key.startswith('og:') and content and key not in collected:
            collected[key] = content
    return collected or None
def extract_opengraph_business_tags(soup):
    """Collect business:* and place:* meta properties into a dict.

    The first occurrence of each property wins; empty values are
    skipped. Returns None when no such tags are present.
    """
    collected = {}
    for meta in soup.select('meta[property]'):
        key = meta['property'].strip().lower()
        content = meta.get('content', '').strip()
        if key.startswith(('business:', 'place:')) and content and key not in collected:
            collected[key] = content
    return collected or None
def gen_og_props(og_tags, proplist, prefix='og'):
    """Pick namespaced properties out of an OpenGraph tag dict.

    For each name in proplist, looks up '<prefix>:<name>' in og_tags and
    keeps the stripped value when it is non-empty. Returns the resulting
    dict (possibly empty), keyed by the full namespaced names.
    """
    found = {}
    for prop_name in proplist:
        full_name = '{}:{}'.format(prefix, prop_name)
        stripped = og_tags.get(full_name, '').strip()
        if stripped:
            found[full_name] = stripped
    return found
def opengraph_item(og_tags):
    """Build a place item from generic OpenGraph (og:*) tags.

    An OG_TAG_TYPE item is produced only when the tags include a
    latitude/longitude pair or at least one address component; the
    title/description properties are folded in on success. Returns None
    when nothing locatable is present.
    """
    latitude_value = None
    for val in ('og:latitude', 'og:lat'):
        if val in og_tags:
            latitude_value = val
    longitude_value = None
    for val in ('og:longitude', 'og:lng'):
        if val in og_tags:
            longitude_value = val
    have_latlon = latitude_value and longitude_value
    item = {}
    if have_latlon:
        # Bug fix: pre-initialize so a failure in the lookup/strip below
        # cannot leave these names unbound (the original raised NameError
        # at the `if latitude and longitude` check after the except ran).
        latitude = None
        longitude = None
        try:
            latitude = og_tags[latitude_value].strip()
            longitude = og_tags[longitude_value].strip()
        except Exception:
            logger.error('Error in opengraph tags extracting lat/lon: {}'.format(traceback.format_exc()))
        if latitude and longitude:
            item['og:latitude'] = latitude
            item['og:longitude'] = longitude
    address_props = gen_og_props(og_tags, ['street-address', 'locality', 'region', 'postal-code', 'country-name', 'phone_number'])
    have_address = len(address_props) > 0
    if have_address:
        item.update(address_props)
    if have_address or have_latlon:
        item['item_type'] = OG_TAG_TYPE
        title_props = gen_og_props(og_tags, ['title', 'description', 'locale', 'site_name', 'type', 'url'])
        item.update(title_props)
    return item or None
def opengraph_business(og_tags):
    """Build a place item from business:*/place:* OpenGraph tags.

    Produces an OG_BUSINESS_TAG_TYPE item when contact-data fields or a
    place:location lat/lon pair are present; otherwise returns None.
    """
    item = {}
    contact_props = gen_og_props(
        og_tags,
        ['street_address', 'locality', 'region', 'postal_code',
         'country', 'phone_number', 'website'],
        prefix='business:contact_data')
    if contact_props:
        item.update(contact_props)
    lat = og_tags.get('place:location:latitude', '').strip()
    lon = og_tags.get('place:location:longitude', '').strip()
    if lat and lon:
        item['place:location:latitude'] = lat
        item['place:location:longitude'] = lon
    if contact_props or (lat and lon):
        item['item_type'] = OG_BUSINESS_TAG_TYPE
        item.update(gen_og_props(og_tags,
                                 ['title', 'description', 'locale',
                                  'site_name', 'type', 'url']))
    return item or None
google_maps_lat_lon_path_regex = re.compile('/maps.*?@[\d]+', re.I)
def item_from_google_maps_url(url):
    """Parse a Google Maps URL into a GOOGLE_MAP_EMBED_TYPE item.

    Handles both classic query-string URLs (ll/sll/center, q, hnear/near,
    daddr parameters) and new-style "/maps/.../@lat,lon,..." paths.
    Returns None when no usable information is found.
    """
    query_param = 'q'
    # Coordinate parameters in priority order; the first one yielding a
    # complete lat/lon pair wins.
    ll_param_names = ('ll', 'sll', 'center')
    near_param_names = ('hnear', 'near')
    daddr_param = 'daddr'
    latitude = None
    longitude = None
    # Bug fix: these were previously assigned only inside the query-string
    # branch, so query-less URLs (exactly the ones the "@lat,lon" path
    # handling below exists for) raised NameError at `if query:`.
    query = None
    near = None
    daddr = None
    split = urlparse.urlsplit(url)
    query_string = split.query
    path = split.path
    if query_string:
        params = {k.lower(): v for k, v in urlparse.parse_qs(query_string).iteritems()}
        for param in ll_param_names:
            latlon = params.get(param)
            try:
                latitude, longitude = latlon_comma_splitter.split(latlon[0])
                # Bug fix: the original `if not latitude and longitude`
                # was mis-parenthesized; retry unless both parts parsed.
                if not (latitude and longitude):
                    continue
                break
            except Exception:
                continue
        query = params.get(query_param)
        if query:
            query = query[0]
        for param in near_param_names:
            near = params.get(param)
            if near:
                near = near[0]
                break
        daddr = params.get(daddr_param)
        if daddr:
            daddr = daddr[0]
    item = {}
    if latitude and longitude:
        item['latitude'] = latitude
        item['longitude'] = longitude
    if query:
        item['googlemaps.query'] = query
    if near:
        item['googlemaps.near'] = near
    if daddr:
        item['googlemaps.daddr'] = daddr
    if item:
        item['googlemaps.url'] = url
        item['item_type'] = GOOGLE_MAP_EMBED_TYPE
        return item
    # New-style URLs encode coordinates in a "/@lat,lon,zoom" path segment.
    if path and google_maps_lat_lon_path_regex.search(path):
        path_components = path.split('/')
        for p in path_components:
            if p.startswith('@'):
                values = p.strip('@').split(',')
                if len(values) >= 2:
                    latitude, longitude = values[:2]
                    if latitude and longitude:
                        item = {
                            'item_type': GOOGLE_MAP_EMBED_TYPE,
                            'latitude': latitude,
                            'longitude': longitude,
                        }
                        return item
    return None
# Google Maps links on any TLD: generic "/maps" hrefs, and the dedicated
# "/maps/embed/.../place" iframe URLs respectively.
google_maps_href_regex = re.compile('google\.[^/]+\/maps', re.I)
google_maps_embed_regex = re.compile('google\.[^/]+\/maps/embed/.*/place', re.I)
def extract_google_map_embeds(soup):
    """Find Google Maps iframes, anchors, static-map images and
    goo.gl/maps short links, yielding one item per distinct URL."""
    items = []
    iframe = soup.select('iframe[src*="maps.google"]')
    if not iframe:
        iframe = soup.find_all('iframe', src=google_maps_embed_regex)
    # Deduplicate across all four sources by URL.
    seen = set()
    if iframe:
        for f in iframe:
            u = f.get('src')
            if u not in seen:
                item = item_from_google_maps_url(u)
                if item:
                    items.append(item)
                seen.add(u)
    a_tag = soup.select('a[href*="maps.google"]')
    if not a_tag:
        a_tag = soup.find_all('a', href=google_maps_href_regex)
    if a_tag:
        for a in a_tag:
            u = a.get('href')
            if u not in seen:
                item = item_from_google_maps_url(u)
                if item:
                    items.append(item)
                seen.add(u)
    static_maps = soup.select('img[src*="maps.google"]')
    if static_maps:
        for img in static_maps:
            u = img.get('src')
            if u not in seen:
                item = item_from_google_maps_url(u)
                if item:
                    items.append(item)
                seen.add(u)
    shortener_a_tag = soup.select('a[href*="goo.gl/maps"]')
    if shortener_a_tag:
        # Bug fix: the original looped over `a_tag` (the maps.google
        # anchors) here, so goo.gl/maps short links were never emitted.
        for a in shortener_a_tag:
            u = a.get('href')
            if u not in seen:
                text = (a.text or '').strip()
                item = {
                    'item_type': GOOGLE_MAP_SHORTENED,
                    'url': u,
                }
                if text:
                    item['anchor'] = text
                items.append(item)
                seen.add(u)
    return items
def extract_data_lat_lon_attributes(soup):
    """Find tags carrying a data-lat attribute plus one of the common
    longitude data attributes, and emit a DATA_LATLON_TYPE item each."""
    results = []
    for tag in soup.find_all(attrs={'data-lat': True}):
        lat = tag['data-lat'].strip()
        lon = tag.get('data-lng',
                      tag.get('data-lon', tag.get('data-long', None)))
        if lat and lon:
            results.append({'item_type': DATA_LATLON_TYPE,
                            'latitude': lat,
                            'longitude': lon,
                            'attrs': tag.attrs})
    return results
# HopStop directions ("/route") and map ("/map") URLs on any TLD.
hopstop_route_regex = re.compile('hopstop\.[^/]+/route')
hopstop_map_regex = re.compile('hopstop\.[^/]+/map')
def extract_hopstop_direction_embeds(soup):
    """Pull the destination address/zip out of HopStop route links."""
    results = []
    for anchor in soup.find_all('a', href=hopstop_route_regex):
        qs = urlparse.urlsplit(anchor.attrs['href']).query
        if not qs:
            continue
        params = urlparse.parse_qs(qs)
        # address2/zip2 describe the destination of the route.
        if params and 'address2' in params and 'zip2' in params:
            results.append({'item_type': HOPSTOP_ROUTE_TYPE,
                            'address': params['address2'][0],
                            'postal_code': params['zip2'][0]})
    return results
def extract_hopstop_map_embeds(soup):
    """Pull the address parameter out of HopStop map links."""
    results = []
    for anchor in soup.find_all('a', href=hopstop_map_regex):
        qs = urlparse.urlsplit(anchor.attrs['href']).query
        if not qs:
            continue
        params = urlparse.parse_qs(qs)
        if params and 'address' in params:
            results.append({'item_type': HOPSTOP_MAP_TYPE,
                            'address': params['address'][0]})
    return results
# Some big sites (e.g. yellowpages.com) embed a single map pushpin as a
# JSON blob in a data-pushpin attribute.
def extract_mappoint_embeds(soup):
    """Decode a lone data-pushpin JSON attribute into a
    MAPPOINT_EMBED_TYPE item; pages with zero or multiple pushpins are
    ignored."""
    pushpins = soup.find_all(attrs={'data-pushpin': True})
    if len(pushpins) != 1:
        return []
    try:
        payload = json.loads(pushpins[0]['data-pushpin'])
        lat = payload.get('lat', payload.get('latitude'))
        lon = payload.get('lon', payload.get('long', payload.get('longitude')))
        if lat and lon:
            return [{'item_type': MAPPOINT_EMBED_TYPE,
                     'mappoint.latitude': lat,
                     'mappoint.longitude': lon}]
    except Exception:
        logger.error('Error in extracting mappoint embed: {}'.format(traceback.format_exc()))
    return []
def extract_items(soup):
    """Run every extractor against the parsed page and merge the results.

    Returns a dict with up to four keys: 'items' (all structured place
    items, in a fixed source order), 'social' (social handles), 'og'
    (raw OpenGraph tags) and the basic-metadata keys (title,
    description, ...), which are merged in at the top level.
    """
    geotag_item = extract_geotags(soup)
    # Fixed source order for the combined item list.
    grouped = (
        extract_schema_dot_org(soup),
        extract_schema_dot_org(soup, use_rdfa=True),
        extract_vcards(soup),
        extract_address_elements(soup),
        [geotag_item] if geotag_item else None,
        extract_google_map_embeds(soup),
        extract_mappoint_embeds(soup),
        extract_hopstop_direction_embeds(soup),
        extract_hopstop_map_embeds(soup),
        extract_data_lat_lon_attributes(soup),
    )
    items = []
    for group in grouped:
        if group:
            items.extend(group)
    opengraph_tags = extract_opengraph_tags(soup)
    if opengraph_tags:
        og = opengraph_item(opengraph_tags)
        if og:
            items.append(og)
    business_tags = extract_opengraph_business_tags(soup)
    if business_tags:
        business = opengraph_business(business_tags)
        if business:
            items.append(business)
    ret = {}
    if items:
        ret['items'] = items
    handles = extract_social_handles(soup)
    if handles:
        ret['social'] = handles
    if opengraph_tags:
        ret['og'] = opengraph_tags
    metadata = extract_basic_metadata(soup)
    if metadata:
        ret.update(metadata)
    return ret
|
|
""" Philips PAR file interpreter. Reads several important fields from PAR
files, and returns them in a structure. Reads the version number written by
the Philips research tools and interprets the file accordingly; the research
tools are used to extract data from the database, and data formats differ
considerably between versions. V4.x files are handled here (V3 is recognized
but not yet implemented).
function read_par
par_fname: string with complete par-file name (with path)
rec_fname: string with complete rec-file name (with path)
returns:
par: A PARFile instance
"""
from __future__ import division
import logging
import numpy as np
import re
import par_defines
from PARFile import PARFile
__all__ = ['read_par']
#Maps image def values that have multiple fields -> names of those fields
#(e.g. an 'image_angulation' column expands to _ap/_fh/_rl sub-columns).
_SUBVAR_NAMES = {
    'recon_resolution': ('x', 'y'),
    'image_angulation': ('ap', 'fh', 'rl'),
    'image_offcentre': ('ap', 'fh', 'rl'),
    'pixel_spacing': ('x', 'y'),
    'diffusion': ('ap', 'fh', 'rl'),
}
#Full-width banner that precedes the slice table in a PAR file.
#NOTE(review): not referenced in this chunk -- possibly used elsewhere.
_IMAGE_INFORMATION_LINE = ('# === IMAGE INFORMATION ==========================='
    '===============================')
def read_par(par_fname, rec_fname):
    """ Parse a Philips PAR header file into a PARFile instance.

    par_fname: complete par-file name (with path)
    rec_fname: complete rec-file name (with path); stored on the result
        but not read here.

    Returns a PARFile populated with general info, the slice table and
    derived geometry/scaling attributes. If the file cannot be read,
    returns early with par.problem_reading set. V3 headers raise
    NotImplementedError.
    """
    logger = logging.getLogger('raw2nii')
    par = PARFile()
    par.par_fname = par_fname
    par.rec_fname = rec_fname
    try:
        with open(par_fname, 'rb') as parfile:
            _skip_lines(parfile, 7)  # Skip first 7 lines
            #Version token is the last word of the version line, e.g. "V4.2".
            par.version = parfile.readline().split()[-1]
            logger.debug('PAR version: {0}'.format(par.version))
            if 'V3' == par.version:
                raise NotImplementedError
            elif par.version in ('V4', 'V4.1', 'V4.2'):
                _skip_lines(parfile, 5)
                gen_info = _parse_general_info_V4X(par, parfile)
                logger.debug('Parameters name: {0}'.format(par.gen_info))
                _parse_definition_V4X(par, parfile)
                _skip_comment_lines(parfile)
                slices = _parse_slices_V4X(par, parfile)
    #BUG FIX: under Python 2, open() raises IOError, which is NOT a
    #subclass of OSError, so read failures escaped the old
    #`except OSError` handler. On Python 3 IOError is an alias of
    #OSError, so this tuple is backward- and forward-compatible.
    except (IOError, OSError) as e:
        par.problem_reading = True
        logger.error('Failed to read par file "{0}": {1}'.format(par_fname, e))
        return par
    if par.version in ('V4', 'V4.1', 'V4.2'):
        first_row = slices[0]
        last_row = slices[-1]
        #If there is more than one slice, check the order of the
        #slice numbers. 1 = ascending order, 2 = descending order
        if slices.shape[0] > 1:
            #NOTE(review): (2, 1)[first > second] yields 1 ("ascending")
            #when the first slice number is the larger one, which looks
            #inverted relative to the comment above -- confirm against
            #the original MATLAB before changing.
            par.are_slices_sorted = (2, 1)[slices.slice_number[0] >
                slices.slice_number[1]]
        else:
            par.are_slices_sorted = True
        if gen_info.max_number_of_dynamics > 1:
            #estimate scan-duration from dtime PAR file row
            par.RT = (last_row.dyn_scan_begin_time -
                first_row.dyn_scan_begin_time) / (
                gen_info.max_number_of_dynamics - 1)
        else:
            par.RT = np.nan
        par.sliceorient = first_row.slice_orientation
        x = first_row.recon_resolution_x
        y = first_row.recon_resolution_y
        z = gen_info.max_number_of_slices_locations
        par.dim = np.array([x, y, z])
        #Uniform rescaling is only possible when every slice shares a
        #single (scale_slope, rescale_intercept, rescale_slope) triple.
        #np.prod replaces np.product, which was removed in NumPy 2.0.
        par.multi_scaling_factors = (
            np.prod(np.unique(par.slices.scale_slope).shape) != 1
            or np.prod(np.unique(par.slices.rescale_intercept).shape) != 1
            or np.prod(np.unique(par.slices.rescale_slope).shape) != 1)
        if par.multi_scaling_factors:
            logger.warning('Multiple scaling factors detected. Switching to '
                'float 32 nifti and rescaling')
            par.rescale_slope = slices.rescale_slope
            par.rescale_interc = slices.rescale_intercept
            par.scale_slope = slices.scale_slope
        else:
            par.rescale_slope = 1 / first_row.scale_slope
            par.rescale_interc = first_row.rescale_intercept
        par.bit = first_row.image_pixel_size
        par.slth = first_row.slice_thickness
        par.gap = first_row.slice_gap
        voxx = first_row.pixel_spacing_x
        voxy = first_row.pixel_spacing_y
        voxz = par.slth + par.gap
        par.vox = np.array([voxx, voxy, voxz])
        #NOTE(review): the header FOV triple is unpacked as (z, x, y);
        #verify this axis convention against the PAR documentation.
        fovz, fovx, fovy = gen_info.fov
        par.fov = np.array([fovx, fovy, fovz])
        par.fov_apfhrl = np.array([fovz, fovx, fovy])
        par.angAP, par.angFH, par.angRL = gen_info.angulation_midslice
        par.offAP, par.offFH, par.offRL = gen_info.off_centre_midslice
    _check_number_of_volumes(par)
    _check_slice_orientation(par)
    _check_slice_order(par)
    _check_dti(par)
    logger.debug('PARFile {0}'.format(par))
    return par
def _parse_general_info_V4X(par, parfile):
    """ Reads the GENERAL INFORMATION section from the PAR file.

    Each ". <description> : <value>" line becomes an attribute on
    par.gen_info: the description is sanitized to a valid identifier and
    the value coerced to an int (single token) or a float array
    (multiple tokens) when possible. Stops and rewinds one line at the
    first non-matching line. Returns par.gen_info.
    """
    line = None
    while line != '':
        pos = parfile.tell()
        line = parfile.readline()
        #Parse the useful parts of the general info entry on the left and
        #right of the colon: key and value
        m = re.search(r'\. ([^<>\(\)\[\]]*[a-zA-Z]).*: *(.*)', line)
        if not m:
            parfile.seek(pos)
            break
        key, val = m.group(1, 2)
        key = _sanitize_to_identifer(key).lower()
        #Try to guess the type of the field by conversion
        _val_split = val.split()
        if len(_val_split) > 1:
            try:
                val = np.array(tuple(float(x) for x in _val_split))
            #BUG FIX: was a bare `except:`, which also swallowed
            #KeyboardInterrupt/SystemExit; only non-numeric tokens
            #(ValueError) should fall back to the raw string value.
            except ValueError:
                pass
        else:
            try:
                val = int(val)
            except ValueError:
                pass
        setattr(par.gen_info, key, val)
    return par.gen_info
def _parse_definition_V4X(par, parfile):
    """ Reads the IMAGE INFORMATION DEFINITION section from the PAR file.

    Builds par.fields, a list of (column_name, numpy_dtype) tuples later
    handed to np.loadtxt for the slice table. Multi-valued columns are
    expanded via _SUBVAR_NAMES (e.g. image_angulation -> _ap/_fh/_rl).
    Sets par.field_len and returns par.fields.

    Raises ValueError for an unrecognized column type.
    """
    line = None
    while line != '':
        pos = parfile.tell()
        line = parfile.readline().strip()
        #Parse the useful parts of the definition entry:
        #the identifier-valid name, the number of columns, and the type
        m = re.search(r'# ([^<>\(\)\[\]]*[a-zA-Z]).*\((\d+)?[\*]?(\w+)\)', line)
        if not m:
            if not par.fields:
                #Still before the definition block: skip leading lines.
                continue
            else:
                #Past the definition block: rewind one line and stop.
                parfile.seek(pos)
                break
        var_descrip, type_len, type_descrip = m.group(1, 2, 3)
        var_name = _sanitize_to_identifer(var_descrip).lower()
        if type_len:
            type_len = int(type_len)
        else:
            type_len = 1
        #'string' should be interpreted as integer regardless
        if type_descrip == 'integer' or type_descrip == 'string':
            type_code = np.int64
        elif type_descrip == 'float':
            type_code = np.float64  # Same as MATLAB double
        else:
            #BUG FIX: was `raise ValueError(descrip)` -- `descrip` is
            #undefined, so this raised NameError instead of reporting
            #the unknown column type.
            raise ValueError(type_descrip)
        #Sub variables exist for variables that have size > 1
        #We add an underscore plus the name of the sub variable
        #i.e. image_angulation_x, image_angulation_y, image_angulation_z
        if type_len > 1:
            par.fields.extend(tuple((var_name + '_' + s, type_code)
                for s in _SUBVAR_NAMES[var_name]))
        else:
            par.fields.append((var_name, type_code))
    par.field_len = len(par.fields)
    return par.fields
def _parse_slices_V4X(par, parfile):
    """ Reads each slice line from the PAR file and calculates some metrics """
    logger = logging.getLogger('raw2nii')
    #Parse the whitespace-separated slice table into a record array using
    #the (name, dtype) columns collected by _parse_definition_V4X.
    par.slices = np.loadtxt(parfile, dtype=par.fields, ndmin=1).view(
        np.recarray)
    if not par.slices.shape:
        #A 0-d result (single slice line): force a 1-element array.
        logger.warning('par.slices has wrong shape: {0}, reshaping...'.format(
            par.slices.shape))
        par.slices = np.reshape(par.slices, (1,))
    if len(par.slices[0]) != par.field_len:
        raise ValueError('Slice tag format does not match the number of '
            'entries')
    #Determine number of interleaved image sequences (was:types,
    #name kept for historic reasons) (e.g. angio)
    par.nr_mrtypes = np.unique(par.slices.scanning_sequence).shape[0]
    #Determine number of interleaved echos
    par.nr_echos = np.unique(par.slices.echo_number).shape[0]
    #Determine number of interleaved image types (e.g. angio)
    par.nr_realmrtypes = np.unique(par.slices.image_type_mr).shape[0]
    #Determine number of diffusion gradients (e.g. DTI)
    par.nr_diffgrads = np.unique(par.slices.gradient_orientation_number
        ).shape[0]
    #Determine number of dynamics(directly from slice lines in
    #PAR file instead of PAR file header info!)
    par.nr_dyn = np.unique(par.slices.dynamic_scan_number).shape[0]
    if par.nr_dyn != par.gen_info.max_number_of_dynamics:
        logger.warning('Number of dynamics in header of PAR file does not '
            'match number of dynamics in the body')
    par.nr_bvalues = np.unique(par.slices.diffusion_b_value_number).shape[0]
    #Check if multishell
    par.is_multishell = par.nr_bvalues > 2
    #Sort the slices. Keys are listed most- to least-significant here;
    #np.lexsort treats its LAST key as the primary one.
    sort_order = (par.slices.slice_number, par.slices.dynamic_scan_number,
        par.slices.diffusion_b_value_number,
        par.slices.gradient_orientation_number, par.slices.echo_number,
        par.slices.image_type_mr, par.slices.scanning_sequence)
    if par.is_multishell:
        sort_order = sort_order[:2] + (sort_order[3], sort_order[2]) + (
            sort_order[4:]) # Swap diffusion b value and gradient orientation
    else:
        pass # B0 and B1 diffusion weighting
    par.slices_sorted = par.slices[np.lexsort(sort_order)]
    return par.slices
def _check_number_of_volumes(par):
logger = logging.getLogger('raw2nii')
slices = par.slices
par.NumberOfVolumes = np.unique(slices.dynamic_scan_number).shape[0]
NoV = (slices.dynamic_scan_number.shape[0] //
np.unique(slices.slice_number).shape[0])
if NoV != par.NumberOfVolumes:
logger.warning('Dynamic Scan Number does not match number of slices.'
'Assuming slices are ordered.')
#This code, transliterated from original override, is not quite right.
#It also seems nonessential
#cnt = np.zeros((max(slices.slice_number),))
#for s in slices:
# cnt[s.slice_number - 1] += 1
# s.dynamic_scan_number = cnt[s.slice_number - 1]
#par.NumberOfVolumes = np.unique(slices.dynamic_scan_number).shape[0]
par.NumberOfVolumes = NoV
def _check_slice_orientation(par):
    """ Reconcile slice thickness and gap with FOV / number of slices.

    Mutates par.gen_info.fov in place (equalizes the two in-plane
    extents), adjusts par.slth to fov/num_slices when they disagree, and
    folds a non-zero par.gap into par.slth.

    Raises ValueError for an unrecognized par.sliceorient (previously an
    unknown orientation fell through to an unbound local and raised a
    confusing NameError at the allclose check).
    """
    logger = logging.getLogger('raw2nii')
    fov = par.gen_info.fov
    if par.sliceorient == par_defines.ORIENT_TRA:
        if not np.allclose(fov[0], fov[2]):
            logger.warning('AXIAL (TRA): par.gen_info.fov[0] != '
                'par.gen_info.fov[2]. Setting to max')
            fov[[0, 2]] = np.max(fov[[0, 2]])
        fov_div_slices = (fov[1] / par.gen_info.max_number_of_slices_locations)
    elif par.sliceorient == par_defines.ORIENT_COR:
        if not np.allclose(fov[1], fov[2]):
            #BUG FIX: warning label was copy-pasted as 'AXIAL (COR)'.
            logger.warning('CORONAL (COR): par.gen_info.fov[1] != '
                'par.gen_info.fov[2]. Setting to max')
            fov[[1, 2]] = np.max(fov[[1, 2]])
        fov_div_slices = (fov[0] / par.gen_info.max_number_of_slices_locations)
    elif par.sliceorient == par_defines.ORIENT_SAG:
        if not np.allclose(fov[1], fov[0]):
            #BUG FIX: warning label was copy-pasted as 'AXIAL (SAG)'.
            logger.warning('SAGITTAL (SAG): par.gen_info.fov[1] != '
                'par.gen_info.fov[0]. Setting to max')
            fov[[0, 1]] = np.max(fov[[0, 1]])
        fov_div_slices = (fov[2] / par.gen_info.max_number_of_slices_locations)
    else:
        raise ValueError('Unknown slice orientation: {0}'.format(
            par.sliceorient))
    if not np.allclose(par.slth, fov_div_slices):
        logger.warning('Slice Thickness does not match fov/num_slices. '
            'ADJUSTING!!!')
        par.slth = fov_div_slices
    if not np.allclose(par.gap, 0):
        logger.warning('Non-zero slice gap: adjusting slice thickness')
        par.slth += par.gap
        par.gap = 0
def _check_slice_order(par):
""" Determine File Volume/Slice Order (inputVolumeSliceOrder):
a) volume - all slices are listed (in order) for each volume before
the next volume
b) slices - the same slice is listed for all volumes before the next
slice of the first volume (volumes are ordered)
c) other - some other ordering (any ordering of volumes/slices is
supported in the PAR file format)
Procedure: Build a matrix with each row having: [VOLUME SLICE IDX] """
logger = logging.getLogger('raw2nii')
slices = par.slices
N = slices.shape[0]
SRT = np.concatenate((slices.dynamic_scan_number[np.newaxis].T,
slices.slice_number[np.newaxis].T, np.arange(N)[np.newaxis].T),
axis=1).reshape(N, 3)
SRT = SRT[np.lexsort((SRT[:,0], SRT[:,1], SRT[:,2]))]
is_in_volume_order = np.all(SRT[:,2] == np.arange(N))
if is_in_volume_order:
par.inputVolumeSliceOrder = 'volume'
else:
SRT = SRT[np.lexsort((SRT[:,1], SRT[:,0], SRT[:,2]))]
is_in_slice_order = np.all(SRT[:,2] == np.arange(N))
if is_in_slice_order:
par.inputVolumeSliceOrder = 'slice'
else:
par.inputVolumeSliceOrder = 'unknown'
logger.warning('Slice ordering is not a predefined type.')
logger.info('This toolbox is compatible with arbitrary '
'ordering of slices in PAR/REC files.')
logger.info('However, other toolboxes or REC readers may '
'assume a specific ordering.')
def _check_dti(par):
par.dti_revertb0 = np.allclose(par.gen_info.diffusion, 1) # True if dti
def _sanitize_to_identifer(name):
""" Changes a string with various characters into an identifier that is
valid for python. i.e. 'minimum RR-interval' -> 'minimum_RR_interval'
"""
n = name.strip()
n = re.sub('/', ' ', n)
n = re.sub('-', ' ', n)
n = re.sub(' +', '_', n)
n = re.sub('[\W]+', '', n)
return n
def _skip_lines(parfile, n):
for i in range(n):
parfile.readline()
def _skip_comment_lines(parfile):
line = None
while line != '':
pos = parfile.tell()
line = parfile.readline()
if not line.startswith('#') and not line.strip() == '':
parfile.seek(pos)
break
|
|
from __future__ import with_statement
from decimal import Decimal, InvalidOperation
import time
from django.core import serializers
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.db.utils import DatabaseError
from django.dispatch.dispatcher import receiver
from django.test import TestCase
from django.utils.unittest import expectedFailure, skip
from .fields import ListField, SetField, DictField, EmbeddedModelField
def count_calls(func):
    """Wrap *func*, counting invocations on the wrapper's ``calls``
    attribute (starts at 0)."""
    def counting_wrapper(*args, **kwargs):
        counting_wrapper.calls += 1
        return func(*args, **kwargs)
    counting_wrapper.calls = 0
    return counting_wrapper
class Target(models.Model):
    """Minimal related model; used as a ForeignKey target in tests."""
    index = models.IntegerField()
class Source(models.Model):
    """Points at Target; exercised by select_related/order_by tests."""
    target = models.ForeignKey(Target)
    index = models.IntegerField()
class DecimalModel(models.Model):
    """Holds a plain DecimalField."""
    decimal = models.DecimalField(max_digits=9, decimal_places=2)
class DecimalKey(models.Model):
    """Model whose primary key is a DecimalField."""
    decimal = models.DecimalField(max_digits=9, decimal_places=2, primary_key=True)
class DecimalParent(models.Model):
    """ForeignKey onto a decimal primary key (value-conversion tests)."""
    child = models.ForeignKey(DecimalKey)
class DecimalsList(models.Model):
    """ListField of foreign keys onto a decimal primary key."""
    decimals = ListField(models.ForeignKey(DecimalKey))
class ListModel(models.Model):
    """Integer-keyed model with several ListField variants."""
    integer = models.IntegerField(primary_key=True)
    floating_point = models.FloatField()
    names = ListField(models.CharField)
    # default=[] is a shared mutable by convention; test_default_value
    # verifies the field copies it per instance.
    names_with_default = ListField(models.CharField(max_length=500),
        default=[])
    names_nullable = ListField(models.CharField(max_length=500), null=True)
class OrderedListModel(models.Model):
    """ListField kept sorted via an ordering callable; the callable is
    wrapped with count_calls so tests can count its invocations."""
    ordered_ints = ListField(models.IntegerField(max_length=500), default=[],
        ordering=count_calls(lambda x: x), null=True)
    ordered_nullable = ListField(ordering=lambda x: x, null=True)
class SetModel(models.Model):
    """Single SetField of integers."""
    setfield = SetField(models.IntegerField())
class DictModel(models.Model):
    """DictField variants, including auto_now datetime values."""
    dictfield = DictField(models.IntegerField)
    dictfield_nullable = DictField(null=True)
    auto_now = DictField(models.DateTimeField(auto_now=True))
class EmbeddedModelFieldModel(models.Model):
    """Container exercising EmbeddedModelField in every supported spot:
    typed/untyped, standalone, and nested inside List/Dict fields."""
    simple = EmbeddedModelField('EmbeddedModel', null=True)
    simple_untyped = EmbeddedModelField(null=True)
    decimal_parent = EmbeddedModelField(DecimalParent, null=True)
    typed_list = ListField(EmbeddedModelField('SetModel'))
    typed_list2 = ListField(EmbeddedModelField('EmbeddedModel'))
    untyped_list = ListField(EmbeddedModelField())
    untyped_dict = DictField(EmbeddedModelField())
    ordered_list = ListField(EmbeddedModelField(),
        ordering=lambda obj: obj.index)
class EmbeddedModel(models.Model):
    """Embedded payload with auto_now/auto_now_add fields (to verify
    pre_save handling), an optional FK and a custom db_column."""
    some_relation = models.ForeignKey(DictModel, null=True)
    someint = models.IntegerField(db_column='custom')
    auto_now = models.DateTimeField(auto_now=True)
    auto_now_add = models.DateTimeField(auto_now_add=True)
class IterableFieldsTest(TestCase):
    """Lookups and options on ListField / SetField / DictField.

    setUp stores four ListModel rows whose ``names`` lists are growing
    prefixes of ``names``, so membership lookups can be verified against
    the known prefixes. The deprecated ``assertEquals`` alias has been
    replaced with ``assertEqual`` throughout.
    """
    floats = [5.3, 2.6, 9.1, 1.58]
    names = [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']
    unordered_ints = [4, 2, 6, 1]
    def setUp(self):
        # Loop variable renamed from `float`, which shadowed the builtin.
        for i, value in zip(range(1, 5), IterableFieldsTest.floats):
            ListModel(integer=i, floating_point=value,
                names=IterableFieldsTest.names[:i]).save()
    def test_startswith(self):
        self.assertEqual(
            dict([(entity.pk, entity.names) for entity in
                ListModel.objects.filter(names__startswith='Sa')]),
            dict([(3, ['Kakashi', 'Naruto', 'Sasuke']),
                (4, ['Kakashi', 'Naruto', 'Sasuke', 'Sakura']), ]))
    def test_options(self):
        """default/null ListField options survive a save/load cycle."""
        self.assertEqual([entity.names_with_default for entity in
            ListModel.objects.filter(names__startswith='Sa')],
            [[], []])
        self.assertEqual([entity.names_nullable for entity in
            ListModel.objects.filter(names__startswith='Sa')],
            [None, None])
    def test_default_value(self):
        # Make sure default value is copied.
        ListModel().names_with_default.append(2)
        self.assertEqual(ListModel().names_with_default, [])
    def test_ordering(self):
        f = OrderedListModel._meta.fields[1]
        f.ordering.calls = 0
        # Ensure no ordering happens on assignment.
        obj = OrderedListModel()
        obj.ordered_ints = self.unordered_ints
        self.assertEqual(f.ordering.calls, 0)
        obj.save()
        self.assertEqual(OrderedListModel.objects.get().ordered_ints,
            sorted(self.unordered_ints))
        # Ordering should happen only once, i.e. the order function may
        # be called N times at most (N being the number of items in the
        # list).
        self.assertLessEqual(f.ordering.calls, len(self.unordered_ints))
    def test_gt(self):
        self.assertEqual(
            dict([(entity.pk, entity.names) for entity in
                ListModel.objects.filter(names__gt='Kakashi')]),
            dict([(2, [u'Kakashi', u'Naruto']),
                (3, [u'Kakashi', u'Naruto', u'Sasuke']),
                (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_lt(self):
        self.assertEqual(
            dict([(entity.pk, entity.names) for entity in
                ListModel.objects.filter(names__lt='Naruto')]),
            dict([(1, [u'Kakashi']),
                (2, [u'Kakashi', u'Naruto']),
                (3, [u'Kakashi', u'Naruto', u'Sasuke']),
                (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_gte(self):
        self.assertEqual(
            dict([(entity.pk, entity.names) for entity in
                ListModel.objects.filter(names__gte='Sakura')]),
            dict([(3, [u'Kakashi', u'Naruto', u'Sasuke']),
                (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_lte(self):
        self.assertEqual(
            dict([(entity.pk, entity.names) for entity in
                ListModel.objects.filter(names__lte='Kakashi')]),
            dict([(1, [u'Kakashi']),
                (2, [u'Kakashi', u'Naruto']),
                (3, [u'Kakashi', u'Naruto', u'Sasuke']),
                (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_equals(self):
        self.assertEqual([entity.names for entity in
            ListModel.objects.filter(names='Sakura')],
            [[u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']])
        # Test with additonal pk filter (for DBs that have special pk
        # queries).
        query = ListModel.objects.filter(names='Sakura')
        self.assertEqual(query.get(pk=query[0].pk).names,
            [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura'])
    def test_is_null(self):
        self.assertEqual(ListModel.objects.filter(
            names__isnull=True).count(), 0)
    def test_exclude(self):
        self.assertEqual(
            dict([(entity.pk, entity.names) for entity in
                ListModel.objects.all().exclude(names__lt='Sakura')]),
            dict([(3, [u'Kakashi', u'Naruto', u'Sasuke']),
                (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_chained_filter(self):
        self.assertEqual(
            [entity.names for entity in ListModel.objects
                .filter(names='Sasuke').filter(names='Sakura')],
            [['Kakashi', 'Naruto', 'Sasuke', 'Sakura'], ])
        self.assertEqual(
            [entity.names for entity in ListModel.objects
                .filter(names__startswith='Sa').filter(names='Sakura')],
            [['Kakashi', 'Naruto', 'Sasuke', 'Sakura']])
        # Test across multiple columns. On app engine only one filter
        # is allowed to be an inequality filter.
        self.assertEqual(
            [entity.names for entity in ListModel.objects
                .filter(floating_point=9.1).filter(names__startswith='Sa')],
            [['Kakashi', 'Naruto', 'Sasuke'], ])
    def test_setfield(self):
        setdata = [1, 2, 3, 2, 1]
        # At the same time test value conversion.
        SetModel(setfield=map(str, setdata)).save()
        item = SetModel.objects.filter(setfield=3)[0]
        self.assertEqual(item.setfield, set(setdata))
        # This shouldn't raise an error because the default value is
        # an empty list.
        SetModel().save()
    def test_dictfield(self):
        DictModel(dictfield=dict(a=1, b='55', foo=3.14),
            auto_now={'a': None}).save()
        item = DictModel.objects.get()
        self.assertEqual(item.dictfield, {u'a': 1, u'b': 55, u'foo': 3})
        dt = item.auto_now['a']
        self.assertNotEqual(dt, None)
        item.save()
        time.sleep(0.5) # Sleep to avoid false positive failure on the assertion below
        self.assertGreater(DictModel.objects.get().auto_now['a'], dt)
        item.delete()
        # Saving empty dicts shouldn't throw errors.
        DictModel().save()
        # Regression tests for djangoappengine issue #39.
        DictModel.add_to_class('new_dict_field', DictField())
        DictModel.objects.get()
    @skip("GAE specific?")
    def test_Q_objects(self):
        self.assertEqual(
            [entity.names for entity in ListModel.objects
                .exclude(Q(names__lt='Sakura') | Q(names__gte='Sasuke'))],
            [['Kakashi', 'Naruto', 'Sasuke', 'Sakura']])
    def test_list_with_foreignkeys(self):
        class ReferenceList(models.Model):
            keys = ListField(models.ForeignKey('Model'))
        class Model(models.Model):
            pass
        model1 = Model.objects.create()
        model2 = Model.objects.create()
        ReferenceList.objects.create(keys=[model1.pk, model2.pk])
        self.assertEqual(ReferenceList.objects.get().keys[0], model1.pk)
        self.assertEqual(ReferenceList.objects.filter(keys=model1.pk).count(), 1)
    def test_list_with_foreign_conversion(self):
        decimal = DecimalKey.objects.create(decimal=Decimal('1.5'))
        DecimalsList.objects.create(decimals=[decimal.pk])
    @expectedFailure
    def test_nested_list(self):
        """
        Some back-ends expect lists to be strongly typed or not contain
        other lists (e.g. GAE), this limits how the ListField can be
        used (unless the back-end were to serialize all lists).
        """
        class UntypedListModel(models.Model):
            untyped_list = ListField()
        UntypedListModel.objects.create(untyped_list=[1, [2, 3]])
class Child(models.Model):
    """Empty embedded payload for Parent's collection fields."""
    pass
class Parent(models.Model):
    """Explicit integer pk plus typed collections; used by test_update."""
    id = models.IntegerField(primary_key=True)
    integer_list = ListField(models.IntegerField)
    integer_dict = DictField(models.IntegerField)
    embedded_list = ListField(EmbeddedModelField(Child))
    embedded_dict = DictField(EmbeddedModelField(Child))
class EmbeddedModelFieldTest(TestCase):
    """Persistence, pre_save handling and value conversion for
    EmbeddedModelField, standalone and inside List/Dict containers."""
    def assertEqualDatetime(self, d1, d2):
        """Compares d1 and d2, ignoring microseconds."""
        self.assertEqual(d1.replace(microsecond=0),
            d2.replace(microsecond=0))
    def assertNotEqualDatetime(self, d1, d2):
        # Inverse of assertEqualDatetime: must differ at second resolution.
        self.assertNotEqual(d1.replace(microsecond=0),
            d2.replace(microsecond=0))
    def _simple_instance(self):
        # Create-and-reload helper: the embedded value makes a full
        # database round trip before assertions run.
        EmbeddedModelFieldModel.objects.create(
            simple=EmbeddedModel(someint='5'))
        return EmbeddedModelFieldModel.objects.get()
    def test_simple(self):
        instance = self._simple_instance()
        self.assertIsInstance(instance.simple, EmbeddedModel)
        # Make sure get_prep_value is called.
        self.assertEqual(instance.simple.someint, 5)
        # Primary keys should not be populated...
        self.assertEqual(instance.simple.id, None)
        # ... unless set explicitly.
        instance.simple.id = instance.id
        instance.save()
        instance = EmbeddedModelFieldModel.objects.get()
        self.assertEqual(instance.simple.id, instance.id)
    def _test_pre_save(self, instance, get_field):
        """Shared body: *get_field* extracts the embedded object whose
        auto_now/auto_now_add behavior is being checked."""
        # Make sure field.pre_save is called for embedded objects.
        from time import sleep
        instance.save()
        auto_now = get_field(instance).auto_now
        auto_now_add = get_field(instance).auto_now_add
        self.assertNotEqual(auto_now, None)
        self.assertNotEqual(auto_now_add, None)
        sleep(1) # FIXME
        instance.save()
        self.assertNotEqualDatetime(get_field(instance).auto_now,
            get_field(instance).auto_now_add)
        instance = EmbeddedModelFieldModel.objects.get()
        instance.save()
        # auto_now_add shouldn't have changed now, but auto_now should.
        self.assertEqualDatetime(get_field(instance).auto_now_add,
            auto_now_add)
        self.assertGreater(get_field(instance).auto_now, auto_now)
    def test_pre_save(self):
        obj = EmbeddedModelFieldModel(simple=EmbeddedModel())
        self._test_pre_save(obj, lambda instance: instance.simple)
    def test_pre_save_untyped(self):
        obj = EmbeddedModelFieldModel(simple_untyped=EmbeddedModel())
        self._test_pre_save(obj, lambda instance: instance.simple_untyped)
    def test_pre_save_in_list(self):
        obj = EmbeddedModelFieldModel(untyped_list=[EmbeddedModel()])
        self._test_pre_save(obj, lambda instance: instance.untyped_list[0])
    def test_pre_save_in_dict(self):
        obj = EmbeddedModelFieldModel(untyped_dict={'a': EmbeddedModel()})
        self._test_pre_save(obj, lambda instance: instance.untyped_dict['a'])
    def test_pre_save_list(self):
        # Also make sure auto_now{,add} works for embedded object *lists*.
        EmbeddedModelFieldModel.objects.create(typed_list2=[EmbeddedModel()])
        instance = EmbeddedModelFieldModel.objects.get()
        auto_now = instance.typed_list2[0].auto_now
        auto_now_add = instance.typed_list2[0].auto_now_add
        self.assertNotEqual(auto_now, None)
        self.assertNotEqual(auto_now_add, None)
        instance.typed_list2.append(EmbeddedModel())
        instance.save()
        instance = EmbeddedModelFieldModel.objects.get()
        self.assertEqualDatetime(instance.typed_list2[0].auto_now_add,
            auto_now_add)
        self.assertGreater(instance.typed_list2[0].auto_now, auto_now)
        self.assertNotEqual(instance.typed_list2[1].auto_now, None)
        self.assertNotEqual(instance.typed_list2[1].auto_now_add, None)
    def test_error_messages(self):
        # Wrong embedded types must be rejected with a helpful TypeError.
        for kwargs, expected in (
                ({'simple': 42}, EmbeddedModel),
                ({'simple_untyped': 42}, models.Model),
                ({'typed_list': [EmbeddedModel()]}, SetModel)):
            self.assertRaisesRegexp(
                TypeError, "Expected instance of type %r." % expected,
                EmbeddedModelFieldModel(**kwargs).save)
    def test_typed_listfield(self):
        EmbeddedModelFieldModel.objects.create(
            typed_list=[SetModel(setfield=range(3)),
                SetModel(setfield=range(9))],
            ordered_list=[Target(index=i) for i in xrange(5, 0, -1)])
        obj = EmbeddedModelFieldModel.objects.get()
        self.assertIn(5, obj.typed_list[1].setfield)
        # ordered_list orders by obj.index, so the reload is ascending.
        self.assertEqual([target.index for target in obj.ordered_list],
            range(1, 6))
    def test_untyped_listfield(self):
        EmbeddedModelFieldModel.objects.create(untyped_list=[
            EmbeddedModel(someint=7),
            OrderedListModel(ordered_ints=range(5, 0, -1)),
            SetModel(setfield=[1, 2, 2, 3])])
        instances = EmbeddedModelFieldModel.objects.get().untyped_list
        for instance, cls in zip(instances,
                [EmbeddedModel, OrderedListModel, SetModel]):
            self.assertIsInstance(instance, cls)
        self.assertNotEqual(instances[0].auto_now, None)
        self.assertEqual(instances[1].ordered_ints, range(1, 6))
    def test_untyped_dict(self):
        EmbeddedModelFieldModel.objects.create(untyped_dict={
            'a': SetModel(setfield=range(3)),
            'b': DictModel(dictfield={'a': 1, 'b': 2}),
            'c': DictModel(dictfield={}, auto_now={'y': 1})})
        data = EmbeddedModelFieldModel.objects.get().untyped_dict
        self.assertIsInstance(data['a'], SetModel)
        self.assertNotEqual(data['c'].auto_now['y'], None)
    def test_foreignkey_in_embedded_object(self):
        simple = EmbeddedModel(some_relation=DictModel.objects.create())
        obj = EmbeddedModelFieldModel.objects.create(simple=simple)
        simple = EmbeddedModelFieldModel.objects.get().simple
        # Only the FK id is stored on the embedded object, not the
        # fetched relation itself.
        self.assertNotIn('some_relation', simple.__dict__)
        self.assertIsInstance(simple.__dict__['some_relation_id'],
            type(obj.id))
        self.assertIsInstance(simple.some_relation, DictModel)
    def test_embedded_field_with_foreign_conversion(self):
        decimal = DecimalKey.objects.create(decimal=Decimal('1.5'))
        decimal_parent = DecimalParent.objects.create(child=decimal)
        EmbeddedModelFieldModel.objects.create(decimal_parent=decimal_parent)
    def test_update(self):
        """
        Test that update can be used on an a subset of objects
        containing collections of embedded instances; see issue #13.
        Also ensure that updated values are coerced according to
        collection field.
        """
        child1 = Child.objects.create()
        child2 = Child.objects.create()
        parent = Parent.objects.create(pk=1,
            integer_list=[1], integer_dict={'a': 2},
            embedded_list=[child1], embedded_dict={'a': child2})
        Parent.objects.filter(pk=1).update(
            integer_list=['3'], integer_dict={'b': '3'},
            embedded_list=[child2], embedded_dict={'b': child1})
        parent = Parent.objects.get()
        self.assertEqual(parent.integer_list, [3])
        self.assertEqual(parent.integer_dict, {'b': 3})
        self.assertEqual(parent.embedded_list, [child2])
        self.assertEqual(parent.embedded_dict, {'b': child1})
class BaseModel(models.Model):
    """Concrete root for the proxy / multi-table inheritance tests."""
    pass
class ExtendedModel(BaseModel):
    """Multi-table child of BaseModel."""
    name = models.CharField(max_length=20)
class BaseModelProxy(BaseModel):
    """Proxy of the concrete root (should work on nonrel back-ends)."""
    class Meta:
        proxy = True
class ExtendedModelProxy(ExtendedModel):
    """Proxy of a multi-table child (expected to fail on nonrel)."""
    class Meta:
        proxy = True
class ProxyTest(TestCase):
    """Proxy models must be queryable iff their concrete chain is."""
    def test_proxy(self):
        # A proxy of a concrete root model queries that model directly.
        list(BaseModelProxy.objects.all())
    def test_proxy_with_inheritance(self):
        # Multi-table inheritance is unsupported on nonrel back-ends,
        # so querying a proxy of such a child must fail.
        def fetch_extended_proxy():
            return list(ExtendedModelProxy.objects.all())
        self.assertRaises(DatabaseError, fetch_extended_proxy)
class SignalTest(TestCase):
    def test_post_save(self):
        """post_save must report created=True exactly once (first save)
        and created=False for every later save, no matter how the
        instance was (re)loaded -- get(), queryset indexing, or
        select_related()."""
        created = []
        @receiver(post_save, sender=SetModel)
        def handle(**kwargs):
            created.append(kwargs['created'])
        SetModel().save()
        self.assertEqual(created, [True])
        SetModel.objects.get().save()
        self.assertEqual(created, [True, False])
        qs = SetModel.objects.all()
        list(qs)[0].save()
        self.assertEqual(created, [True, False, False])
        list(qs)[0].save()
        self.assertEqual(created, [True, False, False, False])
        list(qs.select_related())[0].save()
        self.assertEqual(created, [True, False, False, False, False])
class SelectRelatedTest(TestCase):
    def test_select_related(self):
        """select_related -- with and without an explicit field name --
        must hand back the FK target with matching pk and data."""
        target = Target(index=5)
        target.save()
        Source(target=target, index=8).save()
        for queryset in (Source.objects.all().select_related(),
                Source.objects.all().select_related('target')):
            source = queryset[0]
            self.assertEqual(source.target.pk, target.pk)
            self.assertEqual(source.target.index, target.index)
class DBColumn(models.Model):
    """Field stored under a custom database column name ('b')."""
    a = models.IntegerField(db_column='b')
class OrderByTest(TestCase):
    """Ordering by FK, by a custom db_column, via reverse(), and with
    chained order_by() calls."""
    def test_foreign_keys(self):
        target_a = Target.objects.create(index=1)
        target_b = Target.objects.create(index=2)
        source_a = Source.objects.create(target=target_a, index=3)
        source_b = Source.objects.create(target=target_b, index=4)
        self.assertEqual(list(Source.objects.all().order_by('target')),
            [source_a, source_b])
        self.assertEqual(list(Source.objects.all().order_by('-target')),
            [source_b, source_a])
    def test_db_column(self):
        # Ordering must use the model field name, not the raw column.
        low = DBColumn.objects.create(a=1)
        high = DBColumn.objects.create(a=2)
        self.assertEqual(list(DBColumn.objects.all().order_by('a')),
            [low, high])
        self.assertEqual(list(DBColumn.objects.all().order_by('-a')),
            [high, low])
    def test_reverse(self):
        low = DBColumn.objects.create(a=1)
        high = DBColumn.objects.create(a=2)
        self.assertEqual(list(DBColumn.objects.all().order_by('a').reverse()),
            [high, low])
        self.assertEqual(list(DBColumn.objects.all().order_by('-a').reverse()),
            [low, high])
    def test_chain(self):
        # The last order_by() in a chain wins.
        first = Target.objects.create(index=1)
        second = Target.objects.create(index=2)
        self.assertEqual(
            list(Target.objects.all().order_by('index').order_by('-index')),
            [second, first])
class SerializableSetModel(models.Model):
    """SetField variants exercised by the JSON serialization tests."""
    setfield = SetField(models.IntegerField())
    setcharfield = SetField(models.CharField(), null=True)
class SerializationTest(TestCase):
    """
    JSON doesn't support sets, so they need to be converted to lists
    for serialization; see issue #12.
    TODO: Check if the fix works with embedded models / nested sets.
    """
    names = ['foo', 'bar', 'baz', 'monkey']
    def test_json_listfield(self):
        # Store growing prefixes of `names`, round-trip through JSON,
        # and check each list came back intact.
        for size in range(1, 5):
            ListModel(integer=size, floating_point=0,
                names=SerializationTest.names[:size]).save()
        payload = serializers.serialize('json', ListModel.objects.all())
        for wrapper in serializers.deserialize('json', payload):
            restored = wrapper.object
            self.assertEqual(restored.names,
                SerializationTest.names[:restored.integer])
    def test_json_setfield(self):
        # Same round trip for sets; the singleton setfield encodes which
        # prefix the char set should contain.
        for size in range(1, 5):
            SerializableSetModel(
                setfield=set([size - 1]),
                setcharfield=set(SerializationTest.names[:size])).save()
        payload = serializers.serialize('json',
            SerializableSetModel.objects.all())
        for wrapper in serializers.deserialize('json', payload):
            index = wrapper.object.setfield.pop()
            self.assertEqual(wrapper.object.setcharfield,
                set(SerializationTest.names[:index + 1]))
class String(models.Model):
    """Single short string column for lazy/marked string lookup tests."""
    s = models.CharField(max_length=20)
class LazyObjectsTest(TestCase):
    def test_translation(self):
        """
        Using a lazy translation call should work just the same as
        a non-lazy one (or a plain string).
        """
        from django.utils.translation import ugettext_lazy
        a = String.objects.create(s='a')
        b = String.objects.create(s=ugettext_lazy('b'))
        # 'a' is probed with __lte and 'b' with __gte; each value is
        # queried both as a plain string and as a lazy translation.
        for obj, raw, range_lookup in ((a, 'a', 's__lte'),
                (b, 'b', 's__gte')):
            self.assertEqual(String.objects.get(s=raw), obj)
            self.assertEqual(list(String.objects.filter(s=raw)), [obj])
            self.assertEqual(
                list(String.objects.filter(**{range_lookup: raw})), [obj])
            self.assertEqual(String.objects.get(s=ugettext_lazy(raw)), obj)
            self.assertEqual(
                list(String.objects.filter(
                    **{range_lookup: ugettext_lazy(raw)})), [obj])
    def test_marked_strings(self):
        """
        Check that strings marked as safe or needing escaping do not
        confuse the back-end.
        """
        from django.utils.safestring import mark_safe, mark_for_escaping
        a = String.objects.create(s='a')
        b = String.objects.create(s=mark_safe('b'))
        c = String.objects.create(s=mark_for_escaping('c'))
        # Every stored row must be findable via a plain, safe-marked and
        # escape-marked version of its value, by exact and prefix match.
        for obj, raw in ((a, 'a'), (b, 'b'), (c, 'c')):
            for needle in (raw, mark_safe(raw), mark_for_escaping(raw)):
                self.assertEqual(String.objects.get(s=needle), obj)
                self.assertEqual(
                    list(String.objects.filter(s__startswith=needle)), [obj])
class FeaturesTest(TestCase):
    """
    Some things are unlikely to cause problems for SQL back-ends, but
    require special handling in nonrel.
    """

    def test_subqueries(self):
        """
        Django includes SQL statements as WHERE tree values when
        filtering using a QuerySet -- this won't "just work" with
        nonrel back-ends.

        TODO: Subqueries handling may require a bit of Django
        changing, but should be easy to support.
        """
        target = Target.objects.create(index=1)
        source = Source.objects.create(index=2, target=target)
        targets = Target.objects.all()

        # Passing an unevaluated QuerySet would require a subquery,
        # which nonrel back-ends cannot execute.
        with self.assertRaises(DatabaseError):
            Source.objects.get(target__in=targets)

        # Materializing the QuerySet first works around the limitation.
        self.assertEqual(
            Source.objects.get(target__in=list(targets)),
            source)
class DecimalFieldTest(TestCase):
    """
    Some NoSQL databases can't handle Decimals, so respective back-ends
    convert them to strings or floats. This can cause some precision
    and sorting problems.
    """

    def setUp(self):
        # A deliberately unsorted mix of widths and precisions so the
        # ordering test below is meaningful.
        for d in (Decimal('12345.6789'), Decimal('5'), Decimal('345.67'),
                  Decimal('45.6'), Decimal('2345.678'),):
            DecimalModel(decimal=d).save()

    def test_filter(self):
        # Values come back as real Decimals, quantized to the field's
        # two decimal places.
        d = DecimalModel.objects.get(decimal=Decimal('5.0'))
        self.assertTrue(isinstance(d.decimal, Decimal))
        self.assertEqual(str(d.decimal), '5.00')

        d = DecimalModel.objects.get(decimal=Decimal('45.60'))
        self.assertEqual(str(d.decimal), '45.60')

        # Filter argument should be converted to Decimal with 2 decimal
        # places.
        d = DecimalModel.objects.get(decimal='0000345.67333333333333333')
        self.assertEqual(str(d.decimal), '345.67')

    def test_order(self):
        """
        Standard Django decimal-to-string conversion isn't monotonic
        (see `django.db.backends.util.format_number`).
        """
        rows = DecimalModel.objects.all().order_by('decimal')
        values = list(d.decimal for d in rows)
        self.assertEqual(values, sorted(values))

    def test_sign_extend(self):
        DecimalModel(decimal=Decimal('-0.0')).save()
        try:
            # If we've written a valid string we should be able to
            # retrieve the DecimalModel object without error.
            DecimalModel.objects.filter(decimal__lt=1)[0]
        except InvalidOperation:
            self.fail("Fetching a stored negative-zero Decimal raised "
                      "InvalidOperation")
|
|
#!/usr/bin/env python
"""This file implements a VFS abstraction on the client."""
from grr.client import client_utils
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
# A central Cache for vfs handlers. This can be used to keep objects alive
# for a limited time.
DEVICE_CACHE = utils.TimeBasedCache()
class VFSHandler(object):
  """Base class for handling objects in the VFS."""

  # PathType value this handler serves; -1 marks the abstract base.
  supported_pathtype = -1

  # Should this handler be auto-registered?
  auto_register = False

  # Total size of the underlying object and the current read offset.
  size = 0
  offset = 0

  # This is the VFS path to this specific handler.
  path = "/"

  # This will be set by the VFSOpen factory to the pathspec of the final
  # destination of this handler. This pathspec will be case corrected and
  # updated to reflect any potential recursion.
  pathspec = None
  base_fd = None

  __metaclass__ = registry.MetaclassRegistry

  def __init__(self, base_fd, pathspec=None, progress_callback=None):
    """Constructor.

    Args:
      base_fd: A handler to the predecessor handler.
      pathspec: The pathspec to open.
      progress_callback: A callback to indicate that the open call is still
                         working but needs more time.

    Raises:
      IOError: if this handler can not be instantiated over the
        requested path.
    """
    _ = pathspec
    self.base_fd = base_fd
    self.progress_callback = progress_callback
    if base_fd is None:
      self.pathspec = rdfvalue.PathSpec()
    else:
      # Make a copy of the base pathspec.
      self.pathspec = base_fd.pathspec.Copy()
    self.metadata = {}

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    # Close on exit; never suppress the in-flight exception.
    self.Close()
    return False

  def Seek(self, offset, whence=0):
    """Seek to an offset in the file."""
    # Same whence semantics as file.seek: 0=absolute, 1=relative,
    # 2=relative to end of file.
    if whence == 0:
      self.offset = offset
    elif whence == 1:
      self.offset += offset
    elif whence == 2:
      self.offset = self.size + offset
    else:
      raise RuntimeError("Illegal whence value %s" % whence)

  def Read(self, length):
    """Reads some data from the file."""
    raise NotImplementedError

  def Stat(self):
    """Returns a StatResponse proto about this file."""
    raise NotImplementedError

  def IsDirectory(self):
    """Returns true if this object can contain other objects."""
    raise NotImplementedError

  def Tell(self):
    # Current read offset within the file.
    return self.offset

  def Close(self):
    """Close internal file descriptors."""

  def OpenAsContainer(self):
    """Guesses a container from the current object."""
    if self.IsDirectory():
      return self

    # TODO(user): Add support for more container here (e.g. registries, zip
    # files etc).
    else:  # For now just guess TSK.
      return VFS_HANDLERS[rdfvalue.PathSpec.PathType.TSK](
          self, rdfvalue.PathSpec(path="/",
                                  pathtype=rdfvalue.PathSpec.PathType.TSK),
          progress_callback=self.progress_callback)

  def MatchBestComponentName(self, component):
    """Returns the name of the component which matches best our base listing.

    In order to do the best case insensitive matching we list the files in the
    base handler and return the base match for this component.

    Args:
      component: A component name which should be present in this directory.

    Returns:
      the best component name.
    """
    fd = self.OpenAsContainer()

    # Adjust the component casing
    file_listing = set(fd.ListNames())

    # First try an exact match
    if component not in file_listing:
      # Now try to match lower case
      lower_component = component.lower()
      for x in file_listing:
        if lower_component == x.lower():
          component = x
          break

    new_pathspec = rdfvalue.PathSpec(path=component,
                                     pathtype=fd.supported_pathtype)
    return new_pathspec

  def ListFiles(self):
    """An iterator over all VFS files contained in this directory.

    Generates a StatResponse proto for each file or directory.

    Raises:
      IOError: if this fails.
    """

  def ListNames(self):
    """A generator for all names in this directory."""
    return []

  # These are file object conformant namings for library functions that
  # grr uses, and that expect to interact with 'real' file objects.
  read = utils.Proxy("Read")
  seek = utils.Proxy("Seek")
  stat = utils.Proxy("Stat")
  tell = utils.Proxy("Tell")
  close = utils.Proxy("Close")

  @classmethod
  def Open(cls, fd, component, pathspec=None, progress_callback=None):
    """Try to correct the casing of component.

    This method is called when we failed to open the component directly. We try
    to transform the component into something which is likely to work.

    In this implementation, we correct the case of the component until we can
    not open the path any more.

    Args:
      fd: The base fd we will use.
      component: The component we should open.
      pathspec: The rest of the pathspec object.
      progress_callback: A callback to indicate that the open call is still
                         working but needs more time.

    Returns:
      A file object.

    Raises:
      IOError: If nothing could be opened still.
    """
    # The handler for this component
    try:
      handler = VFS_HANDLERS[component.pathtype]
    except KeyError:
      raise IOError(
          "VFS handler %d not supported." % component.pathtype)

    # We will not do any case folding unless requested.
    if component.path_options == rdfvalue.PathSpec.Options.CASE_LITERAL:
      return handler(base_fd=fd, pathspec=component)

    # NOTE: Python 2 semantics -- filter() returns a list here.
    path_components = client_utils.LocalPathToCanonicalPath(component.path)
    path_components = ["/"] + filter(None, path_components.split("/"))
    for i, path_component in enumerate(path_components):
      try:
        if fd:
          new_pathspec = fd.MatchBestComponentName(path_component)
        else:
          new_pathspec = component
          new_pathspec.path = path_component

        # The handler for this component
        try:
          handler = VFS_HANDLERS[new_pathspec.pathtype]
        except KeyError:
          raise IOError(
              "VFS handler %d not supported." % new_pathspec.pathtype)

        fd = handler(base_fd=fd, pathspec=new_pathspec,
                     progress_callback=progress_callback)
      except IOError:
        # Can not open the first component, we must raise here.
        if i <= 1:
          raise IOError("File not found")

        # Insert the remaining path at the front of the pathspec. Guess TSK
        # for the rest of the path (e.g. a path continuing inside an image).
        pathspec.Insert(0, path=utils.JoinPath(*path_components[i:]),
                        pathtype=rdfvalue.PathSpec.PathType.TSK)
        break

    return fd

  def GetMetadata(self):
    return self.metadata
# A registry of all VFSHandler subclasses, keyed by the pathtype they
# support. Populated by the VFSInit hook at startup.
VFS_HANDLERS = {}
class VFSInit(registry.InitHook):
  """Register all known vfs handlers to open a pathspec types."""

  def Run(self):
    # Walk the metaclass registry and index every auto-registering
    # handler under the pathtype it supports.
    for handler_cls in VFSHandler.classes.values():
      if not handler_cls.auto_register:
        continue
      VFS_HANDLERS[handler_cls.supported_pathtype] = handler_cls
def VFSOpen(pathspec, progress_callback=None):
  """Expands pathspec and opens it, returning a filelike object.

  A pathspec describes how to reach a file by recursively opening each
  component with a different driver (e.g. open /dev/sda1 with the OS
  driver, hand that object to the TSK driver to open an image inside it,
  and so on). The server may send collapsed paths such as
  "/dev/sda1/home/image2.img/home/a.txt"; the per-component handlers
  expand these while opening, so the returned object carries the fully
  expanded, case-corrected pathspec in its .pathspec member.

  Args:
    pathspec: A Path() protobuf to normalize.
    progress_callback: A callback to indicate that the open call is still
                       working but needs more time.

  Returns:
    The open filelike object. This will contain the expanded Path()
    protobuf as the member fd.pathspec.

  Raises:
    IOError: if one of the path components can not be opened.
  """
  fd = None

  # Opening changes the pathspec so we work on a copy.
  remaining = pathspec.Copy()

  # Consume one component at a time, handing the object opened for the
  # previous component to the next handler as its base.
  while remaining:
    component = remaining.Pop()
    try:
      handler = VFS_HANDLERS[component.pathtype]
    except KeyError:
      raise IOError(
          "VFS handler %d not supported." % component.pathtype)

    try:
      # Open the component.
      fd = handler.Open(fd, component, pathspec=remaining,
                        progress_callback=progress_callback)
    except IOError as e:
      raise IOError("%s: %s" % (e, component))

  return fd
def ReadVFS(pathspec, offset, length, progress_callback=None):
  """Opens pathspec via the VFS and returns `length` bytes from `offset`.

  Args:
    pathspec: path to read from
    offset: number of bytes to skip
    length: number of bytes to read
    progress_callback: A callback to indicate that the open call is still
                       working but needs more time.

  Returns:
    VFS file contents
  """
  vfs_file = VFSOpen(pathspec, progress_callback=progress_callback)
  vfs_file.Seek(offset)
  return vfs_file.Read(length)
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import logging
import logging.config
import os
import shutil
import subprocess
import sys
import tempfile
from platforms.chocolatey import build_chocolatey, publish_chocolatey
from platforms.common import ReleaseException, docker, run
from platforms.debian import build_deb
from platforms.homebrew import (
build_bottle,
log_about_manual_tap_push,
publish_tap_changes,
validate_tap,
)
from releases import (
add_assets,
create_new_release,
get_all_releases,
get_current_user,
get_release_for_tag,
get_token,
)
TARGET_MACOS_VERSION = "yosemite"
TARGET_MACOS_VERSION_SPEC = TARGET_MACOS_VERSION
def parse_args(args):
    """Parse command line arguments for the release publisher.

    Args:
        args: The raw argument list (e.g. sys.argv[1:]).

    Returns:
        An argparse.Namespace. Note that supplying a prebuilt artifact via
        --deb-file / --homebrew-file / --chocolatey-file implicitly disables
        building the corresponding artifact.
    """
    parser = argparse.ArgumentParser("Publish releases of buck to github")
    parser.add_argument(
        "--valid-git-upstreams",
        default=(
            "git@github.com:facebook/buck.git",
            "https://github.com/facebook/buck.git",
        ),
        nargs="+",
        help="List of valid upstreams for the git repository in order to publish",
    )
    parser.add_argument(
        "--github-token-file",
        default=os.path.expanduser("~/.buck-github-token"),
        help="A file containing the github token to use",
    )
    parser.add_argument(
        "--github-token",
        help="If provided, use this github token instead of the one in `--github-token-file`",
    )
    parser.add_argument(
        "--repository",
        default="facebook/buck",
        help="The github repository to operate on",
    )
    parser.add_argument(
        "--tap-repository", default="facebook/fb", help="The tap to use for homebrew"
    )
    parser.add_argument(
        "--version",
        default=datetime.datetime.now().strftime("%Y.%m.%d.01"),
        help=(
            "Version to use in git tags and github releases. This is generated "
            "by default"
        ),
    )
    parser.add_argument(
        "--use-existing-release",
        action="store_true",
        help=(
            "If specified, use an existing release (specified by --version), rather "
            "than pushing tags and creating a new release"
        ),
    )
    parser.add_argument(
        "--release-message",
        help=(
            "If specified, use this for the release message. If not specified, "
            "and a new release is created, user will be prompted for a message"
        ),
    )
    parser.add_argument(
        "--no-prompt-for-message",
        help="If set, use a default message rather than prompting for a message",
        action="store_false",
        dest="prompt_for_message",
    )
    parser.add_argument(
        "--no-build-deb",
        dest="build_deb",
        action="store_false",
        help="Do not build deb packages for this release",
    )
    parser.add_argument(
        "--no-build-homebrew",
        dest="build_homebrew",
        action="store_false",
        help="Do not build homebrew packages for this release",
    )
    parser.add_argument(
        "--no-build-chocolatey",
        dest="build_chocolatey",
        action="store_false",
        help="Do not build chocolatey packages for this release",
    )
    parser.add_argument(
        "--deb-file",
        help="Upload this file as the deb for this release. Implies --no-build-deb",
    )
    parser.add_argument(
        "--homebrew-file",
        help="Upload this file as the bottle for this release. Implies --no-build-homebrew",
    )
    parser.add_argument(
        "--chocolatey-file",
        help="Upload this file as the nupkg for this release. Implies --no-build-chocolatey",
    )
    parser.add_argument(
        "--docker-linux-host",
        help="If provided, the docker:port to connect to to build linux images",
    )
    parser.add_argument(
        "--docker-windows-host",
        help="If provided, the docker:port to connect to to build windows images",
    )
    parser.add_argument(
        "--docker-windows-memory",
        default="4g",
        help="The memory argument to pass to docker for windows containers",
    )
    parser.add_argument(
        "--docker-windows-isolation",
        default="process",
        help="The --isolation= argument for windows docker commands",
    )
    parser.add_argument(
        "--keep-temp-files",
        action="store_true",
        help="Keep temporary files regardless of success/failure",
    )
    parser.add_argument(
        "--no-upload-assets",
        dest="upload_assets",
        action="store_false",
        help="Do not upload assets",
    )
    parser.add_argument(
        "--homebrew-target-macos-version",
        default=TARGET_MACOS_VERSION,
        help="The target macos version to use in homebrew specs",
    )
    parser.add_argument(
        "--homebrew-target-macos-version-spec",
        default=TARGET_MACOS_VERSION_SPEC,
        help="The target macos version spec to use in homebrew specs",
    )
    parser.add_argument(
        "--no-homebrew-push-tap",
        dest="homebrew_push_tap",
        action="store_false",
        help="Do not push the homebrew tap. A manual commit will have to be made",
    )
    parser.add_argument(
        "--no-chocolatey-publish",
        dest="chocolatey_publish",
        action="store_false",
        help="Do not publish to chocolatey's community stream",
    )
    parser.add_argument(
        "--chocolatey-token",
        help="If provided, use this chocolatey token instead of the one in `--chocolatey-token-file`",
    )
    parser.add_argument(
        "--chocolatey-token-file",
        default=os.path.expanduser("~/.buck-chocolatey-token"),
        help="A file containing the chocolatey token to use",
    )
    parser.add_argument(
        "--output-dir",
        help=(
            "If specified, artifacts will be written to this directory, instead of "
            "a temporary one"
        ),
    )
    parser.add_argument(
        "--homebrew-dir",
        help=(
            "Where homebrew is (e.g. /usr/local). If not specified, homebrew will be "
            "installed in a separate, temporary directory that gets cleaned up after "
            "building (unless --keep-temp-files is specified). If --output-dir is "
            "specified, homebrew will be installed in a subdirectory there. This can "
            "be useful to ensure that tap directories are preserved and can be "
            "validated and pushed to github if a first run fails, or if a "
            "--no-upload-asset run is done"
        ),
    )
    parser.add_argument(
        "--insecure-chocolatey-upload",
        action="store_true",
        help=(
            "Do less certificate verification when uploading to chocolatey. "
            "This is a workaround for "
            "https://github.com/chocolatey/chocolatey.org/issues/584"
        ),
    )
    # vars() is the supported way to view a Namespace as a dict; the previous
    # _get_kwargs() call relied on a private argparse API.
    parsed_kwargs = vars(parser.parse_args(args))
    # A pre-supplied artifact file disables building that artifact.
    if parsed_kwargs["deb_file"]:
        parsed_kwargs["build_deb"] = False
    if parsed_kwargs["homebrew_file"]:
        parsed_kwargs["build_homebrew"] = False
    if parsed_kwargs["chocolatey_file"]:
        parsed_kwargs["build_chocolatey"] = False
    return argparse.Namespace(**parsed_kwargs)
def configure_logging():
    """Install stderr logging for the release scripts.

    Sub-ERROR messages and errors get separate handlers and formats, and
    ANSI bold/red styling is used only when stderr is a terminal.
    """
    is_tty = sys.stderr.isatty()

    # Bold message
    TTY_LOGGING = " publish_release => \033[1m%(message)s\033[0m"
    NOTTY_LOGGING = " publish_release => %(message)s"
    # Red message for errors
    TTY_ERROR_LOGGING = " publish_release => \033[1;31mERROR: %(message)s\033[0m"
    NOTTY_ERROR_LOGGING = " publish_release => ERROR: %(message)s"

    msg_format = TTY_LOGGING if is_tty else NOTTY_LOGGING
    error_msg_format = TTY_ERROR_LOGGING if is_tty else NOTTY_ERROR_LOGGING

    class LevelFilter(logging.Filter):
        # Passes only records below ERROR so errors are not printed twice
        # (once by each handler).
        def filter(self, record):
            return record.levelno < logging.ERROR

    config = {
        "version": 1,
        "filters": {"lower_than_error": {"()": LevelFilter}},
        "formatters": {
            "info": {"format": msg_format},
            "error": {"format": error_msg_format},
        },
        "handlers": {
            "info": {
                "level": "INFO",
                "class": "logging.StreamHandler",
                "formatter": "info",
                "filters": ["lower_than_error"],
            },
            "error": {
                "level": "ERROR",
                "class": "logging.StreamHandler",
                "formatter": "error",
            },
        },
        "loggers": {"": {"handlers": ["info", "error"], "level": "INFO"}},
    }
    logging.config.dictConfig(config)
def validate_repo_upstream(args):
    """Make sure we're in the right repository, not a fork"""
    origin_url = subprocess.check_output(
        ["git", "remote", "get-url", "origin"], encoding="utf-8"
    ).strip()
    if origin_url in args.valid_git_upstreams:
        return
    raise ReleaseException(
        "Releases may only be published from the upstream OSS buck repository"
    )
def _validate_docker_os(host_arg, expected_os, fail_message, wrong_os_message):
    """Check that docker on `host_arg` is reachable and runs `expected_os`.

    Args:
        host_arg: The docker host argument (None means localhost).
        expected_os: The OSType docker must report ("linux" or "windows").
        fail_message: ReleaseException format string used when `docker info`
            fails; formatted with the host.
        wrong_os_message: ReleaseException format string used when the OS
            does not match; formatted with the host and the reported OS.

    Raises:
        ReleaseException: if docker is unreachable or runs the wrong OS.
    """
    ret = docker(
        host_arg,
        ["info", "-f", "{{.OSType}}"],
        check=False,
        capture_output=True,
    )
    host = host_arg or "localhost"
    if ret.returncode != 0:
        raise ReleaseException(fail_message, host)
    host_os = ret.stdout.decode("utf-8").strip()
    if host_os != expected_os:
        raise ReleaseException(wrong_os_message, host, host_os)


def validate_environment(args):
    """Make sure we can build"""
    validate_repo_upstream(args)
    # The two docker checks share one helper; only the messages differ.
    if args.build_deb:
        _validate_docker_os(
            args.docker_linux_host,
            "linux",
            "docker info on linux host {} failed. debs cannot be built",
            "docker info on host {} returned type '{}' not 'linux'. debs cannot be built",
        )
    if args.build_chocolatey:
        _validate_docker_os(
            args.docker_windows_host,
            "windows",
            "docker info on windows host {} failed. chocolatey nupkgs cannot be built",
            "docker info on host {} returned type '{}' not 'windows'. chocolatey nupkgs cannot be built",
        )
    if args.build_homebrew:
        if args.homebrew_dir:
            if not os.path.exists(args.homebrew_dir):
                raise ReleaseException(
                    "Specified homebrew path, {}, does not exist", args.homebrew_dir
                )
            brew_path = os.path.join(args.homebrew_dir, "bin", "brew")
            try:
                run([brew_path, "--version"])
            except Exception as e:
                # Chain the original failure so the root cause stays visible.
                raise ReleaseException(
                    "{} --version failed. bottles cannot be created", brew_path
                ) from e
def build(args, output_dir, release, github_token, homebrew_dir):
    """Build the requested release artifacts into output_dir.

    Returns a (deb_file, homebrew_file, chocolatey_file) tuple; entries hold
    the pre-supplied --*-file paths when building that artifact is disabled.
    """
    deb_file = args.deb_file
    homebrew_file = args.homebrew_file
    chocolatey_file = args.chocolatey_file

    if args.build_deb:
        user = get_current_user(github_token)
        all_releases = get_all_releases(args.repository, github_token)
        deb_file = build_deb(
            args.repository,
            release,
            user,
            all_releases,
            args.docker_linux_host,
            output_dir,
        )

    if args.build_homebrew:
        homebrew_file = build_bottle(
            homebrew_dir,
            release,
            args.repository,
            args.tap_repository,
            args.homebrew_target_macos_version,
            args.homebrew_target_macos_version_spec,
            output_dir,
        )

    if args.build_chocolatey:
        chocolatey_file = build_chocolatey(
            args.repository,
            release,
            args.docker_windows_host,
            args.docker_windows_memory,
            args.docker_windows_isolation,
            output_dir,
        )

    return deb_file, homebrew_file, chocolatey_file
def publish(
    args,
    release,
    github_token,
    chocolatey_token,
    deb_file,
    homebrew_file,
    homebrew_dir,
    chocolatey_file,
):
    """Attach built artifacts to the github release and push them downstream."""
    if not args.upload_assets:
        return

    if deb_file:
        add_assets(release, github_token, deb_file)

    if chocolatey_file:
        add_assets(release, github_token, chocolatey_file)
        if args.chocolatey_publish:
            publish_chocolatey(
                chocolatey_file, chocolatey_token, args.insecure_chocolatey_upload
            )

    if homebrew_file:
        add_assets(release, github_token, homebrew_file)
        validate_tap(homebrew_dir, args.tap_repository, args.version)
        if args.homebrew_push_tap:
            publish_tap_changes(homebrew_dir, args.tap_repository, args.version)
        else:
            log_about_manual_tap_push(args.tap_repository)
def main():
    # Orchestrates the whole release: validate, create/find the release,
    # build artifacts, publish them, then clean up temp directories.
    args = parse_args(sys.argv[1:])
    configure_logging()
    version_tag = "v" + args.version

    # Command-line tokens take precedence over token files.
    github_token = (
        args.github_token if args.github_token else get_token(args.github_token_file)
    )
    if args.chocolatey_publish:
        chocolatey_token = (
            args.chocolatey_token
            if args.chocolatey_token
            else get_token(args.chocolatey_token_file)
        )
    else:
        chocolatey_token = None

    # Track the temp dirs we create so the finally block can clean them up.
    temp_dir = None
    temp_homebrew_dir = None
    homebrew_file = None
    try:
        validate_environment(args)
        if args.use_existing_release:
            release = get_release_for_tag(args.repository, github_token, version_tag)
        else:
            release = create_new_release(
                args.repository,
                github_token,
                version_tag,
                args.release_message,
                args.prompt_for_message,
            )
        if args.output_dir:
            output_dir = args.output_dir
            if not os.path.exists(output_dir):
                logging.info("{} does not exist. Creating it".format(output_dir))
                os.makedirs(output_dir, exist_ok=True)
        else:
            temp_dir = tempfile.mkdtemp()
            output_dir = temp_dir
        # Homebrew dir resolution: explicit flag > under --output-dir > temp.
        if args.homebrew_dir:
            homebrew_dir = args.homebrew_dir
        elif args.output_dir:
            homebrew_dir = os.path.abspath(
                os.path.join(output_dir, "homebrew_" + version_tag)
            )
        else:
            temp_homebrew_dir = tempfile.mkdtemp()
            homebrew_dir = temp_homebrew_dir

        deb_file, homebrew_file, chocolatey_file = build(
            args, output_dir, release, github_token, homebrew_dir
        )
        publish(
            args,
            release,
            github_token,
            chocolatey_token,
            deb_file,
            homebrew_file,
            homebrew_dir,
            chocolatey_file,
        )
    except ReleaseException as e:
        logging.error(str(e))
    finally:
        if not args.keep_temp_files:

            def remove(path):
                # Best-effort removal; failure is logged, not fatal.
                try:
                    shutil.rmtree(path)
                except Exception:
                    logging.error("Could not remove temp dir at {}".format(path))

            if temp_dir:
                remove(temp_dir)
            if temp_homebrew_dir:
                # If the person didn't want to publish, we need to keep this around
                if not homebrew_file or args.homebrew_push_tap:
                    remove(temp_homebrew_dir)
# Script entry point.
if __name__ == "__main__":
    main()
|
|
import numpy as np
from os import name as os_name
import mrptlib
class MRPTIndex(object):
"""
An MRPT index object
"""
    def __init__(self, data, shape=None, mmap=False):
        """
        Initializes an MRPT index object.
        :param data: Input data either as a NxDim numpy ndarray or as a filepath to a binary file containing the data.
        :param shape: Shape of the data as a tuple (N, dim). Needs to be specified only if loading the data from a file.
        :param mmap: If true, the data is mapped into memory. Has effect only if the data is loaded from a file.
        :return:
        """
        if isinstance(data, np.ndarray):
            # In-memory data: the extension requires an aligned,
            # C-contiguous float32 matrix with one sample per row.
            if len(data) == 0 or len(data.shape) != 2:
                raise ValueError("The data matrix should be non-empty and two-dimensional")
            if data.dtype != np.float32:
                raise ValueError("The data matrix should have type float32")
            if not data.flags['C_CONTIGUOUS'] or not data.flags['ALIGNED']:
                raise ValueError("The data matrix has to be C_CONTIGUOUS and ALIGNED")
            n_samples, dim = data.shape
        elif isinstance(data, str):
            # File-backed data: the caller must supply the shape.
            if not isinstance(shape, tuple) or len(shape) != 2:
                raise ValueError("You must specify the shape of the data as a tuple (N, dim) "
                                 "when loading data from a binary file")
            n_samples, dim = shape
        elif data is not None:
            # data=None is allowed internally (see subset()), producing an
            # empty wrapper whose `index`/`dim` are filled in afterwards.
            raise ValueError("Data must be either an ndarray or a filepath")
        if mmap and os_name == 'nt':
            raise ValueError("Memory mapping is not available on Windows")
        if data is not None:
            self.index = mrptlib.MrptIndex(data, n_samples, dim, mmap)
            self.dim = dim
        self.built = False
        self.autotuned = False
def _compute_sparsity(self, projection_sparsity):
if projection_sparsity == 'auto':
return 1. / np.sqrt(self.dim)
elif projection_sparsity is None:
return 1
elif not 0 < projection_sparsity <= 1:
raise ValueError("Sparsity should be in (0, 1]")
def build(self, depth, n_trees, projection_sparsity='auto'):
"""
Builds a normal MRPT index.
:param depth: The depth of the trees; should be in the set {1, 2, ..., floor(log2(n))}.
:param n_trees: The number of trees used in the index.
:param projection_sparsity: Expected ratio of non-zero components in a projection matrix.
:return:
"""
if self.built:
raise RuntimeError("The index has already been built")
projection_sparsity = self._compute_sparsity(projection_sparsity)
self.index.build(n_trees, depth, projection_sparsity)
self.built = True
    def build_autotune(self, target_recall, Q, k, trees_max=-1, depth_min=-1, depth_max=-1,
                       votes_max=-1, projection_sparsity='auto', shape=None):
        """
        Builds an autotuned MRPT index.
        :param target_recall: The target recall level (float) or None if the target recall level
                              is set later using the subset function.
        :param Q: A matrix of test queries used for tuning, one per row.
        :param k: Number of nearest neighbors searched for.
        :param trees_max: Maximum number of trees grown; can be used to control the building time
                          and memory usage; a default value -1 sets this to min(sqrt(n), 1000).
        :param depth_min: Minimum depth of trees considered when searching for optimal parameters;
                          a default value -1 sets this to min(log2(n), 5).
        :param depth_max: Maximum depth of trees considered when searching for optimal parameters;
                          a default value -1 sets this to log2(n) - 4.
        :param votes_max: Maximum number of votes considered when searching for optimal parameters;
                          a default value -1 sets this to max(trees / 10, 10).
        :param projection_sparsity: Expected ratio of non-zero components in a projection matrix
        :param shape: Shape of the test query matrix as a tuple (n_test, dim). Needs to be specified
                      only if loading the test query matrix from a file.
        :return:
        """
        if self.built:
            raise RuntimeError("The index has already been built")
        # Validate Q the same way __init__ validates the data matrix.
        if isinstance(Q, np.ndarray):
            if len(Q) == 0 or len(Q.shape) != 2:
                raise ValueError("The test query matrix should be non-empty and two-dimensional")
            if Q.dtype != np.float32:
                raise ValueError("The test query matrix should have type float32")
            if not Q.flags['C_CONTIGUOUS'] or not Q.flags['ALIGNED']:
                raise ValueError("The test query matrix has to be C_CONTIGUOUS and ALIGNED")
            n_test, dim = Q.shape
        elif isinstance(Q, str):
            if not isinstance(shape, tuple) or len(shape) != 2:
                raise ValueError("You must specify the shape of the data as a tuple (n_test, dim) "
                                 "when loading the test query matrix from a binary file")
            n_test, dim = shape
        else:
            raise ValueError("The test query matrix must be either an ndarray or a filepath")
        if dim != self.dim:
            raise ValueError("The test query matrix should have the same number of columns as the data matrix")
        # The index counts as built only when a recall level was fixed;
        # otherwise subset() must be called later to finalize it.
        self.built = target_recall is not None
        self.autotuned = True
        if target_recall is None:
            # -1 tells the extension to tune without a fixed recall level.
            target_recall = -1
        projection_sparsity = self._compute_sparsity(projection_sparsity)
        self.index.build_autotune(
            target_recall, Q, n_test, k, trees_max, depth_min, depth_max, votes_max, projection_sparsity)
def build_autotune_sample(self, target_recall, k, n_test=100, trees_max=-1,
depth_min=-1, depth_max=-1, votes_max=-1, projection_sparsity='auto'):
"""
Builds an autotuned MRPT index.
:param target_recall: The target recall level (float) or None if the target recall level
is set later using the subset function.
:param k: Number of nearest neighbors searched for.
:param n_test: Number of test queries to sample.
:param trees_max: Maximum number of trees grown; can be used to control the building time
and memory usage; a default value -1 sets this to min(sqrt(n), 1000).
:param depth_min: Minimum depth of trees considered when searching for optimal parameters;
a default value -1 sets this to min(log2(n), 5).
:param depth_max: Maximum depth of trees considered when searching for optimal parameters;
a default value -1 sets this to log2(n) - 4:
:param votes_max: Maximum number of votes considered when searching for optimal parameters;
a default value -1 sets this to max(trees / 10, 10).
:param projection_sparsity: Expected ratio of non-zero components in a projection matrix
:return:
"""
if self.built:
raise RuntimeError("The index has already been built")
self.built = target_recall is not None
self.autotuned = True
if target_recall is None:
target_recall = -1
projection_sparsity = self._compute_sparsity(projection_sparsity)
self.index.build_autotune_sample(
target_recall, n_test, k, trees_max, depth_min, depth_max, votes_max, projection_sparsity)
def subset(self, target_recall):
"""
Create a new index for a specified target recall level by copying trees from an autotuned index grown
without a prespecified recall level.
:param target_recall: The target recall level.
:return: A new MRPT object autotuned for the specified recall level.
"""
if not self.autotuned:
raise RuntimeError("Only an autotuned index can be subset")
new_index = MRPTIndex(None)
new_index.index = self.index.subset(target_recall)
new_index.dim = self.dim
new_index.built = True
new_index.autotuned = True
return new_index
def parameters(self):
"""
Get the parameters of the index.
:return: A dictionary of the hyperparameters of the index.
"""
n_trees, depth, votes, k, qtime, recall = self.index.parameters()
if self.index.is_autotuned():
return {'n_trees': n_trees, 'depth': depth, 'k': k, 'votes': votes,
'estimated_qtime': qtime, 'estimated_recall': recall}
return {'n_trees': n_trees, 'depth': depth}
def save(self, path):
"""
Saves the MRPT index to a file.
:param path: Filepath to the location of the saved index.
:return:
"""
if not self.built:
raise RuntimeError("Cannot save index before building")
self.index.save(path)
def load(self, path):
"""
Loads the MRPT index from a file.
:param path: Filepath to the location of the index.
:return:
"""
self.index.load(path)
self.built = True
self.autotuned = self.index.is_autotuned()
def ann(self, q, k=None, votes_required=None, return_distances=False):
"""
Performs an approximate nearest neighbor query for a single query vector or multiple query vectors
in parallel. The queries are given as a numpy vector or a numpy matrix where each row contains a query.
:param q: The query object. Can be either a single query vector or a matrix with one query vector per row.
:param k: The number of nearest neighbors to be returned, has to be specified if the index has not been autotuned.
:param votes_required: The number of votes an object has to get to be included in the linear search part of the query;
has to be specified if the index has not been autotuned.
:param return_distances: Whether the distances are also returned.
:return: If return_distances is false, returns a vector or matrix of indices of the approximate
nearest neighbors in the original input data for the corresponding query. Otherwise,
returns a tuple where the first element contains the nearest neighbors and the second
element contains their distances to the query.
"""
if not self.built:
raise RuntimeError("Cannot query before building index")
if q.dtype != np.float32:
raise ValueError("The query matrix should have type float32")
if not self.autotuned and (k is None or votes_required is None):
raise ValueError("k and votes_required must be set if the index has not been autotuned")
if k is None:
k = -1
if votes_required is None:
votes_required = -1
return self.index.ann(q, k, votes_required, return_distances)
def exact_search(self, q, k, return_distances=False):
"""
Performs an exact nearest neighbor query for a single query several queries in parallel. The queries are
given as a numpy matrix where each row contains a query. Useful for measuring accuracy.
:param q: The query object. Can be either a single query vector or a matrix with one query vector per row.
:param k: The number of nearest neighbors to return.
:param return_distances: Whether the distances are also returned.
:return: If return_distances is false, returns a vector or matrix of indices of the exact
nearest neighbors in the original input data for the corresponding query. Otherwise,
returns a tuple where the first element contains the nearest neighbors and the second
element contains their distances to the query.
"""
if q.dtype != np.float32:
raise ValueError("The query matrix should have type float32")
if k < 1:
raise ValueError("k must be positive")
return self.index.exact_search(q, k, return_distances)
|
|
import hashlib
from collections import OrderedDict
from decimal import Decimal as D
from django.conf import settings
from django.db import models
from django.db.models import Sum
from django.utils import timezone
from django.utils.datastructures import SortedDict
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from oscar.core.compat import AUTH_USER_MODEL
from oscar.core.utils import get_default_currency
from oscar.models.fields import AutoSlugField
from . import exceptions
@python_2_unicode_compatible
class AbstractOrder(models.Model):
    """
    The main order model
    """
    number = models.CharField(
        _("Order number"), max_length=128, db_index=True, unique=True)
    # We track the site that each order is placed within
    site = models.ForeignKey(
        'sites.Site', verbose_name=_("Site"), null=True,
        on_delete=models.SET_NULL)
    basket = models.ForeignKey(
        'basket.Basket', verbose_name=_("Basket"),
        null=True, blank=True, on_delete=models.SET_NULL)
    # Orders can be placed without the user authenticating so we don't always
    # have a customer ID.
    user = models.ForeignKey(
        AUTH_USER_MODEL, related_name='orders', null=True, blank=True,
        verbose_name=_("User"), on_delete=models.SET_NULL)
    # Billing address is not always required (eg paying by gift card)
    billing_address = models.ForeignKey(
        'order.BillingAddress', null=True, blank=True,
        verbose_name=_("Billing Address"),
        on_delete=models.SET_NULL)
    # Total price looks like it could be calculated by adding up the
    # prices of the associated lines, but in some circumstances extra
    # order-level charges are added and so we need to store it separately
    currency = models.CharField(
        _("Currency"), max_length=12, default=get_default_currency)
    total_incl_tax = models.DecimalField(
        _("Order total (inc. tax)"), decimal_places=2, max_digits=12)
    total_excl_tax = models.DecimalField(
        _("Order total (excl. tax)"), decimal_places=2, max_digits=12)
    # Shipping charges
    shipping_incl_tax = models.DecimalField(
        _("Shipping charge (inc. tax)"), decimal_places=2, max_digits=12,
        default=0)
    shipping_excl_tax = models.DecimalField(
        _("Shipping charge (excl. tax)"), decimal_places=2, max_digits=12,
        default=0)
    # Not all lines are actually shipped (such as downloads), hence shipping
    # address is not mandatory.
    shipping_address = models.ForeignKey(
        'order.ShippingAddress', null=True, blank=True,
        verbose_name=_("Shipping Address"),
        on_delete=models.SET_NULL)
    shipping_method = models.CharField(
        _("Shipping method"), max_length=128, blank=True)
    # Identifies shipping code
    shipping_code = models.CharField(blank=True, max_length=128, default="")
    # Use this field to indicate that an order is on hold / awaiting payment
    status = models.CharField(_("Status"), max_length=100, blank=True)
    # TODO Remove the max_length kwarg when support for Django 1.7 is dropped
    guest_email = models.EmailField(_("Guest email address"), max_length=75,
                                    blank=True)
    # Index added to this field for reporting
    date_placed = models.DateTimeField(db_index=True)
    #: Order status pipeline. This should be a dict where each (key, value) #:
    #: corresponds to a status and a list of possible statuses that can follow
    #: that one.
    pipeline = getattr(settings, 'OSCAR_ORDER_STATUS_PIPELINE', {})
    #: Order status cascade pipeline. This should be a dict where each (key,
    #: value) pair corresponds to an *order* status and the corresponding
    #: *line* status that needs to be set when the order is set to the new
    #: status
    cascade = getattr(settings, 'OSCAR_ORDER_STATUS_CASCADE', {})
    @classmethod
    def all_statuses(cls):
        """
        Return all possible statuses for an order
        """
        return list(cls.pipeline.keys())
    def available_statuses(self):
        """
        Return all possible statuses that this order can move to
        """
        return self.pipeline.get(self.status, ())
    def set_status(self, new_status):
        """
        Set a new status for this order.
        If the requested status is not valid, then ``InvalidOrderStatus`` is
        raised.
        """
        if new_status == self.status:
            # No-op when the status is unchanged
            return
        if new_status not in self.available_statuses():
            raise exceptions.InvalidOrderStatus(
                _("'%(new_status)s' is not a valid status for order %(number)s"
                  " (current status: '%(status)s')")
                % {'new_status': new_status,
                   'number': self.number,
                   'status': self.status})
        self.status = new_status
        # Cascade the new status down to this order's lines where configured
        if new_status in self.cascade:
            for line in self.lines.all():
                line.status = self.cascade[self.status]
                line.save()
        self.save()
    set_status.alters_data = True
    @property
    def is_anonymous(self):
        # It's possible for an order to be placed by a customer who then
        # deletes their profile. Hence, we need to check that a guest email is
        # set.
        return self.user is None and bool(self.guest_email)
    @property
    def basket_total_before_discounts_incl_tax(self):
        """
        Return basket total including tax but before discounts are applied
        """
        total = D('0.00')
        for line in self.lines.all():
            total += line.line_price_before_discounts_incl_tax
        return total
    @property
    def basket_total_before_discounts_excl_tax(self):
        """
        Return basket total excluding tax but before discounts are applied
        """
        total = D('0.00')
        for line in self.lines.all():
            total += line.line_price_before_discounts_excl_tax
        return total
    @property
    def basket_total_incl_tax(self):
        """
        Return basket total including tax
        """
        return self.total_incl_tax - self.shipping_incl_tax
    @property
    def basket_total_excl_tax(self):
        """
        Return basket total excluding tax
        """
        return self.total_excl_tax - self.shipping_excl_tax
    @property
    def total_before_discounts_incl_tax(self):
        """
        Return the order total (incl. tax) before any discounts were applied
        """
        return (self.basket_total_before_discounts_incl_tax +
                self.shipping_incl_tax)
    @property
    def total_before_discounts_excl_tax(self):
        """
        Return the order total (excl. tax) before any discounts were applied
        """
        return (self.basket_total_before_discounts_excl_tax +
                self.shipping_excl_tax)
    @property
    def total_discount_incl_tax(self):
        """
        The amount of discount this order received
        """
        discount = D('0.00')
        for line in self.lines.all():
            discount += line.discount_incl_tax
        return discount
    @property
    def total_discount_excl_tax(self):
        """
        The amount of discount (excl. tax) this order received
        """
        discount = D('0.00')
        for line in self.lines.all():
            discount += line.discount_excl_tax
        return discount
    @property
    def total_tax(self):
        """
        Return the total tax payable on this order
        """
        return self.total_incl_tax - self.total_excl_tax
    @property
    def num_lines(self):
        """
        Return the number of lines in this order
        """
        return self.lines.count()
    @property
    def num_items(self):
        """
        Returns the number of items in this order.
        """
        num_items = 0
        for line in self.lines.all():
            num_items += line.quantity
        return num_items
    @property
    def shipping_tax(self):
        """
        Return the tax charged on shipping
        """
        return self.shipping_incl_tax - self.shipping_excl_tax
    @property
    def shipping_status(self):
        """Return the last complete shipping event for this order."""
        # As safeguard against identical timestamps, also sort by the primary
        # key. It's not recommended to rely on this behaviour, but in practice
        # reasonably safe if PKs are not manually set.
        events = self.shipping_events.order_by('-date_created', '-pk').all()
        if not len(events):
            return ''
        # Collect all events by event-type
        event_map = OrderedDict()
        for event in events:
            event_name = event.event_type.name
            if event_name not in event_map:
                event_map[event_name] = []
            event_map[event_name].extend(list(event.line_quantities.all()))
        # Determine last complete event
        status = _("In progress")
        for event_name, event_line_quantities in event_map.items():
            if self._is_event_complete(event_line_quantities):
                return event_name
        return status
    @property
    def has_shipping_discounts(self):
        """
        Test whether any shipping discounts were applied to this order
        """
        return len(self.shipping_discounts) > 0
    @property
    def shipping_before_discounts_incl_tax(self):
        # We can construct what shipping would have been before discounts by
        # adding the discounts back onto the final shipping charge.
        total = D('0.00')
        for discount in self.shipping_discounts:
            total += discount.amount
        return self.shipping_incl_tax + total
    def _is_event_complete(self, event_quantities):
        """
        Test whether the passed event quantities cover the full quantity of
        every line in this order.
        """
        # Form map of line to quantity
        event_map = {}
        for event_quantity in event_quantities:
            line_id = event_quantity.line_id
            event_map.setdefault(line_id, 0)
            event_map[line_id] += event_quantity.quantity
        for line in self.lines.all():
            if event_map.get(line.pk, 0) != line.quantity:
                return False
        return True
    class Meta:
        abstract = True
        app_label = 'order'
        ordering = ['-date_placed']
        verbose_name = _("Order")
        verbose_name_plural = _("Orders")
    def __str__(self):
        return u"#%s" % (self.number,)
    def verification_hash(self):
        """
        Return an MD5 hex digest of the order number salted with SECRET_KEY.
        NOTE(review): MD5 is not collision-resistant; confirm this hash is
        only used as an obfuscation token, not for security guarantees.
        """
        key = '%s%s' % (self.number, settings.SECRET_KEY)
        hash = hashlib.md5(key.encode('utf8'))
        return hash.hexdigest()
    @property
    def email(self):
        """
        Return the customer email: the guest email for anonymous orders,
        otherwise the email of the user who placed the order.
        """
        if not self.user:
            return self.guest_email
        return self.user.email
    @property
    def basket_discounts(self):
        # This includes both offer- and voucher- discounts. For orders we
        # don't need to treat them differently like we do for baskets.
        return self.discounts.filter(
            category=AbstractOrderDiscount.BASKET)
    @property
    def shipping_discounts(self):
        return self.discounts.filter(
            category=AbstractOrderDiscount.SHIPPING)
    @property
    def post_order_actions(self):
        return self.discounts.filter(
            category=AbstractOrderDiscount.DEFERRED)
    def set_date_placed_default(self):
        # Fall back to the current time when no placement date was given
        if self.date_placed is None:
            self.date_placed = now()
    def save(self, *args, **kwargs):
        # Ensure the date_placed field works as it auto_now_add was set. But
        # this gives us the ability to set the date_placed explicitly (which is
        # useful when importing orders from another system).
        self.set_date_placed_default()
        super(AbstractOrder, self).save(*args, **kwargs)
@python_2_unicode_compatible
class AbstractOrderNote(models.Model):
    """
    A note against an order.
    These are often used for audit purposes too. IE, whenever an admin
    makes a change to an order, we create a note to record what happened.
    """
    order = models.ForeignKey('order.Order', related_name="notes",
                              verbose_name=_("Order"))
    # These are sometimes programatically generated so don't need a
    # user everytime
    user = models.ForeignKey(AUTH_USER_MODEL, null=True,
                             verbose_name=_("User"))
    # We allow notes to be classified although this isn't always needed
    INFO, WARNING, ERROR, SYSTEM = 'Info', 'Warning', 'Error', 'System'
    note_type = models.CharField(_("Note Type"), max_length=128, blank=True)
    message = models.TextField(_("Message"))
    date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
    date_updated = models.DateTimeField(_("Date Updated"), auto_now=True)
    # Notes can only be edited for 5 minutes after being created
    editable_lifetime = 300
    class Meta:
        abstract = True
        app_label = 'order'
        verbose_name = _("Order Note")
        verbose_name_plural = _("Order Notes")
    def __str__(self):
        return u"'%s' (%s)" % (self.message[0:50], self.user)
    def is_editable(self):
        """
        Test whether this note can still be edited.
        System notes are never editable; other notes are editable for
        ``editable_lifetime`` seconds after their last update.
        """
        if self.note_type == self.SYSTEM:
            return False
        delta = timezone.now() - self.date_updated
        # Bug fix: use total_seconds() rather than .seconds. The .seconds
        # attribute is only the seconds *component* of the delta (0-86399)
        # and ignores whole days, so a note aged e.g. 1 day and 2 minutes
        # would wrongly appear editable again.
        return delta.total_seconds() < self.editable_lifetime
@python_2_unicode_compatible
class AbstractCommunicationEvent(models.Model):
    """
    An order-level event involving a communication to the customer, such
    as a confirmation email being sent.
    """
    order = models.ForeignKey(
        'order.Order', related_name="communication_events",
        verbose_name=_("Order"))
    event_type = models.ForeignKey(
        'customer.CommunicationEventType', verbose_name=_("Event Type"))
    date_created = models.DateTimeField(_("Date"), auto_now_add=True)
    class Meta:
        abstract = True
        app_label = 'order'
        verbose_name = _("Communication Event")
        verbose_name_plural = _("Communication Events")
        # Newest events first
        ordering = ['-date_created']
    def __str__(self):
        return _("'%(type)s' event for order #%(number)s") \
            % {'type': self.event_type.name, 'number': self.order.number}
# LINES
@python_2_unicode_compatible
class AbstractLine(models.Model):
    """
    An order line
    """
    order = models.ForeignKey(
        'order.Order', related_name='lines', verbose_name=_("Order"))
    # PARTNER INFORMATION
    # -------------------
    # We store the partner and various detail their SKU and the title for cases
    # where the product has been deleted from the catalogue (but we still need
    # the data for reporting). We also store the partner name in case the
    # partner gets deleted at a later date.
    partner = models.ForeignKey(
        'partner.Partner', related_name='order_lines', blank=True, null=True,
        on_delete=models.SET_NULL, verbose_name=_("Partner"))
    partner_name = models.CharField(
        _("Partner name"), max_length=128, blank=True)
    partner_sku = models.CharField(_("Partner SKU"), max_length=128)
    # A line reference is the ID that a partner uses to represent this
    # particular line (it's not the same as a SKU).
    partner_line_reference = models.CharField(
        _("Partner reference"), max_length=128, blank=True,
        help_text=_("This is the item number that the partner uses "
                    "within their system"))
    partner_line_notes = models.TextField(
        _("Partner Notes"), blank=True)
    # We keep a link to the stockrecord used for this line which allows us to
    # update stocklevels when it ships
    stockrecord = models.ForeignKey(
        'partner.StockRecord', on_delete=models.SET_NULL, blank=True,
        null=True, verbose_name=_("Stock record"))
    # PRODUCT INFORMATION
    # -------------------
    # We don't want any hard links between orders and the products table so we
    # allow this link to be NULLable.
    product = models.ForeignKey(
        'catalogue.Product', on_delete=models.SET_NULL, blank=True, null=True,
        verbose_name=_("Product"))
    title = models.CharField(
        pgettext_lazy(u"Product title", u"Title"), max_length=255)
    # UPC can be null because it's usually set as the product's UPC, and that
    # can be null as well
    upc = models.CharField(_("UPC"), max_length=128, blank=True, null=True)
    quantity = models.PositiveIntegerField(_("Quantity"), default=1)
    # REPORTING INFORMATION
    # ---------------------
    # Price information (these fields are actually redundant as the information
    # can be calculated from the LinePrice models
    line_price_incl_tax = models.DecimalField(
        _("Price (inc. tax)"), decimal_places=2, max_digits=12)
    line_price_excl_tax = models.DecimalField(
        _("Price (excl. tax)"), decimal_places=2, max_digits=12)
    # Price information before discounts are applied
    line_price_before_discounts_incl_tax = models.DecimalField(
        _("Price before discounts (inc. tax)"),
        decimal_places=2, max_digits=12)
    line_price_before_discounts_excl_tax = models.DecimalField(
        _("Price before discounts (excl. tax)"),
        decimal_places=2, max_digits=12)
    # Cost price (the price charged by the fulfilment partner for this
    # product).
    unit_cost_price = models.DecimalField(
        _("Unit Cost Price"), decimal_places=2, max_digits=12, blank=True,
        null=True)
    # Normal site price for item (without discounts)
    unit_price_incl_tax = models.DecimalField(
        _("Unit Price (inc. tax)"), decimal_places=2, max_digits=12,
        blank=True, null=True)
    unit_price_excl_tax = models.DecimalField(
        _("Unit Price (excl. tax)"), decimal_places=2, max_digits=12,
        blank=True, null=True)
    # Retail price at time of purchase
    unit_retail_price = models.DecimalField(
        _("Unit Retail Price"), decimal_places=2, max_digits=12,
        blank=True, null=True)
    # Partners often want to assign some status to each line to help with their
    # own business processes.
    status = models.CharField(_("Status"), max_length=255, blank=True)
    # Estimated dispatch date - should be set at order time
    est_dispatch_date = models.DateField(
        _("Estimated Dispatch Date"), blank=True, null=True)
    #: Order status pipeline. This should be a dict where each (key, value)
    #: corresponds to a status and the possible statuses that can follow that
    #: one.
    pipeline = getattr(settings, 'OSCAR_LINE_STATUS_PIPELINE', {})
    class Meta:
        abstract = True
        app_label = 'order'
        verbose_name = _("Order Line")
        verbose_name_plural = _("Order Lines")
    def __str__(self):
        if self.product:
            title = self.product.title
        else:
            title = _('<missing product>')
        return _("Product '%(name)s', quantity '%(qty)s'") % {
            'name': title, 'qty': self.quantity}
    @classmethod
    def all_statuses(cls):
        """
        Return all possible statuses for an order line
        """
        return list(cls.pipeline.keys())
    def available_statuses(self):
        """
        Return all possible statuses that this order line can move to
        """
        return self.pipeline.get(self.status, ())
    def set_status(self, new_status):
        """
        Set a new status for this line
        If the requested status is not valid, then ``InvalidLineStatus`` is
        raised.
        """
        if new_status == self.status:
            # No-op when the status is unchanged
            return
        if new_status not in self.available_statuses():
            raise exceptions.InvalidLineStatus(
                _("'%(new_status)s' is not a valid status (current status:"
                  " '%(status)s')")
                % {'new_status': new_status, 'status': self.status})
        self.status = new_status
        self.save()
    set_status.alters_data = True
    @property
    def category(self):
        """
        Used by Google analytics tracking
        """
        return None
    @property
    def description(self):
        """
        Returns a description of this line including details of any
        line attributes.
        """
        desc = self.title
        ops = []
        for attribute in self.attributes.all():
            ops.append("%s = '%s'" % (attribute.type, attribute.value))
        if ops:
            desc = "%s (%s)" % (desc, ", ".join(ops))
        return desc
    @property
    def discount_incl_tax(self):
        """
        Return the discount (incl. tax) applied to this line
        """
        return self.line_price_before_discounts_incl_tax \
            - self.line_price_incl_tax
    @property
    def discount_excl_tax(self):
        """
        Return the discount (excl. tax) applied to this line
        """
        return self.line_price_before_discounts_excl_tax \
            - self.line_price_excl_tax
    @property
    def line_price_tax(self):
        """
        Return the tax charged on this line
        """
        return self.line_price_incl_tax - self.line_price_excl_tax
    @property
    def unit_price_tax(self):
        """
        Return the tax charged per unit.
        NOTE(review): unit_price_incl_tax/unit_price_excl_tax are nullable;
        this raises TypeError when either is None - confirm callers guard
        against that.
        """
        return self.unit_price_incl_tax - self.unit_price_excl_tax
    # Shipping status helpers
    @property
    def shipping_status(self):
        """
        Returns a string summary of the shipping status of this line
        """
        status_map = self.shipping_event_breakdown
        if not status_map:
            return ''
        events = []
        last_complete_event_name = None
        # The breakdown is ordered newest-first (ShippingEvent Meta ordering),
        # so walk it in reverse to process events oldest-first.
        for event_dict in reversed(list(status_map.values())):
            if event_dict['quantity'] == self.quantity:
                events.append(event_dict['name'])
                last_complete_event_name = event_dict['name']
            else:
                events.append("%s (%d/%d items)" % (
                    event_dict['name'], event_dict['quantity'],
                    self.quantity))
        # If the most recent event covers the whole line, report just that one
        if last_complete_event_name == list(status_map.values())[0]['name']:
            return last_complete_event_name
        return ', '.join(events)
    def is_shipping_event_permitted(self, event_type, quantity):
        """
        Test whether a shipping event with the given quantity is permitted
        This method should normally be overriden to ensure that the
        prerequisite shipping events have been passed for this line.
        """
        # Note, this calculation is simplistic - normally, you will also need
        # to check if previous shipping events have occurred. Eg, you can't
        # return lines until they have been shipped.
        current_qty = self.shipping_event_quantity(event_type)
        return (current_qty + quantity) <= self.quantity
    def shipping_event_quantity(self, event_type):
        """
        Return the quantity of this line that has been involved in a shipping
        event of the passed type.
        """
        result = self.shipping_event_quantities.filter(
            event__event_type=event_type).aggregate(Sum('quantity'))
        if result['quantity__sum'] is None:
            # No events of this type have occurred yet
            return 0
        else:
            return result['quantity__sum']
    def has_shipping_event_occurred(self, event_type, quantity=None):
        """
        Test whether this line has passed a given shipping event
        """
        if not quantity:
            quantity = self.quantity
        return self.shipping_event_quantity(event_type) == quantity
    def get_event_quantity(self, event):
        """
        Fetches the ShippingEventQuantity instance for this line
        Exists as a separate method so it can be overridden to avoid
        the DB query that's caused by get().
        """
        return event.line_quantities.get(line=self)
    @property
    def shipping_event_breakdown(self):
        """
        Returns a dict of shipping events that this line has been through
        """
        status_map = SortedDict()
        for event in self.shipping_events.all():
            event_type = event.event_type
            event_name = event_type.name
            event_quantity = self.get_event_quantity(event).quantity
            if event_name in status_map:
                # Aggregate quantities of repeated events of the same type
                status_map[event_name]['quantity'] += event_quantity
            else:
                status_map[event_name] = {
                    'event_type': event_type,
                    'name': event_name,
                    'quantity': event_quantity
                }
        return status_map
    # Payment event helpers
    def is_payment_event_permitted(self, event_type, quantity):
        """
        Test whether a payment event with the given quantity is permitted.
        Allow each payment event type to occur only once per quantity.
        """
        current_qty = self.payment_event_quantity(event_type)
        return (current_qty + quantity) <= self.quantity
    def payment_event_quantity(self, event_type):
        """
        Return the quantity of this line that has been involved in a payment
        event of the passed type.
        """
        result = self.payment_event_quantities.filter(
            event__event_type=event_type).aggregate(Sum('quantity'))
        if result['quantity__sum'] is None:
            # No events of this type have occurred yet
            return 0
        else:
            return result['quantity__sum']
    @property
    def is_product_deleted(self):
        """
        Test whether the product this line was created from has been deleted
        """
        return self.product is None
    def is_available_to_reorder(self, basket, strategy):
        """
        Test if this line can be re-ordered using the passed strategy and
        basket
        Returns an (is_available, reason) tuple where reason is None when
        the line is available.
        """
        if not self.product:
            return False, (_("'%(title)s' is no longer available") %
                           {'title': self.title})
        try:
            basket_line = basket.lines.get(product=self.product)
        except basket.lines.model.DoesNotExist:
            desired_qty = self.quantity
        else:
            # Take account of the quantity already in the basket
            desired_qty = basket_line.quantity + self.quantity
        result = strategy.fetch_for_product(self.product)
        is_available, reason = result.availability.is_purchase_permitted(
            quantity=desired_qty)
        if not is_available:
            return False, reason
        return True, None
@python_2_unicode_compatible
class AbstractLineAttribute(models.Model):
    """
    An attribute of a line
    Stores a type/value pair against an order line.
    """
    line = models.ForeignKey(
        'order.Line', related_name='attributes',
        verbose_name=_("Line"))
    # Nullable FK with SET_NULL so the attribute record survives if the
    # related catalogue option is later deleted
    option = models.ForeignKey(
        'catalogue.Option', null=True, on_delete=models.SET_NULL,
        related_name="line_attributes", verbose_name=_("Option"))
    type = models.CharField(_("Type"), max_length=128)
    value = models.CharField(_("Value"), max_length=255)
    class Meta:
        abstract = True
        app_label = 'order'
        verbose_name = _("Line Attribute")
        verbose_name_plural = _("Line Attributes")
    def __str__(self):
        return "%s = %s" % (self.type, self.value)
@python_2_unicode_compatible
class AbstractLinePrice(models.Model):
    """
    For tracking the prices paid for each unit within a line.
    This is necessary as offers can lead to units within a line
    having different prices. For example, one product may be sold at
    50% off as it's part of an offer while the remainder are full price.
    """
    # Fixed copy-paste error: verbose_name was _("Option")
    order = models.ForeignKey(
        'order.Order', related_name='line_prices', verbose_name=_("Order"))
    line = models.ForeignKey(
        'order.Line', related_name='prices', verbose_name=_("Line"))
    quantity = models.PositiveIntegerField(_("Quantity"), default=1)
    price_incl_tax = models.DecimalField(
        _("Price (inc. tax)"), decimal_places=2, max_digits=12)
    price_excl_tax = models.DecimalField(
        _("Price (excl. tax)"), decimal_places=2, max_digits=12)
    # Fixed typo in the translatable label ("Shiping" -> "Shipping")
    shipping_incl_tax = models.DecimalField(
        _("Shipping (inc. tax)"), decimal_places=2, max_digits=12, default=0)
    shipping_excl_tax = models.DecimalField(
        _("Shipping (excl. tax)"), decimal_places=2, max_digits=12, default=0)
    class Meta:
        abstract = True
        app_label = 'order'
        ordering = ('id',)
        verbose_name = _("Line Price")
        verbose_name_plural = _("Line Prices")
    def __str__(self):
        return _("Line '%(number)s' (quantity %(qty)d) price %(price)s") % {
            'number': self.line,
            'qty': self.quantity,
            'price': self.price_incl_tax}
# PAYMENT EVENTS
@python_2_unicode_compatible
class AbstractPaymentEventType(models.Model):
    """
    Payment event types are things like 'Paid', 'Failed', 'Refunded'.
    These are effectively the transaction types.
    """
    name = models.CharField(_("Name"), max_length=128, unique=True)
    # The code is a slug auto-populated from the name on first save
    code = AutoSlugField(_("Code"), max_length=128, unique=True,
                         populate_from='name')
    class Meta:
        abstract = True
        app_label = 'order'
        verbose_name = _("Payment Event Type")
        verbose_name_plural = _("Payment Event Types")
        ordering = ('name', )
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class AbstractPaymentEvent(models.Model):
    """
    A payment event for an order
    For example:
    * All lines have been paid for
    * 2 lines have been refunded
    """
    order = models.ForeignKey(
        'order.Order', related_name='payment_events',
        verbose_name=_("Order"))
    amount = models.DecimalField(
        _("Amount"), decimal_places=2, max_digits=12)
    # The reference should refer to the transaction ID of the payment gateway
    # that was used for this event.
    reference = models.CharField(
        _("Reference"), max_length=128, blank=True)
    lines = models.ManyToManyField(
        'order.Line', through='PaymentEventQuantity',
        verbose_name=_("Lines"))
    event_type = models.ForeignKey(
        'order.PaymentEventType', verbose_name=_("Event Type"))
    # Allow payment events to be linked to shipping events. Often a shipping
    # event will trigger a payment event and so we can use this FK to capture
    # the relationship.
    shipping_event = models.ForeignKey(
        'order.ShippingEvent', related_name='payment_events',
        null=True)
    date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
    class Meta:
        abstract = True
        app_label = 'order'
        verbose_name = _("Payment Event")
        verbose_name_plural = _("Payment Events")
        # Newest events first
        ordering = ['-date_created']
    def __str__(self):
        return _("Payment event for order %s") % self.order
    def num_affected_lines(self):
        """
        Return the number of lines involved in this payment event
        """
        return self.lines.all().count()
class PaymentEventQuantity(models.Model):
    """
    A "through" model linking lines to payment events
    Note that, unlike most models in this module, this one is concrete
    (not abstract).
    """
    event = models.ForeignKey(
        'order.PaymentEvent', related_name='line_quantities',
        verbose_name=_("Event"))
    line = models.ForeignKey(
        'order.Line', related_name="payment_event_quantities",
        verbose_name=_("Line"))
    quantity = models.PositiveIntegerField(_("Quantity"))
    class Meta:
        app_label = 'order'
        verbose_name = _("Payment Event Quantity")
        verbose_name_plural = _("Payment Event Quantities")
        # A line may appear at most once per payment event
        unique_together = ('event', 'line')
# SHIPPING EVENTS
@python_2_unicode_compatible
class AbstractShippingEvent(models.Model):
    """
    An event is something which happens to a group of lines such as
    1 item being dispatched.
    """
    order = models.ForeignKey(
        'order.Order', related_name='shipping_events', verbose_name=_("Order"))
    lines = models.ManyToManyField(
        'order.Line', related_name='shipping_events',
        through='ShippingEventQuantity', verbose_name=_("Lines"))
    event_type = models.ForeignKey(
        'order.ShippingEventType', verbose_name=_("Event Type"))
    notes = models.TextField(
        _("Event notes"), blank=True,
        help_text=_("This could be the dispatch reference, or a "
                    "tracking number"))
    date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
    class Meta:
        abstract = True
        app_label = 'order'
        verbose_name = _("Shipping Event")
        verbose_name_plural = _("Shipping Events")
        # Newest events first
        ordering = ['-date_created']
    def __str__(self):
        return _("Order #%(number)s, type %(type)s") % {
            'number': self.order.number,
            'type': self.event_type}
    def num_affected_lines(self):
        """
        Return the number of lines involved in this shipping event
        """
        return self.lines.count()
@python_2_unicode_compatible
class ShippingEventQuantity(models.Model):
    """
    A "through" model linking lines to shipping events.
    This exists to track the quantity of a line that is involved in a
    particular shipping event.
    """
    event = models.ForeignKey(
        'order.ShippingEvent', related_name='line_quantities',
        verbose_name=_("Event"))
    line = models.ForeignKey(
        'order.Line', related_name="shipping_event_quantities",
        verbose_name=_("Line"))
    quantity = models.PositiveIntegerField(_("Quantity"))
    class Meta:
        app_label = 'order'
        verbose_name = _("Shipping Event Quantity")
        verbose_name_plural = _("Shipping Event Quantities")
        # A line may appear at most once per shipping event
        unique_together = ('event', 'line')
    def save(self, *args, **kwargs):
        """
        Default the quantity to the line's full quantity and validate it
        against the line before saving.
        Raises ``InvalidShippingEvent`` when the quantity is not permitted
        for this event type.
        """
        # Default quantity to full quantity of line
        if not self.quantity:
            self.quantity = self.line.quantity
        # Ensure we don't violate quantities constraint
        if not self.line.is_shipping_event_permitted(
                self.event.event_type, self.quantity):
            raise exceptions.InvalidShippingEvent
        super(ShippingEventQuantity, self).save(*args, **kwargs)
    def __str__(self):
        return _("%(product)s - quantity %(qty)d") % {
            'product': self.line.product,
            'qty': self.quantity}
@python_2_unicode_compatible
class AbstractShippingEventType(models.Model):
    """
    A type of shipping/fulfillment event
    Eg: 'Shipped', 'Cancelled', 'Returned'
    """
    # Name is the friendly description of an event
    name = models.CharField(_("Name"), max_length=255, unique=True)
    # Code is used in forms
    # (a slug auto-populated from the name on first save)
    code = AutoSlugField(_("Code"), max_length=128, unique=True,
                         populate_from='name')
    class Meta:
        abstract = True
        app_label = 'order'
        verbose_name = _("Shipping Event Type")
        verbose_name_plural = _("Shipping Event Types")
        ordering = ('name', )
    def __str__(self):
        return self.name
# DISCOUNTS
@python_2_unicode_compatible
class AbstractOrderDiscount(models.Model):
    """
    A discount against an order.
    Normally only used for display purposes so an order can be listed with
    discounts displayed separately even though in reality, the discounts are
    applied at the line level.
    This has evolved to be a slightly misleading class name as this really
    track benefit applications which aren't necessarily discounts.
    """
    order = models.ForeignKey(
        'order.Order', related_name="discounts", verbose_name=_("Order"))
    # We need to distinguish between basket discounts, shipping discounts and
    # 'deferred' discounts.
    BASKET, SHIPPING, DEFERRED = "Basket", "Shipping", "Deferred"
    CATEGORY_CHOICES = (
        (BASKET, _(BASKET)),
        (SHIPPING, _(SHIPPING)),
        (DEFERRED, _(DEFERRED)),
    )
    category = models.CharField(
        _("Discount category"), default=BASKET, max_length=64,
        choices=CATEGORY_CHOICES)
    # The offer/voucher are referenced by plain IDs rather than FKs; the
    # ``offer``/``voucher`` properties below resolve them lazily and return
    # None if they have since been deleted. The name/code are denormalised
    # onto this model in save() for the same reason.
    offer_id = models.PositiveIntegerField(
        _("Offer ID"), blank=True, null=True)
    offer_name = models.CharField(
        _("Offer name"), max_length=128, db_index=True, blank=True)
    voucher_id = models.PositiveIntegerField(
        _("Voucher ID"), blank=True, null=True)
    voucher_code = models.CharField(
        _("Code"), max_length=128, db_index=True, blank=True)
    frequency = models.PositiveIntegerField(_("Frequency"), null=True)
    amount = models.DecimalField(
        _("Amount"), decimal_places=2, max_digits=12, default=0)
    # Post-order offer applications can return a message to indicate what
    # action was taken after the order was placed.
    message = models.TextField(blank=True)
    @property
    def is_basket_discount(self):
        # True for discounts applied against the basket total
        return self.category == self.BASKET
    @property
    def is_shipping_discount(self):
        # True for discounts applied against the shipping charge
        return self.category == self.SHIPPING
    @property
    def is_post_order_action(self):
        # True for deferred benefits applied after order placement
        return self.category == self.DEFERRED
    class Meta:
        abstract = True
        app_label = 'order'
        verbose_name = _("Order Discount")
        verbose_name_plural = _("Order Discounts")
    def save(self, **kwargs):
        # Backfill the denormalised offer name / voucher code from the
        # related objects when they haven't been set explicitly.
        if self.offer_id and not self.offer_name:
            offer = self.offer
            if offer:
                self.offer_name = offer.name
        if self.voucher_id and not self.voucher_code:
            voucher = self.voucher
            if voucher:
                self.voucher_code = voucher.code
        super(AbstractOrderDiscount, self).save(**kwargs)
    def __str__(self):
        return _("Discount of %(amount)r from order %(order)s") % {
            'amount': self.amount, 'order': self.order}
    @property
    def offer(self):
        """
        Return the ConditionalOffer this discount relates to, or None if it
        no longer exists.
        """
        # NOTE(review): models.get_model was removed in Django 1.9; this file
        # targets older Django versions (cf. the SortedDict import).
        Offer = models.get_model('offer', 'ConditionalOffer')
        try:
            return Offer.objects.get(id=self.offer_id)
        except Offer.DoesNotExist:
            return None
    @property
    def voucher(self):
        """
        Return the Voucher this discount relates to, or None if it no longer
        exists.
        """
        Voucher = models.get_model('voucher', 'Voucher')
        try:
            return Voucher.objects.get(id=self.voucher_id)
        except Voucher.DoesNotExist:
            return None
    def description(self):
        """
        Return a short description: the voucher code when present, otherwise
        the offer name.
        """
        if self.voucher_code:
            return self.voucher_code
        return self.offer_name or u""
|
|
from __future__ import absolute_import
import os
import numpy as np
import scipy
from scipy.misc import logsumexp
from scipy.special import gammaln, beta
from .dirichlet import log_dirichlet_density
from scipy.integrate import simps
from numpy import newaxis as na
import pypolyagamma as ppg
def initialize_polya_gamma_samplers():
    """
    Build one pypolyagamma sampler per OpenMP thread, each seeded from
    numpy's global RNG.

    :return: list of ppg.PyPolyaGamma instances, one per thread
    """
    # An explicit OMP_NUM_THREADS in the environment overrides the library's
    # own thread count.
    num_threads = (int(os.environ["OMP_NUM_THREADS"])
                   if "OMP_NUM_THREADS" in os.environ
                   else ppg.get_omp_num_threads())
    assert num_threads > 0
    # One independent seed per sampler
    seeds = np.random.randint(2 ** 16, size=num_threads)
    return [ppg.PyPolyaGamma(s) for s in seeds]
def initialize_pyrngs():
    """
    Build one gslrandom.PyRNG per OpenMP thread, each seeded from numpy's
    global RNG.

    :return: list of PyRNG instances, one per thread
    """
    from gslrandom import PyRNG, get_omp_num_threads
    if "OMP_NUM_THREADS" in os.environ:
        # BUG FIX: environment variables are strings; cast to int so the
        # sanity check below does not compare str with int (a TypeError on
        # Python 3) and so downstream consumers get an integer count.
        # This also matches initialize_polya_gamma_samplers above.
        num_threads = int(os.environ["OMP_NUM_THREADS"])
    else:
        num_threads = get_omp_num_threads()
    assert num_threads > 0
    # Choose random seeds, one per RNG
    seeds = np.random.randint(2**16, size=num_threads)
    return [PyRNG(seed) for seed in seeds]
def log_polya_gamma_density(x, b, c, trunc=1000):
    """
    Evaluate the log density of the Polya-gamma distribution PG(b, c) at x
    via its alternating-series representation, truncated at `trunc` terms.

    :param x: scalar or 1D array of evaluation points (x > 0)
    :param b: PG shape parameter
    :param c: PG tilting parameter
    :param trunc: number of series terms to keep
    :return: 1D array of log density values, one per entry of x
    """
    if np.isscalar(x):
        xx = np.array([[x]])
    else:
        assert x.ndim == 1
        xx = x[:,None]
    logf = np.zeros(xx.size)
    # x-independent prefactor
    logf += b * np.log(np.cosh(c/2.))
    logf += (b-1) * np.log(2) - gammaln(b)
    # Compute the terms in the summation.
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    ns = np.arange(trunc)[None,:].astype(float)
    terms = np.zeros_like(ns, dtype=float)
    terms += gammaln(ns+b) - gammaln(ns+1)
    terms += np.log(2*ns+b) - 0.5 * np.log(2*np.pi)
    # Compute the terms that depend on x
    terms = terms - 3./2*np.log(xx)
    terms += -(2*ns+b)**2 / (8*xx)
    terms += -c**2/2. * xx
    # The series alternates in sign, so a plain logsumexp does not apply:
    # subtract the per-point max, exponentiate, and dot with (-1)^n before
    # taking the log again for numerical stability.
    maxlogf = np.amax(terms, axis=1)[:,None]
    logf += np.log(np.exp(terms - maxlogf).dot((-1.0)**ns.T)).ravel() \
            + maxlogf.ravel()
    return logf
def polya_gamma_density(x, b, c, trunc=1000):
    """Truncated PG(b, c) density; .real discards any imaginary residue."""
    log_pdf = log_polya_gamma_density(x, b, c, trunc)
    return np.exp(log_pdf).real
def logistic(x):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    denom = 1. + np.exp(-x)
    return 1. / denom
def logit(p):
    """Inverse of the logistic sigmoid: log(p / (1 - p))."""
    odds = p / (1 - p)
    return np.log(odds)
def psi_to_pi(psi, axis=None):
    """
    Convert stick-breaking coordinates psi into a probability vector pi.

    Each psi[k] passes through the logistic function to give the fraction
    of the remaining "stick" assigned to entry k; the leftover stick
    becomes the final entry of pi.

    :param psi: length K-1 vector, (M, K-1) array, or ND array with the
        K-1 stick-breaking coordinates along `axis`
    :param axis: axis holding the coordinates, or None to dispatch on
        psi.ndim (1 or 2 only)
    :return: probability array with K entries along the transformed axis
    """
    if axis is None:
        if psi.ndim == 1:
            K = psi.size + 1
            pi = np.zeros(K)
            # Set pi[1..K-1] by peeling off fractions of the stick
            stick = 1.0
            for k in range(K-1):
                pi[k] = logistic(psi[k]) * stick
                stick -= pi[k]
            # The remaining stick is the last entry
            pi[-1] = stick
            assert np.allclose(pi.sum(), 1.0)
        elif psi.ndim == 2:
            M, Km1 = psi.shape
            K = Km1 + 1
            pi = np.zeros((M,K))
            # One stick per row
            stick = np.ones(M)
            for k in range(K-1):
                pi[:,k] = logistic(psi[:,k]) * stick
                stick -= pi[:,k]
            pi[:,-1] = stick
            assert np.allclose(pi.sum(axis=1), 1.0)
        else:
            raise ValueError("psi must be 1 or 2D")
    else:
        K = psi.shape[axis] + 1
        pi = np.zeros([psi.shape[dim] if dim != axis else K for dim in range(psi.ndim)])
        stick = np.squeeze(np.ones([psi.shape[dim] if dim != axis else 1 for dim in range(psi.ndim)]))
        for k in range(K-1):
            # BUG FIX: index with a tuple. Indexing an ndarray with a list
            # of slices was deprecated in NumPy 1.15 and is now an error.
            inds = tuple(slice(None) if dim != axis else k for dim in range(psi.ndim))
            pi[inds] = logistic(psi[inds]) * stick
            stick -= pi[inds]
        last = tuple(slice(None) if dim != axis else -1 for dim in range(psi.ndim))
        pi[last] = stick
        assert np.allclose(pi.sum(axis=axis), 1.)
    return pi
def pi_to_psi(pi):
    """
    Invert the stick-breaking construction, mapping a probability vector
    pi back to the unconstrained coordinates psi.

    :param pi: length-K probability vector, or an (M, K) array of rows
    :return: length K-1 psi vector (or (M, K-1) array)
    """
    if pi.ndim == 1:
        K = pi.size
        assert np.allclose(pi.sum(), 1.0)
        psi = np.zeros(K-1)
        remaining = 1.0
        for k in range(K-1):
            # Entry k takes the fraction pi[k]/remaining of the leftover stick
            psi[k] = logit(pi[k] / remaining)
            remaining -= pi[k]
        # Whatever stick is left must match the final entry
        assert np.allclose(remaining, pi[-1])
    elif pi.ndim == 2:
        M, K = pi.shape
        assert np.allclose(pi.sum(axis=1), 1.0)
        psi = np.zeros((M, K-1))
        remaining = np.ones(M)
        for k in range(K-1):
            psi[:, k] = logit(pi[:, k] / remaining)
            remaining -= pi[:, k]
        assert np.allclose(remaining, pi[:, -1])
    else:
        raise NotImplementedError
    return psi
def det_jacobian_pi_to_psi(pi):
    """
    Compute |J| = |d psi_j / d pi_k|, the determinant of the Jacobian of
    the stick-breaking map from pi to psi.

    The Jacobian is lower triangular, so the determinant is the product
    of its diagonal, which is available in closed form (see the appendix
    of the draft).

    :param pi: K dimensional probability vector
    :return: scalar determinant
    """
    K = pi.size
    # The Jacobian is (K-1) x (K-1); accumulate its diagonal entries.
    # BUG FIX: xrange is Python 2 only; range works on both 2 and 3.
    diag = np.zeros(K-1)
    for k in range(K-1):
        diag[k] = (1.0 - pi[:k].sum()) / (pi[k] * (1 - pi[:(k+1)].sum()))
    det_jacobian = diag.prod()
    return det_jacobian
def det_jacobian_psi_to_pi(psi):
    """
    Determinant of the Jacobian of the inverse map psi -> pi: the
    reciprocal of det_jacobian_pi_to_psi evaluated at pi(psi).

    :param psi: stick-breaking coordinate vector
    :return: scalar determinant
    """
    return 1.0 / det_jacobian_pi_to_psi(psi_to_pi(psi))
def dirichlet_to_psi_density(pi_mesh, alpha):
    """
    Push a Dirichlet(alpha) density on pi through the stick-breaking map
    to obtain the induced density over psi (change of variables).

    :param pi_mesh: (N, K) array of probability vectors
    :param alpha: Dirichlet concentration parameter(s)
    :return: (psi_mesh, psi_pdf) restricted to mesh points with finite psi
    """
    # BUG FIX: map() returns a lazy iterator on Python 3, so
    # np.array(map(...)) no longer builds an array of rows; materialize
    # with a list comprehension instead.
    psi_mesh = np.array([pi_to_psi(pi) for pi in pi_mesh])
    valid_psi = np.all(np.isfinite(psi_mesh), axis=1)
    psi_mesh = psi_mesh[valid_psi,:]
    # Det of the Jacobian of the inverse mapping
    det_jacobian = 1.0 / np.array([det_jacobian_pi_to_psi(pi) for pi in pi_mesh])
    det_jacobian = det_jacobian[valid_psi]
    # Dirichlet density at each mesh point
    pi_pdf = np.exp(log_dirichlet_density(pi_mesh, alpha=alpha))
    pi_pdf = pi_pdf[valid_psi]
    # Change of variables: scale by the det of the Jacobian
    psi_pdf = pi_pdf * det_jacobian
    return psi_mesh, psi_pdf
def dirichlet_to_psi_density_closed_form(pi_mesh, alpha):
    """
    Closed-form density over psi induced by a Dirichlet(alpha) prior on
    pi under the stick-breaking transformation.

    :param pi_mesh: (N, K) array of probability vectors
    :param alpha: length-K Dirichlet concentration vector
    :return: (psi_mesh, psi_pdf) restricted to mesh points with finite psi
    """
    # BUG FIX: materialize map() with a list comprehension for Python 3.
    psi_mesh = np.array([pi_to_psi(pi) for pi in pi_mesh])
    valid_psi = np.all(np.isfinite(psi_mesh), axis=1)
    psi_mesh = psi_mesh[valid_psi,:]
    # Dirichlet normalization constant
    Z = np.exp(gammaln(alpha.sum()) - gammaln(alpha).sum())
    sigma_psi = logistic(psi_mesh)
    sigma_negpsi = logistic(-psi_mesh)
    # alpha_sum should be [\sum_{j=2}^K \alpha_j, ..., \alpha_{K-1} + \alpha_K, \alpha_K]
    alpha_sum = np.cumsum(alpha[::-1])[::-1][1:]
    psi_pdf = sigma_psi**alpha[None, :-1] * sigma_negpsi**alpha_sum[None, :]
    psi_pdf = Z * psi_pdf.prod(axis=1)
    return psi_mesh, psi_pdf
def gaussian_to_pi_density(psi_mesh, mu, Sigma):
    """
    Push a Gaussian N(mu, Sigma) density on psi through the stick-breaking
    map to obtain the induced density on the probability simplex.

    :param psi_mesh: (N, K-1) array of psi points
    :param mu: Gaussian mean (length K-1)
    :param Sigma: Gaussian covariance, (K-1) x (K-1)
    :return: (pi_mesh, pi_pdf) restricted to mesh points with finite pi
    """
    # BUG FIX: materialize map() with a list comprehension for Python 3.
    pi_mesh = np.array([psi_to_pi(psi) for psi in psi_mesh])
    valid_pi = np.all(np.isfinite(pi_mesh), axis=1)
    pi_mesh = pi_mesh[valid_pi,:]
    # Det of the Jacobian of the inverse mapping
    det_jacobian = np.array([det_jacobian_pi_to_psi(pi) for pi in pi_mesh])
    det_jacobian = det_jacobian[valid_pi]
    # Multivariate Gaussian density over psi
    from scipy.stats import multivariate_normal
    psi_dist = multivariate_normal(mu, Sigma)
    psi_pdf = psi_dist.pdf(psi_mesh)
    psi_pdf = psi_pdf[valid_pi]
    # Change of variables: scale by the det of the Jacobian
    pi_pdf = psi_pdf * det_jacobian
    return pi_mesh, pi_pdf
def ln_psi_to_pi(psi):
    """
    Map unnormalized real coordinates psi to a probability vector pi via
    the softmax (logistic-normal) transform.

    :param psi: length-K vector, or an (M, K) array of row vectors
    :return: probability vector(s) of the same shape
    """
    if psi.ndim == 1:
        log_norm = logsumexp(psi)
    elif psi.ndim == 2:
        # Normalize each row independently
        log_norm = logsumexp(psi, axis=1)[:, None]
    return np.exp(psi - log_norm)
def ln_pi_to_psi(pi, scale=1.0):
    """
    Invert ln_psi_to_pi up to an arbitrary scaling of the psis.

    The softmax map is many-to-one, so the inverse is only well defined
    once the total mass exp(psi).sum() == scale is fixed.

    :param pi: length-K probability vector, or (M, K) rows of such vectors
    :param scale: positive total mass assigned to exp(psi)
    :return: real-valued psi with the same shape as pi
    """
    assert scale > 0
    if pi.ndim == 1:
        assert np.allclose(pi.sum(), 1.0)
    elif pi.ndim == 2:
        assert np.allclose(pi.sum(1), 1.0)
    return np.log(pi) + np.log(scale)
def compute_uniform_mean_psi(K, alpha=2):
    """
    Mean and diagonal covariance of a Gaussian over psi that approximately
    matches a symmetric Dirichlet(alpha) prior over a length-K pi.

    :param K: number of entries in pi
    :param alpha: symmetric Dirichlet concentration
    :return: (mu, diag(sigma)) for the (K-1)-dimensional psi
    """
    concentrations = alpha * np.ones(K)
    mu, sigma = compute_psi_cmoments(concentrations)
    return mu, np.diag(sigma)
def compute_psi_cmoments(alphas):
    """
    Numerically compute the marginal mean and variance of each psi_k
    implied by a Dirichlet(alphas) distribution, integrating the
    closed-form marginal density on a fixed grid with Simpson's rule.

    :param alphas: length-K concentration vector
    :return: (mu, sigma) arrays of length K-1
    """
    K = alphas.shape[0]
    grid = np.linspace(-10, 10, 1000)
    mu = np.zeros(K-1)
    sigma = np.zeros(K-1)
    for k in range(K-1):
        # The marginal of psi_k depends on alpha_k and the tail sum
        density = get_density(alphas[k], alphas[k+1:].sum())
        vals = density(grid)
        mu[k] = simps(grid * vals, grid)
        sigma[k] = simps(grid**2 * vals, grid) - mu[k]**2
    return mu, sigma
def get_density(alpha_k, alpha_rest):
    """
    Closed-form marginal density of a single stick-breaking psi when the
    stick fraction is Beta(alpha_k, alpha_rest) distributed.

    :return: a callable density(psi) accepting scalars or arrays
    """
    norm = scipy.special.beta(alpha_k, alpha_rest)
    def density(psi):
        return logistic(psi)**alpha_k * logistic(-psi)**alpha_rest / norm
    return density
def plot_psi_marginals(alphas):
    """
    Plot the marginal density (top subplot) and log density (bottom
    subplot) of each stick-breaking psi_k implied by a Dirichlet(alphas)
    prior, on a fixed grid of psi values.

    :param alphas: length-K Dirichlet concentration vector
    """
    K = alphas.shape[0]
    # Fixed evaluation grid for the densities
    psi = np.linspace(-10,10,1000)
    import matplotlib.pyplot as plt
    plt.figure()
    for k in range(K-1):
        # The marginal of psi_k depends on alpha_k and the tail sum of alphas
        density = get_density(alphas[k], alphas[k+1:].sum())
        plt.subplot(2,1,1)
        plt.plot(psi,density(psi),label='psi_%d' % k)
        plt.subplot(2,1,2)
        plt.plot(psi,np.log(density(psi)),label='psi_%d' % k)
    plt.subplot(2,1,1)
    plt.legend()
def N_vec(x, axis=None):
    """
    Compute the count vector N for Polya-gamma augmented multinomial
    inference: N[k] is the number of trials remaining when the k-th
    stick-breaking coordinate is drawn.

    :param x: 1D or 2D count array, or ND array with counts along `axis`
    :param axis: axis holding the counts, or None to dispatch on x.ndim
    :return: array with K-1 entries along the count axis
    """
    if axis is None:
        if x.ndim == 1:
            N = x.sum()
            return np.concatenate(([N], N - np.cumsum(x)[:-2]))
        elif x.ndim == 2:
            N = x.sum(axis=1)
            return np.hstack((N[:,None], N[:,None] - np.cumsum(x, axis=1)[:,:-2]))
        else:
            raise ValueError("x must be 1 or 2D")
    else:
        # BUG FIX: build tuple indices. Indexing an ndarray with a list of
        # slices/None was deprecated in NumPy 1.15 and is now an error.
        inds = tuple(slice(None) if dim != axis else None for dim in range(x.ndim))
        inds2 = tuple(slice(None) if dim != axis else slice(None,-2) for dim in range(x.ndim))
        N = x.sum(axis=axis)
        return np.concatenate((N[inds], N[inds] - np.cumsum(x,axis=axis)[inds2]), axis=axis)
def kappa_vec(x, axis=None):
    """
    Compute kappa = x - N/2 for Polya-gamma augmented multinomial
    inference, dropping the final count (it is determined by the rest).

    :param x: 1D or 2D count array, or ND array with counts along `axis`
    :param axis: axis holding the counts, or None to dispatch on x.ndim
    :return: array with K-1 entries along the count axis
    """
    if axis is None:
        if x.ndim == 1:
            return x[:-1] - N_vec(x)/2.0
        elif x.ndim == 2:
            return x[:,:-1] - N_vec(x)/2.0
        else:
            raise ValueError("x must be 1 or 2D")
    else:
        # BUG FIX: tuple index -- list-of-slice ndarray indexing is an
        # error on modern NumPy.
        inds = tuple(slice(None) if dim != axis else slice(None,-1) for dim in range(x.ndim))
        return x[inds] - N_vec(x, axis)/2.0
# is this doing overlapping work with dirichlet_to_psi_density_closed_form?
def get_marginal_psi_density(alpha_k, alpha_rest):
    """
    Marginal density of one stick-breaking psi whose stick fraction is
    Beta(alpha_k, alpha_rest) distributed.

    :return: a callable density(psi) accepting scalars or arrays
    """
    norm = beta(alpha_k, alpha_rest)
    def density(psi):
        return logistic(psi)**alpha_k * logistic(-psi)**alpha_rest / norm
    return density
def dirichlet_to_psi_meanvar(alphas, psigrid=np.linspace(-10,10,1000)):
    """
    Numerically compute the marginal mean and variance of each psi_k
    under a Dirichlet(alphas) prior by Simpson's-rule quadrature.

    :param alphas: length-K concentration vector
    :param psigrid: grid of psi values used for the integration (never
        mutated, so the array default is safe)
    :return: [means, variances], numpy arrays of length K-1
    """
    K = alphas.shape[0]
    def meanvar(k):
        # The marginal of psi_k depends on alpha_k and the tail sum
        density = get_marginal_psi_density(alphas[k], alphas[k+1:].sum())
        mean = simps(psigrid*density(psigrid), psigrid)
        second_moment = simps(psigrid**2*density(psigrid), psigrid)
        return mean, second_moment - mean**2
    # BUG FIX: map() is a lazy iterator on Python 3; materialize it so the
    # caller gets a list as on Python 2.
    return list(map(np.array, zip(*[meanvar(k) for k in range(K-1)])))
def cumsum(v, strict=False):
    """
    Cumulative sum along axis 0.

    With strict=True the sum is exclusive: out[i] = v[:i].sum(axis=0),
    so out[0] is zero and the final element never contributes.
    """
    if strict:
        shifted = np.zeros_like(v)
        shifted[1:] = np.cumsum(v[:-1], axis=0)
        return shifted
    return np.cumsum(v, axis=0)
def list_split(lst, num):
    """
    Partition lst into `num` contiguous pieces whose sizes match the
    round-robin strides lst[0::num], lst[1::num], ...

    :param lst: sequence to split
    :param num: number of pieces, with 0 < num <= len(lst)
    :return: list of `num` contiguous sublists covering lst
    """
    assert 0 < num <= len(lst)
    piece_sizes = [len(lst[offset::num]) for offset in range(num)]
    starts = cumsum(piece_sizes, strict=True)
    stops = cumsum(piece_sizes, strict=False)
    return [lst[a:b] for a, b in zip(starts, stops)]
def flatten(lst):
    """Concatenate one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sub in lst:
        flat.extend(sub)
    return flat
def plot_gaussian_2D(mu, Sigma, color='b',centermarker=True,label='',alpha=1.,ax=None,artists=None):
    """
    Draw (or update) a 2D Gaussian as a one-standard-deviation ellipse,
    optionally with a diamond marker at the mean.

    :param mu: length-2 mean
    :param Sigma: 2x2 covariance; must be positive definite (its Cholesky
        factor is taken)
    :param color: matplotlib color for both the ellipse and marker
    :param centermarker: if True, also draw a point at mu
    :param label: legend label for the ellipse line
    :param alpha: matplotlib alpha for both artists
    :param ax: axes to draw on (defaults to the current axes)
    :param artists: existing (line, point) artists to update in place
        instead of creating new ones (e.g. for animation)
    :return: (line, point) when a center marker exists, else (line,)
    """
    from matplotlib import pyplot as plt
    ax = ax if ax else plt.gca()
    # Parametrize the unit circle (closed by appending t=0), then map it
    # through chol(Sigma) to get the covariance ellipse.
    t = np.hstack([np.arange(0,2*np.pi,0.01),0])
    circle = np.vstack([np.sin(t),np.cos(t)])
    ellipse = np.dot(np.linalg.cholesky(Sigma),circle)
    if artists is None:
        # Create fresh artists
        point = ax.scatter([mu[0]],[mu[1]],marker='D',color=color,s=4,alpha=alpha) \
            if centermarker else None
        line, = ax.plot(
            ellipse[0,:] + mu[0], ellipse[1,:] + mu[1],linestyle='-',
            linewidth=2,color=color,label=label,alpha=alpha)
    else:
        # Mutate the caller-supplied artists in place
        line = artists[0]
        if centermarker:
            point = artists[1]
            point.set_offsets(np.atleast_2d(mu))
            point.set_alpha(alpha)
            point.set_color(color)
        else:
            point = None
        line.set_xdata(ellipse[0,:] + mu[0])
        line.set_ydata(ellipse[1,:] + mu[1])
        line.set_alpha(alpha)
        line.set_color(color)
    return (line, point) if point else (line,)
def solve_diagonal_plus_lowrank(diag_of_A, B, C, b):
    '''
    Solve (diag(diag_of_A) + B C) x = b without forming the full matrix,
    using the matrix inversion lemma. b may be a vector or a matrix of
    right-hand sides.

    See p.673 of Convex Optimization by Boyd and Vandenberghe.
    '''
    # TODO write a psd version where B=C.T
    vector_rhs = (b.ndim == 1)
    if vector_rhs:
        b = np.reshape(b, (-1, 1))
    d = diag_of_A[:, None]
    x = b / d
    # Small (k x k) capacitance matrix E = I + C diag(A)^{-1} B
    E = C.dot(B / d)
    E.flat[::E.shape[0] + 1] += 1
    w = np.linalg.solve(E, C.dot(x))
    x -= B.dot(w) / d
    return x.ravel() if vector_rhs else x
def mkdir(path):
    """Create path and any missing parents; an already-existing directory
    is not an error (like ``mkdir -p``)."""
    # http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    import errno
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise anything that is not "directory already exists"
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def downsample_data_slow(X, n):
    """
    Downsample each row of X so it sums to n by removing counts one at a
    time, chosen proportionally to the current counts.

    Slow reference implementation; see downsample_data for a faster one.

    :param X: (D, K) array of nonnegative counts, each row summing to >= n
    :param n: target row sum
    :return: downsampled copy of X
    """
    from pybasicbayes.util.stats import sample_discrete
    assert X.ndim == 2
    Xsub = X.copy()
    # BUG FIX: xrange is Python 2 only; use range.
    for i in range(Xsub.shape[0]):
        Mi = int(Xsub[i].sum())
        assert Mi >= n
        p = Xsub[i] / float(Mi)
        # Remove one count at a time until the row sums to n,
        # renormalizing the removal probabilities after each draw.
        for m in range(Mi-n):
            k = sample_discrete(p)
            assert Xsub[i,k] > 0
            Xsub[i,k] -= 1
            p = Xsub[i] / float(Xsub[i].sum())
        assert Xsub[i].sum() == n
    return Xsub
def downsample_data(X, n):
    """
    Downsample each row of X so it sums to n by sampling n of its counts
    uniformly at random without replacement.

    :param X: (D, K) array of nonnegative integer counts
    :param n: target row sum
    :return: (D, K) float array whose rows each sum to n
    """
    from pybasicbayes.util.general import ibincount
    assert X.ndim == 2
    D,K = X.shape
    # BUG FIX: np.int/np.float were deprecated in NumPy 1.20 and removed in
    # 1.24; the builtins are the documented replacements.
    Xsub = X.copy().astype(int)
    # BUG FIX: xrange is Python 2 only; use range.
    for d in range(D):
        # Expand counts into individual events, subsample, and re-bin
        xi = ibincount(Xsub[d])
        Xsub[d] = np.bincount(np.random.choice(xi, size=n, replace=False), minlength=K)
        assert Xsub[d].sum() == n
    return Xsub.astype(float)
|
|
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from osprofiler import profiler
from mistral.db import utils as db_utils
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models as db_models
from mistral.engine import action_handler
from mistral.engine import action_queue
from mistral.engine import base
from mistral.engine import workflow_handler as wf_handler
from mistral import exceptions
from mistral import utils as u
from mistral.workflow import states
# Submodules of mistral.engine will throw NoSuchOptError if configuration
# options required at top level of this __init__.py are not imported before
# the submodules are referenced.
class DefaultEngine(base.Engine):
    """Default Mistral engine implementation.
    Every public operation runs inside a database transaction, is retried
    on transient DB errors (db_utils.retry_on_db_error) and processes the
    action queue on the way out (action_queue.process). Methods return
    detached clones of the DB models so callers never hold live session
    objects.
    """
    @db_utils.retry_on_db_error
    @action_queue.process
    @profiler.trace('engine-start-workflow', hide_args=True)
    def start_workflow(self, wf_identifier, wf_namespace='', wf_ex_id=None,
                       wf_input=None, description='', **params):
        """Start a new workflow execution.

        :param wf_identifier: Workflow name or ID.
        :param wf_namespace: Optional namespace; when set it is also
            propagated to the workflow via params['namespace'].
        :param wf_ex_id: Optional pre-generated execution ID. If an
            execution with this ID already exists, that execution is
            returned instead of creating a duplicate.
        :param wf_input: Workflow input dict (defaults to {}).
        :param description: Human-readable execution description.
        :param params: Additional workflow parameters (e.g. 'notify').
        :return: Detached clone of the workflow execution model.
        """
        if wf_namespace:
            params['namespace'] = wf_namespace
        # Merge globally configured notification targets into the
        # per-execution 'notify' list.
        if cfg.CONF.notifier.notify:
            if 'notify' not in params or not params['notify']:
                params['notify'] = []
            params['notify'].extend(cfg.CONF.notifier.notify)
        try:
            with db_api.transaction():
                wf_ex = wf_handler.start_workflow(
                    wf_identifier,
                    wf_namespace,
                    wf_ex_id,
                    wf_input or {},
                    description,
                    params
                )
                return wf_ex.get_clone()
        except exceptions.DBDuplicateEntryError:
            # NOTE(akovi): the workflow execution with a provided
            # wf_ex_id may already exist. In this case, simply
            # return the existing entity.
            with db_api.transaction():
                wf_ex = db_api.get_workflow_execution(wf_ex_id)
                return wf_ex.get_clone()
    @db_utils.retry_on_db_error
    @action_queue.process
    def start_action(self, action_name, action_input,
                     description=None, **params):
        """Run a standalone action (outside any workflow).

        Asynchronous actions -- and synchronous ones whose result should
        be saved without running inline -- are scheduled and their
        execution record is returned immediately. Otherwise the action
        runs inline and its result is returned, persisted only when
        'save_result' is set.

        :param action_name: Name of the action to run.
        :param action_input: Input dict, validated against the action.
        :param description: Optional description stored with the result.
        :param params: Recognized keys: run_sync, save_result, target,
            timeout.
        :return: Action execution model (a transient, unsaved object when
            the result is not persisted).
        :raises InputException: if run_sync is requested for an action
            that does not support synchronous execution.
        """
        with db_api.transaction():
            action = action_handler.build_action_by_name(action_name)
            action.validate_input(action_input)
            sync = params.get('run_sync')
            save = params.get('save_result')
            target = params.get('target')
            timeout = params.get('timeout')
            is_action_sync = action.is_sync(action_input)
            if sync and not is_action_sync:
                raise exceptions.InputException(
                    "Action does not support synchronous execution.")
            if not sync and (save or not is_action_sync):
                # Fire-and-forget: schedule and return the execution record.
                action.schedule(action_input, target, timeout=timeout)
                return action.action_ex.get_clone()
            # Run inline without persisting intermediate state.
            output = action.run(action_input, target, save=False,
                                timeout=timeout)
            state = states.SUCCESS if output.is_success() else states.ERROR
            if not save:
                # Action execution is not created but we need to return similar
                # object to the client anyway.
                return db_models.ActionExecution(
                    name=action_name,
                    description=description,
                    input=action_input,
                    output=output.to_dict(),
                    state=state
                )
            action_ex_id = u.generate_unicode_uuid()
            values = {
                'id': action_ex_id,
                'name': action_name,
                'description': description,
                'input': action_input,
                'output': output.to_dict(),
                'state': state,
            }
            return db_api.create_action_execution(values)
    @db_utils.retry_on_db_error
    @action_queue.process
    @profiler.trace('engine-on-action-complete', hide_args=True)
    def on_action_complete(self, action_ex_id, result, wf_action=False,
                           async_=False):
        """Handle completion of an action (or sub-workflow) execution.

        :param action_ex_id: Action execution ID; interpreted as a
            workflow execution ID when wf_action is True.
        :param result: Action result to record.
        :param wf_action: True if the "action" is actually a sub-workflow.
        :param async_: Unused here; part of the engine interface.
        :return: Detached clone of the (possibly updated) execution.
        """
        with db_api.transaction():
            if wf_action:
                action_ex = db_api.get_workflow_execution(action_ex_id)
            else:
                action_ex = db_api.get_action_execution(action_ex_id)
            action_handler.on_action_complete(action_ex, result)
            return action_ex.get_clone()
    @db_utils.retry_on_db_error
    @action_queue.process
    @profiler.trace('engine-on-action-update', hide_args=True)
    def on_action_update(self, action_ex_id, state, wf_action=False,
                         async_=False):
        """Handle a state update of an action (or sub-workflow) execution.

        :param action_ex_id: Action execution ID; interpreted as a
            workflow execution ID when wf_action is True.
        :param state: New state to apply.
        :param wf_action: True if the "action" is actually a sub-workflow.
        :param async_: Unused here; part of the engine interface.
        :return: Detached clone of the (possibly updated) execution.
        """
        with db_api.transaction():
            if wf_action:
                action_ex = db_api.get_workflow_execution(action_ex_id)
            else:
                action_ex = db_api.get_action_execution(action_ex_id)
            action_handler.on_action_update(action_ex, state)
            return action_ex.get_clone()
    @db_utils.retry_on_db_error
    @action_queue.process
    def pause_workflow(self, wf_ex_id):
        """Pause the given workflow execution and return its clone."""
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex_id)
            wf_handler.pause_workflow(wf_ex)
            return wf_ex.get_clone()
    @db_utils.retry_on_db_error
    @action_queue.process
    def rerun_workflow(self, task_ex_id, reset=True, env=None):
        """Rerun the workflow from the given task execution.

        :param task_ex_id: Task execution to rerun from.
        :param reset: Whether to reset the task state before rerunning.
        :param env: Optional environment override.
        :return: Detached clone of the enclosing workflow execution.
        """
        with db_api.transaction():
            task_ex = db_api.get_task_execution(task_ex_id)
            wf_ex = task_ex.workflow_execution
            wf_handler.rerun_workflow(wf_ex, task_ex, reset=reset, env=env)
            return wf_ex.get_clone()
    @db_utils.retry_on_db_error
    @action_queue.process
    def resume_workflow(self, wf_ex_id, env=None):
        """Resume a paused workflow execution and return its clone."""
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex_id)
            wf_handler.resume_workflow(wf_ex, env=env)
            return wf_ex.get_clone()
    @db_utils.retry_on_db_error
    @action_queue.process
    def stop_workflow(self, wf_ex_id, state, message=None):
        """Force a workflow execution into the given terminal state.

        :param wf_ex_id: Workflow execution ID.
        :param state: Target state.
        :param message: Optional state-change message.
        :return: Detached clone of the workflow execution.
        """
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex_id)
            wf_handler.stop_workflow(wf_ex, state, message)
            return wf_ex.get_clone()
    def rollback_workflow(self, wf_ex_id):
        """Not implemented yet."""
        # TODO(rakhmerov): Implement.
        raise NotImplementedError
|
|
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import json
import logging
import re
from webkitpy.layout_tests.layout_package.json_results_generator import convert_times_trie_to_flat_paths
from webkitpy.layout_tests.models import test_expectations
_log = logging.getLogger(__name__)
class LayoutTestFinder(object):
    """Finds the set of layout tests to run and applies skip/chunk filtering.

    Wraps a Port object (which knows how to enumerate tests on disk) plus
    the parsed command-line options.
    """

    def __init__(self, port, options):
        self._port = port
        self._options = options
        self._filesystem = self._port.host.filesystem
        self.LAYOUT_TESTS_DIRECTORIES = ('src', 'third_party', 'WebKit', 'LayoutTests')

    def find_tests(self, args, test_list=None, fastest_percentile=None):
        """Resolve command-line paths (and an optional --test-list file)
        into the set of tests to run.

        :return: (paths, test_files, running_all_tests) where paths are the
            normalized user-specified paths, test_files the tests to run,
            and running_all_tests whether the full suite was selected.
        """
        paths = self._strip_test_dir_prefixes(args)
        if test_list:
            paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(test_list, self._port.TEST_PATH_SEPARATOR))
        all_tests = []
        if not paths or fastest_percentile:
            all_tests = self._port.tests(None)
        path_tests = []
        if paths:
            path_tests = self._port.tests(paths)
        test_files = None
        running_all_tests = False
        if fastest_percentile:
            times_trie = self._times_trie()
            if times_trie:
                # Union the fastest N% with any explicitly requested paths.
                fastest_tests = self._fastest_tests(times_trie, all_tests, fastest_percentile)
                test_files = list(set(fastest_tests).union(path_tests))
            else:
                _log.warning('Running all the tests the first time to generate timing data.')
                test_files = all_tests
                running_all_tests = True
        elif paths:
            test_files = path_tests
        else:
            test_files = all_tests
            running_all_tests = True
        return (paths, test_files, running_all_tests)

    def _times_trie(self):
        """Load the bot timing data (a trie of test -> ms), or {} if absent."""
        times_ms_path = self._port.bot_test_times_path()
        if self._filesystem.exists(times_ms_path):
            return json.loads(self._filesystem.read_text_file(times_ms_path))
        else:
            return {}

    # The following line should run the fastest 50% of tests *and*
    # the css3/flexbox tests. It should *not* run the fastest 50%
    # of the css3/flexbox tests.
    #
    # run-webkit-tests --fastest=50 css3/flexbox
    def _fastest_tests(self, times_trie, all_tests, fastest_percentile):
        """Return the fastest `fastest_percentile`% of tests, plus any tests
        with no timing data (so new tests still get run)."""
        times = convert_times_trie_to_flat_paths(times_trie)
        # Ignore tests with a time==0 because those are skipped tests.
        # BUG FIX: dict.iteritems() is Python 2 only; items() works on both.
        sorted_times = sorted([test for (test, time) in times.items() if time],
                              key=lambda t: (times[t], t))
        clamped_percentile = max(0, min(100, fastest_percentile))
        number_of_tests_to_return = int(len(sorted_times) * clamped_percentile / 100)
        fastest_tests = set(sorted_times[:number_of_tests_to_return])
        # Don't try to run tests in the times_trie that no longer exist,
        fastest_tests = fastest_tests.intersection(all_tests)
        # For fastest tests, include any tests not in the times_ms.json so that
        # new tests get run in the fast set.
        unaccounted_tests = set(all_tests) - set(times.keys())
        # Using a set to dedupe here means that --order=None won't work, but that's
        # ok because --fastest already runs in an arbitrary order.
        return list(fastest_tests.union(unaccounted_tests))

    def _strip_test_dir_prefixes(self, paths):
        return [self._strip_test_dir_prefix(path) for path in paths if path]

    def _strip_test_dir_prefix(self, path):
        """Remove src/third_party/WebKit/LayoutTests/ (or any suffix of that
        chain) from the front of the test path."""
        for i in range(len(self.LAYOUT_TESTS_DIRECTORIES)):
            # Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
            # the filesystem uses '\\' as a directory separator
            for separator in (self._port.TEST_PATH_SEPARATOR, self._filesystem.sep):
                directory_prefix = separator.join(self.LAYOUT_TESTS_DIRECTORIES[i:]) + separator
                if path.startswith(directory_prefix):
                    return path[len(directory_prefix):]
        return path

    def _read_test_names_from_file(self, filenames, test_path_separator):
        """Read test names (one per line, '//' comments allowed) from each
        file in `filenames`.

        :raises IOError: if a listed file cannot be read (after logging a
            clear message for the missing-file case).
        """
        fs = self._filesystem
        tests = []
        for filename in filenames:
            try:
                if test_path_separator != fs.sep:
                    filename = filename.replace(test_path_separator, fs.sep)
                file_contents = fs.read_text_file(filename).split('\n')
                for line in file_contents:
                    line = self._strip_comments(line)
                    if line:
                        tests.append(line)
            # BUG FIX: 'except IOError, e' is Python 2-only syntax; the
            # 'as' form works on both Python 2.6+ and 3.
            except IOError as e:
                if e.errno == errno.ENOENT:
                    _log.critical('')
                    # BUG FIX: this used the builtin 'file' instead of the
                    # loop variable, printing '<type 'file'>' rather than
                    # the missing filename.
                    _log.critical('--test-list file "%s" not found' % filename)
                raise
        return tests

    @staticmethod
    def _strip_comments(line):
        """Strip '//' comments and collapse whitespace; return None for
        lines that end up empty."""
        commentIndex = line.find('//')
        # BUG FIX: 'is -1' relied on CPython small-int interning; use '=='
        # for value comparison.
        if commentIndex == -1:
            commentIndex = len(line)
        line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
        if line == '':
            return None
        else:
            return line

    def skip_tests(self, paths, all_tests_list, expectations, http_tests):
        """Compute the set of tests to skip per the expectations and the
        --skipped / --skip-failing-tests options."""
        all_tests = set(all_tests_list)
        tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
        if self._options.skip_failing_tests:
            tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL))
            tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY))
        if self._options.skipped == 'only':
            tests_to_skip = all_tests - tests_to_skip
        elif self._options.skipped == 'ignore':
            tests_to_skip = set()
        elif self._options.skipped != 'always':
            # make sure we're explicitly running any tests passed on the command line; equivalent to 'default'.
            tests_to_skip -= set(paths)
        return tests_to_skip

    def split_into_chunks(self, test_names):
        """split into a list to run and a set to skip, based on --run-chunk and --run-part."""
        if not self._options.run_chunk and not self._options.run_part:
            return test_names, set()
        # If the user specifies they just want to run a subset of the tests,
        # just grab a subset of the non-skipped tests.
        chunk_value = self._options.run_chunk or self._options.run_part
        try:
            (chunk_num, chunk_len) = chunk_value.split(":")
            chunk_num = int(chunk_num)
            assert(chunk_num >= 0)
            test_size = int(chunk_len)
            assert(test_size > 0)
        except AssertionError:
            _log.critical("invalid chunk '%s'" % chunk_value)
            return (None, None)
        # Get the number of tests
        num_tests = len(test_names)
        # Get the start offset of the slice.
        if self._options.run_chunk:
            chunk_len = test_size
            # In this case chunk_num can be really large. We need
            # to make the slave fit in the current number of tests.
            slice_start = (chunk_num * chunk_len) % num_tests
        else:
            # Validate the data.
            assert(test_size <= num_tests)
            assert(chunk_num <= test_size)
            # To count the chunk_len, and make sure we don't skip
            # some tests, we round to the next value that fits exactly
            # all the parts.
            rounded_tests = num_tests
            if rounded_tests % test_size != 0:
                rounded_tests = (num_tests + test_size - (num_tests % test_size))
            # BUG FIX: '/' is true division on Python 3 and would produce a
            # float slice bound; '//' is floor division on both 2 and 3.
            chunk_len = rounded_tests // test_size
            slice_start = chunk_len * (chunk_num - 1)
            # It does not mind if we go over test_size.
        # Get the end offset of the slice.
        slice_end = min(num_tests, slice_start + chunk_len)
        tests_to_run = test_names[slice_start:slice_end]
        _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
        # If we reached the end and we don't have enough tests, we run some
        # from the beginning.
        if slice_end - slice_start < chunk_len:
            extra = chunk_len - (slice_end - slice_start)
            _log.debug(' last chunk is partial, appending [0:%d]' % extra)
            tests_to_run.extend(test_names[0:extra])
        return (tests_to_run, set(test_names) - set(tests_to_run))
|
|
import os
import sys
import subprocess
import gdal, ogr
import numpy as np
import json
import argparse
import time
def read_dem(filepath):
    """
    Read a single-band DEM raster with GDAL and return its pixel data plus
    derived georeferencing values.

    Values outside [-9999, 9999], and anything below -500, are replaced
    with the -9999 nodata marker before the min/max sanity check.

    :param filepath: path to a GDAL-readable raster (e.g. GeoTIFF)
    :return: (dem_array, cols, rows, xmin, ymax, xres, yres, xdist, ydist)
    """
    print(filepath)
    #"""
    dem_dataset = gdal.Open(filepath)
    dem_tmp_cols = dem_dataset.RasterXSize
    dem_tmp_rows = dem_dataset.RasterYSize
    # GeoTransform layout: (xmin, xres, 0, ymax, 0, yres); yres is taken
    # as abs() since it is negative for north-up rasters.
    dem_geotransform = dem_dataset.GetGeoTransform()
    dem_xres = dem_geotransform[1]
    dem_yres = abs(dem_geotransform[5])
    dem_xmin = dem_geotransform[0]
    dem_ymax = dem_geotransform[3]
    # Derive the remaining extent corners from the pixel counts
    dem_xmax = dem_xmin + dem_tmp_cols * dem_xres
    dem_ymin = dem_ymax - dem_tmp_rows * dem_yres
    dem_band = dem_dataset.GetRasterBand(1)
    dem_tmp_array = dem_band.ReadAsArray(0, 0, dem_tmp_cols, dem_tmp_rows).astype(np.float32)
    # NOTE(review): dem_nodata and the center coordinates below are computed
    # but never returned or used in this function.
    dem_nodata = dem_band.GetNoDataValue()
    dem_xcent = (dem_xmin + dem_xmax) / 2.0
    dem_ycent = (dem_ymin + dem_ymax) / 2.0
    dem_ydist = dem_ymax - dem_ymin
    dem_xdist = dem_xmax - dem_xmin
    #dem_tmp_array = np.copy(np.flipud(dem_tmp_array))
    dem_tmp_nodata_range_external = [-9999, 9999]
    print(dem_tmp_rows, dem_tmp_cols)
    if True:
        # Clamp out-of-range and deep-negative values to the nodata marker
        dem_tmp_nodata = -9999
        dem_tmp_array[dem_tmp_array < dem_tmp_nodata_range_external[0]] = dem_tmp_nodata
        dem_tmp_array[dem_tmp_array > dem_tmp_nodata_range_external[1]] = dem_tmp_nodata
        dem_tmp_array[dem_tmp_array<-500] = -9999
    dem_intermed_array = dem_tmp_array
    try:
        #print(dem_intermed_array.shape)
        # Fails (ValueError) when every pixel is nodata, i.e. an empty tile
        dem_tmp_array_valmin = np.nanmin(dem_intermed_array[dem_intermed_array != dem_tmp_nodata])
        dem_tmp_array_valmax = np.nanmax(dem_intermed_array[dem_intermed_array != dem_tmp_nodata])
    except:
        # NOTE(review): bare 'except' swallows all errors, and
        # scad_dirpath/scad_filename_base are not defined in this function --
        # presumably globals set by the calling script. Confirm before
        # relying on this error path; it writes a marker file and exits.
        with open(os.path.join(scad_dirpath, scad_filename_base + '.scad.empty'), 'w') as scad_file:
            scad_file.write('')
        sys.exit()
    dem_array = dem_intermed_array
    dem_rows, dem_cols = dem_array.shape
    return dem_array, dem_cols, dem_rows, dem_xmin, dem_ymax, dem_xres, dem_yres, dem_xdist, dem_ydist
def generate_terrain(clippoly_filepath, dem_dirpath, dem_prefix, dem_tilex, dem_tiley, zmean_total):
    """Build OpenSCAD source lines that model one DEM tile as a solid.

    The DEM raster becomes a closed polyhedron (a "ceiling" surface at the
    terrain elevation, a "floor" 10 units below it, plus side walls), and
    every polygon in the clip shapefile is extruded and intersected with
    that polyhedron.

    Parameters:
        clippoly_filepath: ESRI shapefile holding the clip polygon(s).
        dem_dirpath, dem_prefix, dem_tilex, dem_tiley: locate the GeoTIFF
            tile "<prefix>_<tiley>_<tilex>.tif" inside dem_dirpath.
        zmean_total: elevation offset subtracted from every DEM cell so the
            model is roughly centred around z = 0.

    Returns:
        list of OpenSCAD statement strings: a ``dem()`` module definition
        followed by one ``intersection()`` block per clip geometry.
    """
    dem_filepath = os.path.join(dem_dirpath, '_'.join([dem_prefix, dem_tiley, dem_tilex]) + ".tif")
    print("dem_filepath", dem_filepath, clippoly_filepath, dem_dirpath, dem_prefix, dem_tilex, dem_tiley)
    polygon_extrude_height = 10000.0
    polyhedron_extrude_height = 100.0
    clippoly_driver = ogr.GetDriverByName("ESRI Shapefile")
    clippoly_datasource = clippoly_driver.Open(clippoly_filepath, 0)
    clippoly_layer = clippoly_datasource.GetLayer()
    clippoly_layer_extent = clippoly_layer.GetExtent()
    clippoly_layer_extent_xmin = clippoly_layer_extent[0]
    clippoly_layer_extent_xmax = clippoly_layer_extent[1]
    clippoly_layer_extent_ymin = clippoly_layer_extent[2]
    clippoly_layer_extent_ymax = clippoly_layer_extent[3]
    # All coordinates are recentred on the clip layer's extent centre so the
    # OpenSCAD model sits around the origin.
    clippoly_layer_extent_xcent = (clippoly_layer_extent_xmax + clippoly_layer_extent_xmin) / 2.0
    clippoly_layer_extent_ycent = (clippoly_layer_extent_ymax + clippoly_layer_extent_ymin) / 2.0
    c = []
    dem_array, dem_cols, dem_rows, dem_xmin, dem_ymax, dem_xres, dem_yres, dem_xdist, dem_ydist = read_dem(dem_filepath)
    print("nanmin", np.nanmin(dem_array))
    dem_array = np.copy(dem_array-zmean_total)
    print("nanmin", np.nanmin(dem_array))
    if True:
        if True:
            polyhedron_points = []
            polyhedron_points_floor = []
            polyhedron_points_ceil = []
            polyhedron_faces = []
            # Per-cell face buffer: up to 16 triangles (two triangular prisms)
            # per DEM cell; all-zero rows mean "no face".
            polyhedron_faces_array = np.zeros((dem_rows,dem_cols,16,3), dtype=np.int32)
            polyhedron_faces_clean_array = np.zeros((dem_rows,dem_cols,16,3), dtype=np.int32)
            polyhedron_points_floor_array = np.zeros((dem_rows*dem_cols,3), dtype=np.float32)
            #polyhedron_points_ceil_array = np.zeros((dem_rows*dem_cols,3), dtype=np.float32)
            z_scale = 1.0
            cnt = 0
            dem_x_min = None
            dem_x_max = None
            dem_y_min = None
            dem_y_max = None
            #for i in range(dem_rows,-1,-1):
            for i in range(0,dem_rows):
                for j in range(0,dem_cols):
                    #i0_coord = (dem_ymax - ((dem_yres*-1) * i) - (0.5 * (dem_yres*-1))) - dem_ydist
                    #j0_coord = dem_xmin + (dem_xres * j) + (0.5 * dem_xres)
                    i0_coord = dem_ymax - (dem_yres * i) #- dem_ydist #- (0.5 * (dem_yres*-1)))
                    j0_coord = dem_xmin + (dem_xres * j) #+ (0.5 * dem_xres)
                    #print(i,j,i0_coord,j0_coord)
                    z_a = (dem_array[i][j] * z_scale) #- zmean_total
                    dem_x = j0_coord
                    dem_y = i0_coord
                    polyhedron_points_floor_array[(i*dem_cols)+j][:] = np.array([dem_x - clippoly_layer_extent_xcent, dem_y - clippoly_layer_extent_ycent, z_a])
                    #polyhedron_points_floor_array[cnt][:] = np.array([j0_coord - clippoly_layer_extent_xcent, i0_coord - clippoly_layer_extent_ycent, z_a])
                    # Track the DEM bounding box (None acts as "unset").
                    if not dem_x_min or dem_x < dem_x_min:
                        dem_x_min = dem_x
                    if not dem_x_max or dem_x > dem_x_max:
                        dem_x_max = dem_x
                    if not dem_y_min or dem_y < dem_y_min:
                        dem_y_min = dem_y
                    if not dem_y_max or dem_y > dem_y_max:
                        dem_y_max = dem_y
                    #print(dem_rows*dem_cols, cnt, (i*dem_rows)+j, dem_rows, dem_cols, i, j, z_a)
                    if i<dem_rows-1 and j < dem_cols-1:
                        z_b = (dem_array[i+1][j] * z_scale) #- zmean_total
                        z_c = (dem_array[i][j+1] * z_scale) #- zmean_total
                        z_d = (dem_array[i+1][j+1] * z_scale) #- zmean_total
                        # Ceiling point indices of the cell's 2x2 corner grid;
                        # the floor copies live dem_rows*dem_cols entries later.
                        point_a_ceil = (i*dem_cols)+j
                        point_b_ceil = ((i+1)*dem_cols)+j
                        point_c_ceil = (i*dem_cols)+j+1
                        point_d_ceil = ((i+1)*dem_cols)+j+1
                        point_a_floor = (dem_rows*dem_cols) + (i*dem_cols)+j
                        point_b_floor = (dem_rows*dem_cols) + ((i+1)*dem_cols)+j
                        point_c_floor = (dem_rows*dem_cols) + (i*dem_cols)+j+1
                        point_d_floor = (dem_rows*dem_cols) + ((i+1)*dem_cols)+j+1
                        # -5000 acts as the nodata threshold: only emit faces
                        # where all three corner elevations are valid.
                        if z_a > -5000 and z_b > -5000 and z_c > -5000:
                            polyhedron_faces_array[i][j][0][:] = np.array([point_c_ceil, point_b_ceil, point_a_ceil]) ## ceiling
                            polyhedron_faces_array[i][j][1][:] = np.array([point_b_floor, point_a_floor, point_a_ceil]) ## left sidev
                            polyhedron_faces_array[i][j][2][:] = np.array([point_b_ceil, point_b_floor, point_a_ceil])
                            polyhedron_faces_array[i][j][3][:] = np.array([point_c_floor, point_b_floor, point_b_ceil]) ## right side (diagonal)
                            polyhedron_faces_array[i][j][4][:] = np.array([point_c_ceil, point_c_floor, point_b_ceil])
                            polyhedron_faces_array[i][j][5][:] = np.array([point_a_floor, point_c_floor, point_c_ceil]) ## top side
                            polyhedron_faces_array[i][j][6][:] = np.array([point_a_ceil, point_a_floor, point_c_ceil])
                            polyhedron_faces_array[i][j][7][:] = np.array([point_a_floor, point_b_floor, point_c_floor]) ## floor
                        if z_b > -5000 and z_d > -5000 and z_c > -5000:
                            polyhedron_faces_array[i][j][8][:] = np.array([point_c_ceil, point_d_ceil, point_b_ceil]) ## ceiling
                            polyhedron_faces_array[i][j][9][:] = np.array([point_d_ceil, point_d_floor, point_b_floor]) ## bottom side
                            polyhedron_faces_array[i][j][10][:] = np.array([point_d_ceil, point_b_floor, point_b_ceil])
                            polyhedron_faces_array[i][j][11][:] = np.array([point_c_ceil, point_c_floor, point_d_floor]) ## right side
                            polyhedron_faces_array[i][j][12][:] = np.array([point_c_ceil, point_d_floor, point_d_ceil])
                            polyhedron_faces_array[i][j][13][:] = np.array([point_b_ceil, point_b_floor, point_c_floor]) ## left side (diagonal)
                            polyhedron_faces_array[i][j][14][:] = np.array([point_c_ceil, point_b_ceil, point_c_floor])
                            polyhedron_faces_array[i][j][15][:] = np.array([point_b_floor, point_d_floor, point_c_floor]) ## floor
                    cnt += 1
            print(polyhedron_points_floor_array[-1])
            # Keep only faces that occur exactly once in the 3x3 cell
            # neighbourhood: faces shared between adjacent prisms are
            # interior and must be dropped for a valid manifold.
            polyhedron_faces_clean = []
            for i in range(0,dem_rows):
                for j in range(0,dem_cols):
                    for polyhedron_face_id in range(0, 16):
                        polyhedron_face = polyhedron_faces_array[i][j][polyhedron_face_id][:].tolist()
                        polyhedron_face_cnt = 0
                        if not (polyhedron_face[0] == 0 and polyhedron_face[1] == 0 and polyhedron_face[2] == 0):
                            i_bottom = i - 1 if i > 0 else 0
                            i_top = i + 1 if i < dem_rows-1 else dem_rows-1
                            # FIX: was "if i > 0", which produced j_left = -1 at the
                            # first column and wrapped the neighbour search to the
                            # last column via negative indexing.
                            j_left = j - 1 if j > 0 else 0
                            # FIX: clamp to the last valid column index (was dem_cols,
                            # one past the end of the array's second axis).
                            j_right = j + 1 if j < dem_cols-1 else dem_cols-1
                            for i_neighbour in range(i_bottom, i_top+1):
                                for j_neighbour in range(j_left, j_right+1):
                                    # Count the face under all 6 vertex orderings.
                                    array1 = np.array([polyhedron_face[0], polyhedron_face[1], polyhedron_face[2]])
                                    array2 = np.array([polyhedron_face[0], polyhedron_face[2], polyhedron_face[1]])
                                    array3 = np.array([polyhedron_face[1], polyhedron_face[0], polyhedron_face[2]])
                                    array4 = np.array([polyhedron_face[1], polyhedron_face[2], polyhedron_face[0]])
                                    array5 = np.array([polyhedron_face[2], polyhedron_face[1], polyhedron_face[0]])
                                    array6 = np.array([polyhedron_face[2], polyhedron_face[0], polyhedron_face[1]])
                                    loc1 = np.where(np.all(polyhedron_faces_array[i_neighbour][j_neighbour][:]==array1, axis=1))
                                    loc2 = np.where(np.all(polyhedron_faces_array[i_neighbour][j_neighbour][:]==array2, axis=1))
                                    loc3 = np.where(np.all(polyhedron_faces_array[i_neighbour][j_neighbour][:]==array3, axis=1))
                                    loc4 = np.where(np.all(polyhedron_faces_array[i_neighbour][j_neighbour][:]==array4, axis=1))
                                    loc5 = np.where(np.all(polyhedron_faces_array[i_neighbour][j_neighbour][:]==array5, axis=1))
                                    loc6 = np.where(np.all(polyhedron_faces_array[i_neighbour][j_neighbour][:]==array6, axis=1))
                                    polyhedron_face_neighbour_cnt = len(loc1[0]) + len(loc2[0]) + len(loc3[0]) + len(loc4[0]) + len(loc5[0]) + len(loc6[0])
                                    polyhedron_face_cnt += polyhedron_face_neighbour_cnt
                            if polyhedron_face_cnt == 1:
                                polyhedron_faces_clean.append(polyhedron_face)
                            else:
                                pass
            # Emit the point list twice: l=1 gives the floor copy (shifted
            # down by 10 units), l=0 the ceiling copy at terrain height.
            polyhedron_points = []
            for l in range(1,-1,-1):
                for polyhedron_point_id in range(0, polyhedron_points_floor_array.shape[0]):
                    polyhedron_point = [polyhedron_points_floor_array[polyhedron_point_id][0],
                                        polyhedron_points_floor_array[polyhedron_point_id][1],
                                        polyhedron_points_floor_array[polyhedron_point_id][2]-(l*10.0)]
                    #print(polyhedron_point)
                    polyhedron_points.append(polyhedron_point)
            print('dem_extent:', dem_x_min, dem_y_min, dem_x_max, dem_y_max)
            print(clippoly_layer_extent_xcent, clippoly_layer_extent_ycent)
            print('points', len(polyhedron_points))
            print('faces', polyhedron_faces_array.shape[0])
            print('faces_clean', len(polyhedron_faces_clean))
            c.append('module dem() {')
            c.append('    polyhedron(points={}, faces={});'.format(polyhedron_points, polyhedron_faces_clean))
            c.append('}')
            #c.append('dem();')
    clippoly_layer.ResetReading()
    for feat_id, feat in enumerate(clippoly_layer):
        geom = feat.GetGeometryRef()
        geom_name = str(geom.GetGeometryName())
        geom_subgeomcount = geom.GetGeometryCount()
        if geom_name.lower() == 'multipolygon':
            ## iterate over sub polygons
            for sub_id in range(0, geom.GetGeometryCount()):
                polygon_points = []
                polygon_paths = []
                point_cnt = 0
                geom_sub = geom.GetGeometryRef(sub_id)
                geom_sub_subgeomcount = geom_sub.GetGeometryCount()
                for bound_id in range(0, geom_sub_subgeomcount):
                    polygon_path = []
                    geom_sub_bound = geom_sub.GetGeometryRef(bound_id)
                    geom_sub_bound_json = json.loads(geom_sub_bound.ExportToJson())
                    polygon_path = [] #geom_sub_bound_json['coordinates']
                    # Skip the closing point (last == first in OGR rings).
                    for point_id in range(0, geom_sub_bound.GetPointCount()-1):
                        geom_sub_bound_point_x, geom_sub_bound_point_y, dummy = geom_sub_bound.GetPoint(point_id)
                        polygon_path.append(point_cnt)
                        polygon_points.append([geom_sub_bound_point_x - clippoly_layer_extent_xcent, geom_sub_bound_point_y - clippoly_layer_extent_ycent])
                        #polygon_points.append([0, 0])
                        point_cnt +=1
                    polygon_paths.append(polygon_path)
                c.append('intersection() {')
                c.append('translate([0,0,-5000]) linear_extrude(height={}) polygon({},{});'.format(polygon_extrude_height, polygon_points, polygon_paths))
                c.append('dem();')
                c.append('}')
        elif geom_name.lower() == 'polygon':
            polygon_points = []
            polygon_paths = []
            point_cnt = 0
            for bound_id in range(0, geom_subgeomcount):
                geom_bound = geom.GetGeometryRef(bound_id)
                geom_bound_json = json.loads(geom_bound.ExportToJson())
                polygon_path = [] #geom_bound_json['coordinates']
                for point_id in range(0, geom_bound.GetPointCount()):
                    geom_bound_point_x, geom_bound_point_y, dummy = geom_bound.GetPoint(point_id)
                    polygon_path.append(point_cnt)
                    polygon_points.append([geom_bound_point_x - clippoly_layer_extent_xcent, geom_bound_point_y - clippoly_layer_extent_ycent])
                    #polygon_points.append([0, 0])
                    point_cnt +=1
                polygon_paths.append(polygon_path)
            c.append('intersection() {')
            c.append('translate([0,0,-5000]) linear_extrude(height={}) polygon({},{});'.format(polygon_extrude_height, polygon_points, polygon_paths))
            c.append('dem();')
            c.append('}')
    return c
# ---- script entry: parse CLI args, generate the .scad file, then either run
# OpenSCAD directly (non-Windows target) or emit .bat/.sh launcher scripts.
scad_filename_base = 'test'
proc_target_os = 'win'
openscad_bin_filepath = 'openscad'
# Development defaults; the path-like ones are overridden by CLI args below.
scad_dirpath = os.path.join(os.sep, 'mnt', 'c', 'Users', 'mic', 'dev')
proc_dirpath = os.path.join(os.sep, 'mnt', 'c', 'Users', 'mic', 'dev')
dem_filepath = os.path.join(os.sep, 'mnt', 'e', 'zh', 'gis_zh__dom_dtm__lidar', 'dtm_mosaic_tiled_offset_500x500', 'dtm_mosaic_tiled_offset_500x500_34_33.tif')
clippoly_filepath = os.path.join(os.sep, 'mnt', 'e', 'zh', 'wambachers_osm__boundaries__adm', 'data', 'district_zurich_al6_al6_2056.shp')
parser = argparse.ArgumentParser()
parser.add_argument('--dem_path', action='store', type=str, required=True)
parser.add_argument('--dem_prefix', action='store', type=str, required=True)
parser.add_argument('--dem_tilex', action='store', type=str, required=True)
parser.add_argument('--dem_tiley', action='store', type=str, required=True)
parser.add_argument('--zmin', action='store', type=str, required=False)
parser.add_argument('--zmax', action='store', type=str, required=False)
parser.add_argument('--clippoly', action='store', type=str, required=True)
parser.add_argument('--outdir', action='store', type=str, required=True)
args = parser.parse_args()
dem_dirpath = args.dem_path
dem_prefix = args.dem_prefix
dem_tilex = args.dem_tilex
dem_tiley = args.dem_tiley
clippoly_filepath = args.clippoly
proc_dirpath = args.outdir
#sys.exit()
#dem_dirpath, dem_filename = os.path.split(dem_filepath)
#dem_filename_base = dem_filename.split('.')[0]
scad_dirpath = args.outdir
scad_filename_base = "_".join([dem_prefix, dem_tiley, dem_tilex])
try:
    zmin_total = float(args.zmin)
    zmax_total = float(args.zmax)
    zmean_total = zmin_total + ((zmax_total - zmin_total) / 2.0)
# FIX: was a bare "except:"; --zmin/--zmax are optional, so float(None)
# raises TypeError and a malformed value raises ValueError — catch only those.
except (TypeError, ValueError):
    zmean_total = 0
command_lines = []
#if not (os.path.isfile(os.path.join(scad_dirpath, scad_filename_base + '.bat')) or os.path.isfile(os.path.join(scad_dirpath, scad_filename_base + '.sh'))):
if True:
    terrain_command_lines = generate_terrain(clippoly_filepath, dem_dirpath, dem_prefix, dem_tilex, dem_tiley, zmean_total)
    command_lines += terrain_command_lines
    with open(os.path.join(scad_dirpath, scad_filename_base + '.scad'), 'w') as scad_file:
        for command_line in command_lines:
            scad_file.write(command_line + '\n')
if proc_target_os != 'win':
    # Run OpenSCAD directly to produce the STL.
    subprocess_bin = openscad_bin_filepath
    subprocess_commands = [subprocess_bin, '-o', os.path.join(proc_dirpath, scad_filename_base + '.stl'), os.path.join(scad_dirpath, scad_filename_base + '.scad')]
    output = subprocess.check_output(subprocess_commands, shell=False)
else:
    # Emit per-tile launcher scripts for Windows (.bat) and Linux (.sh).
    openscad_win_bin_filepath = '\\'.join(['C:', '"Program Files"', 'OpenSCAD', 'openscad.com'])
    openscad_linux_bin_filepath = os.path.join(os.sep, 'home', 'mic', 'prog', 'OpenSCAD-2019.05-x86_64', 'squashfs-root', 'usr', 'bin', 'openscad')
    scad_win_dirpath = '\\'.join(['C:', 'Users', 'mic', 'dev'])
    proc_win_dirpath = '\\'.join(['C:', 'Users', 'mic', 'dev'])
    subprocess_args = ['-o', scad_filename_base + '.stl', scad_filename_base + '.scad']
    subprocess_win_command = [openscad_win_bin_filepath] + subprocess_args
    subprocess_linux_command = [openscad_linux_bin_filepath] + subprocess_args
    with open(os.path.join(scad_dirpath, scad_filename_base + '.bat'), 'w') as batch_file:
        batch_file.write(' '.join(subprocess_win_command))
    with open(os.path.join(scad_dirpath, scad_filename_base + '.sh'), 'w') as shell_file:
        shell_file.write(' '.join(subprocess_linux_command))
|
|
import os
from distutils.version import LooseVersion
import numpy as np
try:
import astropy.io.fits as fits
except ImportError:
import pyfits as fits
import pyLikelihood
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 15})
matplotlib.rc('text', usetex=False)
import matplotlib.pyplot as plt
from enrico.constants import MEV_TO_ERG, ERG_TO_MEV
from enrico.config import get_config
from enrico import utils
from enrico import Loggin
from enrico.extern.astropy_bayesian_blocks import bayesian_blocks
class Params:
    """Container for SED-plotting options.

    Holds the energy bounds, the number of sampling points, the source of
    interest, the output file name, and the line/point colour codes.
    """
    def __init__(self, srcname, Emin=100, Emax=3e5,
                 PlotName="LAT_SED", LineColor=2,
                 PointColor = 1, N = 2000):
        # Source and output destination
        self.srcname = srcname
        self.PlotName = PlotName
        # Energy range [MeV] and sampling density for the curves
        self.Emin = Emin
        self.Emax = Emax
        self.N = N
        # Colour codes used when drawing
        self.LineColor = LineColor
        self.PointColor = PointColor
class Result(Loggin.Message):
    """Helper class to get the results from a (Un)BinnedAnalysis object
    and compute the SED and errors"""
    def __init__(self, Fit, pars):
        # FIX: initialise the Loggin.Message mixin exactly once; the original
        # called both super().__init__() and Loggin.Message.__init__(self).
        super(Result, self).__init__()
        self.Fit = Fit
        self.Model = Fit[pars.srcname].funcs['Spectrum'].genericName()
        self.ptsrc = pyLikelihood.PointSource_cast(Fit[pars.srcname].src)
        # Covariance matrix of the free spectral parameters
        self.covar = np.array(utils.GetCovar(pars.srcname, self.Fit, False))
        self.srcpars = pyLikelihood.StringVector()
        Fit[pars.srcname].src.spectrum().getFreeParamNames(self.srcpars)

    def GetDecorrelationEnergy(self, par):
        """Find the energy with the smallest relative SED error and cache
        the flux/SED values (and their errors) at that energy as decE,
        decFlux(+err) and decSED(+err) attributes."""
        self.E, self.SED = self.MakeSED(par)
        self.Err = self.MakeSEDError(par)
        i = np.argmin(self.Err / self.SED)
        self.decE = self.E[i]
        self.decFlux = self.SED[i] / self.E[i] ** 2 * ERG_TO_MEV
        self.decFluxerr = self.Err[i] / self.E[i] ** 2 * ERG_TO_MEV
        self.decSED = self.SED[i]
        self.decSEDerr = self.Err[i]

    def _DumpSED(self, par):
        """Save the energy, E2.dN/dE, and corresponding error in an ascii file
        The count and residuals plot vs E is also made"""
        try:
            self.decE
        # FIX: a missing instance attribute raises AttributeError, not
        # NameError — the original never triggered the lazy computation.
        except AttributeError:
            self.GetDecorrelationEnergy(par)
        self.info("Decorrelation energy : %4.2e MeV"% self.decE)
        self.info("Diffential flux at the Decorrelation energy : %2.2e +/- %2.2e ph/cm2/s/MeV" \
                %(self.decFlux, self.decFluxerr))
        self.info("SED value at the Decorrelation energy : %2.2e +/- %2.2e erg/cm2/s" \
                %(self.decSED, self.decSEDerr))
        try:
            self.CountsPlot(par)
        except Exception as e:
            # Plotting is best-effort: report the failure but keep going so
            # the ascii dump below is still written.
            print((type(e)))    # the exception instance
            print((e.args))     # arguments stored in .args
            print(e)            # __str__ allows args to be printed directly,
            #raise
        # Save all in ascii file
        # log(E) log (E**2*dN/dE) log(E**2*dN/dE_err) is_dot (0,1) is_upper (0,1)
        with open(par.PlotName + '.dat', 'w') as save_file:
            save_file.write("# log(E) log (E**2*dN/dE) Error on log(E**2*dN/dE) \n")
            for i in range(par.N):
                save_file.write("%12.4e %12.4e %12.4e \n" % (self.E[i], self.SED[i], self.Err[i]))

    def MakeFlux(self, params):
        """Compute differential Flux distribution and
        corresponding energy and return a numpy array"""
        E = np.logspace(np.log10(params.Emin), np.log10(params.Emax), params.N)
        Flux = np.zeros(params.N)
        for i in range(params.N):
            Flux[i] = self.dNde(E[i])
        return E, Flux

    def MakeSED(self, pars):
        """Compute Spectral energy distribution and corresponding energy
        and return a numpy array"""
        E = np.logspace(np.log10(pars.Emin), np.log10(pars.Emax), pars.N)
        nuFnu = np.zeros(pars.N)
        for i in range(pars.N):
            nuFnu[i] = MEV_TO_ERG * E[i] ** 2 * self.dNde(E[i])  # Mev to Ergs
        return E, nuFnu

    def MakeSEDError(self, pars):
        """Propagate the parameter covariance matrix through the spectrum's
        partial derivatives to get the SED error at each energy (erg units)."""
        estep = np.log(pars.Emax / pars.Emin) / (pars.N - 1)
        # FIX: np.float was removed in NumPy 1.24; use the builtin float.
        energies = pars.Emin * np.exp(estep * np.arange(float(pars.N)))
        err = np.zeros(pars.N)
        j = 0
        for ene in energies:
            arg = pyLikelihood.dArg(ene)
            partials = np.zeros(len(self.srcpars))
            for i in range(len(self.srcpars)):
                x = self.srcpars[i]
                partials[i] = self.ptsrc.spectrum().derivByParam(arg, x)
            # sigma^2 = grad . Cov . grad
            err[j] = np.sqrt(np.dot(partials, np.dot(self.covar, partials)))
            j += 1
        return MEV_TO_ERG * energies ** 2 * err  # Mev to Ergs

    def dNde(self, energy):
        """Differential flux dN/dE of the fitted spectrum at `energy` (MeV)."""
        arg = pyLikelihood.dArg(energy)
        return self.ptsrc.spectrum()(arg)

    def CountsPlot(self, Parameter):
        """Write per-component counts spectra to a temporary FITS file,
        accumulate observed/model counts per energy bin, and save the
        counts and residuals plots as PNG files."""
        imName = "tmp.fits"
        filebase = Parameter.PlotName
        total = np.array([])
        obs = np.array([])
        obs_err = np.array([])
        emax = np.array([])
        emin = np.array([])
        src = np.array([])
        # Summed Likelihood has no writeCountsSpectra
        # but we can do it component by component
        for comp in self.Fit.components:
            #self.Fit.writeCountsSpectra(imName)
            try:
                comp.writeCountsSpectra(imName)
                image = fits.open(imName)
                #loop on the source names to find the good one
                j = 0
                for ID in image[1].data.names:
                    if ID == Parameter.srcname:
                        indice = j
                    j += 1
                for jn in range(len(image[3].data.field(0))):
                    energymin = image[3].data.field(1)[jn]
                    energymax = image[3].data.field(0)[jn]
                    if energymax in emax and energymin in emin:
                        # Bin already seen in a previous component: accumulate.
                        k = np.where(energymax==emax)
                        obs[k] = obs[k] + image[1].data.field(0)[jn]
                        obs_err[k] = np.sqrt(obs[k])
                        src[k] = src[k] + image[1].data.field(indice)[jn]
                        for i in range(len(image[1].data.names) - 1):
                            total[k] = total[k] + image[1].data.field(i + 1)[jn]
                    else:
                        emax = np.append(emax, energymax)
                        emin = np.append(emin, energymin)
                        obs = np.append(obs,image[1].data.field(0)[jn])
                        obs_err = np.append(obs_err,\
                                            np.sqrt(image[1].data.field(0)[jn]))
                        src = np.append(src, image[1].data.field(indice)[jn])
                        total = np.append(total,0)
                        for i in range(len(image[1].data.names) - 1):
                            total[-1] = total[-1] + image[1].data.field(i + 1)[jn]
            except RuntimeError as e:
                print("Exception RuntimeError ocurred: ")
                print((type(e)))
                print((e.args))
                print(e)
                break
            # FIX: bind the exception ("as e"); the original printed an
            # unbound name `e`, raising NameError inside the handler.
            except IndexError as e:
                print("Exception IndexError ocurred (component unavailable): ")
                print((type(e)))
                print((e.args))
                print(e)
                continue
        # Sort by energy
        energy_order = np.argsort(emin)
        src = src[energy_order]
        obs = obs[energy_order]
        obs_err = obs_err[energy_order]
        total = total[energy_order]
        emin = emin[energy_order]
        emax = emax[energy_order]
        other = np.array(total - src)
        Nbin = len(src)
        E = np.array((emax + emin) / 2.)
        err_E = np.array((emax - emin) / 2.)
        total = np.array(total)
        residual = np.zeros(Nbin)
        Dres = np.zeros(Nbin)
        plt.figure()
        plt.loglog()
        plt.title('Counts plot')
        plt.xlabel("E (MeV) ")
        plt.ylabel("Counts / bin")
        plt.errorbar(E,obs,xerr=err_E,yerr=obs_err,fmt='o',color="red",ls='None',label="Data")
        plt.plot(E,src,ls='dashed',color="blue",label=Parameter.srcname.replace("_"," "))
        plt.plot(E,other,ls='solid',color="green",label="Other Sources")
        plt.plot(E,total,lw=1.5,ls='solid',label="All Sources")
        plt.legend()
        plt.tight_layout()
        plt.savefig(filebase + "_CountsPlot.png", dpi=150, facecolor='w', edgecolor='w',
                    orientation='portrait', papertype=None, format=None,
                    transparent=False, bbox_inches=None, pad_inches=0.1,
                    frameon=None)
        plt.figure()
        plt.title('Residuals plot')
        plt.semilogx()
        for i in range(Nbin):
            try:
                residual[i] = (obs[i] - total[i]) / total[i]
                Dres[i] = (obs_err[i] / total[i])
            except:
                # division by an empty/zero model bin: show a null residual
                residual[i] = 0.
                Dres[i] = 0.
            if residual[i] == -1.:
                residual[i] = 0.
        ymin = min(residual) - max(Dres)
        ymax = max(residual) + max(Dres)
        plt.ylim(ymax = ymax, ymin = ymin)
        plt.xlim(xmin = min(E)*0.3, xmax = max(E)*2)
        plt.xlabel("E (MeV) ")
        plt.ylabel("(counts-model)/model")
        plt.errorbar(E,residual,xerr=err_E,yerr=Dres,fmt='o',color="red",ls='None',label="Data")
        zero = np.zeros(2)
        Ezero = np.array([1e-5, 1e10])
        plt.plot(Ezero,zero,lw=1.5,ls='solid',color='black')
        plt.tight_layout()
        plt.savefig(filebase + "ResPlot.png", dpi=150, facecolor='w',
                    edgecolor='w', orientation='portrait', format=None,
                    transparent=False, bbox_inches=None, pad_inches=0.1)
        # NOTE(review): `image` is the HDU list of the last component;
        # assumes at least one component was opened successfully — confirm.
        image.close()
        # FIX: portable file removal instead of os.system("rm " + imName).
        os.remove(imName)
# def PlotFoldedLC(Time, TimeErr, Flux, FluxErr, tag="Flux (photon cm^{-2} s^{-1})"):
# _, tgraph, arrows = PlotLC(Time, TimeErr, Flux, FluxErr, tag)
# xmin = 0
# xmax = 1
# if max(FluxErr)==0:
# ymin = 0.
# ymax = max(Flux)*1.3
# else:
# ymin = np.min(min(Flux) - max(FluxErr) * 1.3, 0.)
# ymax = (max(Flux) + max(FluxErr)) * 1.3
# gh = ROOT.TH2F("ghflux", "", 80, xmin, xmax, 100, ymin, ymax)
# gh.SetStats(000)
# gh.SetXTitle("Orbital Phase")
# gh.SetYTitle(tag)
# return gh, tgraph, arrows
def GetDataPoints(config,pars,ignore_missing_bins=False):
    """Collect the data points/UL and generate a TGraph for the points
    and a list of TArrow for the UL. All is SED format.

    Reads the per-energy-bin fit results written by the Ebin stage, fills
    the energy/flux arrays (with asymmetric errors), flags upper limits,
    dumps everything to "<PlotName>.Ebin.dat", and returns
    (Epoint, Fluxpoint, EpointErrm, EpointErrp, FluxpointErrm,
    FluxpointErrp, uplim)."""
    #Preparation + declaration of arrays
    arrows = []
    NEbin = int(config['Ebin']['NumEnergyBins'])
    lEmax = np.log10(float(config['energy']['emax']))
    lEmin = np.log10(float(config['energy']['emin']))
    Epoint = np.zeros(NEbin)
    EpointErrp = np.zeros(NEbin)
    EpointErrm = np.zeros(NEbin)
    Fluxpoint = np.zeros(NEbin)
    FluxpointErrp = np.zeros(NEbin)
    FluxpointErrm = np.zeros(NEbin)
    uplim = np.zeros(NEbin,dtype=int)
    ener = np.logspace(lEmin, lEmax, NEbin + 1)
    mes = Loggin.Message()
    mes.info("Save Ebin results in ",pars.PlotName+".Ebin.dat")
    dumpfile = open(pars.PlotName+".Ebin.dat",'w')
    dumpfile.write("# Energy (MeV)\tEmin (MeV)\tEmax (MeV)\tE**2. dN/dE (erg.cm-2s-1)\tGaussianError\tMinosNegativeError\tMinosPositiveError\n")
    from enrico.constants import EbinPath
    for i in range(NEbin):#Loop over the energy bins
        filename = (config['out'] + '/'+EbinPath+str(NEbin)+'/' + config['target']['name'] +
                    "_" + str(i) + ".conf")
        try:#read the config file of each data points
            CurConf = get_config(filename)
            mes.info("Reading "+filename)
            results = utils.ReadResult(CurConf)
        # FIX: was a bare "except:"; narrowed so Ctrl-C etc. still propagate.
        except Exception:
            if not ignore_missing_bins:
                mes.warning("cannot read the Results of energy bin "+ str(i))
            continue
        #fill the energy arrays
        # Bin centre as the logarithmic mid-point of the fitted bin edges.
        # (FIX: the original computed this same value twice in a row.)
        Epoint[i] = 10**((np.log10(results.get("Emin"))+np.log10(results.get("Emax")))/2.)
        EpointErrm[i] = Epoint[i] - results.get("Emin")
        EpointErrp[i] = results.get("Emax") - Epoint[i]
        dprefactor = 0
        #Compute the flux or the UL (in SED format)
        if 'Ulvalue' in results:
            PrefUl = utils.Prefactor(results.get("Ulvalue"),results.get("Index"),
                                     results.get("Emin"),results.get("Emax"),Epoint[i])
            Fluxpoint[i] = MEV_TO_ERG * PrefUl * Epoint[i] ** 2
            uplim[i] = 1
        else : #Not an UL : compute points + errors
            Fluxpoint[i] = MEV_TO_ERG * results.get("Prefactor") * Epoint[i] ** 2
            dprefactor = results.get("dPrefactor")
            try:
                # Prefer asymmetric MINOS errors when available...
                down = abs(results.get("dPrefactor-"))
                up = results.get("dPrefactor+")
                if down==0 or up ==0 :
                    mes.error("cannot get Error value")
                FluxpointErrp[i] = MEV_TO_ERG * up * Epoint[i] ** 2
                FluxpointErrm[i] = MEV_TO_ERG * down * Epoint[i] ** 2
            except Exception:
                # ...fall back to the symmetric Gaussian error.
                try:
                    err = MEV_TO_ERG * dprefactor * Epoint[i] ** 2
                    FluxpointErrp[i] = err
                    FluxpointErrm[i] = err
                except Exception:
                    pass
        mes.info("Energy bins results")
        print(("Energy = ",Epoint[i]))
        #Save the data point in a ascii file
        if 'Ulvalue' in results:
            dumpfile.write(str(Epoint[i])+"\t"+str(results.get("Emin"))+"\t"+str( results.get("Emax"))+"\t"+str(Fluxpoint[i])+"\t0\t0\t0\n")
            print(("E**2. dN/dE = ",Fluxpoint[i]))
        else:
            dumpfile.write(str(Epoint[i])+"\t"+str(results.get("Emin"))+"\t"+str( results.get("Emax"))+"\t"+str(Fluxpoint[i])+"\t"+str( MEV_TO_ERG * dprefactor * Epoint[i] ** 2)+"\t"+str(FluxpointErrm[i])+"\t"+str(FluxpointErrp[i])+"\n")
            print(("E**2. dN/dE = ",Fluxpoint[i]," + ",FluxpointErrp[i]," - ",FluxpointErrm[i]))
    dumpfile.close()
    return Epoint, Fluxpoint, EpointErrm, EpointErrp, FluxpointErrm, FluxpointErrp, uplim
def plot_errorbar_withuls(x,xerrm,xerrp,y,yerrm,yerrp,uplim,bblocks=False):
    """ plot an errorbar plot with upper limits. Optionally compute and draw bayesian blocks (bblocks) """
    uplim = np.asarray(uplim,dtype=bool) # It is an array of 1 and 0s, needs to be a bool array.
    # make sure that the arrays are numpy arrays and not lists.
    x = np.asarray(x)
    xerrm = np.asarray(xerrm)
    xerrp = np.asarray(xerrp)
    # FIX: copy the y arrays. np.asarray aliases a caller-supplied ndarray,
    # and the UL handling below mutates y/yerrm/yerrp in place, silently
    # corrupting the caller's data.
    y = np.array(y, dtype=float)
    yerrm = np.array(yerrm, dtype=float)
    yerrp = np.array(yerrp, dtype=float)
    # Get the strict upper limit (best fit value + error, then set the error to 0 and the lower error to 20% of the value)
    y[uplim] += yerrp[uplim]
    yerrm[uplim] = 0
    yerrp[uplim] = 0
    # Marker/line sizes shrink as the number of points grows.
    optimal_markersize = (0.5+4./(1.+np.log10(len(y))))
    optimal_errorlinewidth = (0.2+2./(1.+4.*np.log10(len(y))))
    # Plot the significant points
    plt.errorbar(x[~uplim], y[~uplim],
                 xerr=[xerrm[~uplim], xerrp[~uplim]],
                 yerr=[yerrm[~uplim], yerrp[~uplim]],
                 lw=optimal_errorlinewidth,
                 fmt='o',ms=optimal_markersize,capsize=0,zorder=10,
                 color='black',ls='None',uplims=False,label='LAT data')
    # Plot the upper limits. For some reason, matplotlib draws the arrows inverted for uplim and lolim [?]
    # This is a known issue fixed in matplotlib 1.4: https://github.com/matplotlib/matplotlib/pull/2452
    if LooseVersion(matplotlib.__version__) < LooseVersion("1.4.0"):
        plt.errorbar(x[uplim], y[uplim],
                     xerr=[xerrm[uplim], xerrp[uplim]],
                     yerr=[yerrm[uplim], yerrp[uplim]],
                     fmt='o',markersize=0,capsize=0,zorder=-1,
                     lw=optimal_errorlinewidth,
                     color='0.50',ls='None',lolims=False)
        plt.errorbar(x[uplim], 0.8*y[uplim],
                     yerr=[0.2*y[uplim], 0.2*y[uplim]],
                     fmt='o',markersize=0,capsize=optimal_markersize/1.5,zorder=-1,
                     lw=optimal_errorlinewidth,
                     color='0.50',ls='None',lolims=True)
    else:
        plt.errorbar(x[uplim], y[uplim],
                     xerr=[xerrm[uplim], xerrp[uplim]],
                     yerr=[yerrm[uplim], yerrp[uplim]],
                     lw=optimal_errorlinewidth,
                     fmt='o',markersize=0,capsize=0,zorder=-1,
                     color='0.50',ls='None',uplims=False)
        plt.errorbar(x[uplim], y[uplim],
                     yerr=[0.2*y[uplim], 0.2*y[uplim]],
                     lw=optimal_errorlinewidth,
                     fmt='o',markersize=0,capsize=optimal_markersize/1.5,zorder=-1,
                     color='0.50',ls='None',uplims=True)
    if bblocks and len(x[~uplim])>2:
        yerr = 0.5*(yerrm+yerrp)
        # Set the value and error for the uls.
        yerr[uplim] = y[uplim] #min(y[yerr>0]+yerr[yerr>0])
        y[uplim] = 0
        edges = bayesian_blocks(x,y,yerr,fitness='measures',p0=0.5)
        #edges = bayesian_blocks(x[yerr>0],y[yerr>0],yerr[yerr>0],fitness='measures',p0=0.1)
        xvalues = 0.5*(edges[:-1]+edges[1:])
        xerrors = 0.5*(edges[1:]-edges[:-1])
        yvalues = []
        yerrors = []
        for k in range(len(edges)-1):
            xmin,xmax = edges[k],edges[k+1]
            filt = (x>=xmin)*(x<=xmax)*(yerr>0)
            # Inverse-variance weighted mean of the points in this block.
            sum_inv_square = np.sum(1./yerr[filt]**2)
            yvalues.append(np.sum(y[filt]/yerr[filt]**2)/sum_inv_square)
            yerrors.append(1./np.sqrt(sum_inv_square))
        yvalues = np.asarray(yvalues)
        yerrors = np.asarray(yerrors)
        # Plot the significant points
        ystep = []
        ystepmin = []
        ystepmax = []
        xstep = []
        for k in range(len(xvalues)):
            for _ in range(2):
                ystep.append(yvalues[k]) # 3 values, to mark the minimum and center
                ystepmin.append(yvalues[k]-yerrors[k]) # 3 values, to mark the minimum and center
                ystepmax.append(yvalues[k]+yerrors[k]) # 3 values, to mark the minimum and center
            xstep.append(xvalues[k]-xerrors[k])
            xstep.append(xvalues[k]+xerrors[k])
        plt.step(xstep, ystep,
                 color='#d62728',zorder=-10,
                 ls='solid')
        plt.fill_between(xstep, ystepmin, ystepmax,
                         color='#d62728',zorder=-10, alpha=0.5)
        plt.errorbar(xvalues, yvalues,
                     xerr=xerrors,yerr=yerrors,
                     marker=None,ms=0,capsize=0,color='#d62728',zorder=-10,
                     ls='None',label='bayesian blocks')
        plt.legend(loc=0,fontsize='small',numpoints=1)
def plot_bayesianblocks(xmin, xmax, y, yerrm, yerrp, uplim):
    """Draw pre-computed bayesian blocks as a red step curve with a shaded
    error band plus error bars. `xmin`/`xmax` are the block edges, `y` the
    block values with asymmetric errors, `uplim` flags upper limits."""
    # FIX: coerce the mask to bool (consistent with plot_errorbar_withuls);
    # an int 0/1 mask would fancy-index instead of mask.
    uplim = np.asarray(uplim, dtype=bool)
    xmin = np.asarray(xmin)
    xmax = np.asarray(xmax)
    # FIX: copy the value arrays — they are mutated below and np.asarray
    # would alias (and so corrupt) the caller's data.
    y = np.array(y, dtype=float)
    yerrm = np.array(yerrm, dtype=float)
    yerrp = np.array(yerrp, dtype=float)
    # Set the value and error for the uls.
    yerrm[uplim] = y[uplim]
    yerrp[uplim] = y[uplim]
    y[uplim] = 0.
    xvalues = 0.5*(xmax+xmin)
    xerrors = 0.5*(xmax-xmin)
    # Plot the significant points
    ystep = []
    ystepmin = []
    ystepmax = []
    xstep = []
    for k in range(len(xvalues)):
        for _ in range(2):
            ystep.append(y[k]) # 3 values, to mark the minimum and center
            ystepmin.append(y[k]-yerrm[k]) # 3 values, to mark the minimum and center
            ystepmax.append(y[k]+yerrp[k]) # 3 values, to mark the minimum and center
        xstep.append(xmin[k])
        xstep.append(xmax[k])
    plt.step(xstep, ystep,
             color='#d62728',zorder=-10,
             ls='solid')
    plt.fill_between(xstep, ystepmin, ystepmax,
                     color='#d62728',zorder=-10, alpha=0.5)
    plt.errorbar(xvalues, y,
                 xerr=xerrors,yerr=[yerrm, yerrp],
                 marker=None,ms=0,capsize=0,color='#d62728',zorder=-10,
                 ls='None')
def PlotSED(config,pars,ignore_missing_bins=False):
    """plot a nice SED with a butterfly and points"""
    # Read the ascii file where the butterfly is stored
    filebase = utils._SpecFileName(config)
    # FIX: the file handle was leaked (open(...).readlines() without close).
    with open(filebase + '.dat', 'r') as spec_file:
        lines = spec_file.readlines()
    SED = []
    E = []
    Err = []
    # Skip the header line; keep only entries below the plotting Emax.
    for i in range(len(lines) - 1):
        words = lines[i + 1].split()
        if float(words[0])<pars.Emax :
            E.append(float(words[0]))
            SED.append(float(words[1]))
            Err.append(float(words[2]))
    ilen = len(SED)
    #From dN/dE to SED: upper/lower butterfly edges in log space
    Fluxp = np.array(SED)*np.exp(np.array(Err)/np.array(SED))
    Fluxm = np.array(SED)*np.exp(-np.array(Err)/np.array(SED))
    ErrorFlux = np.zeros(2 * ilen + 1)
    ErrorE = np.zeros(2 * ilen + 1)
    #Compute the butterfly and close it
    for i in range(ilen):
        ErrorFlux[i] = Fluxp[i]
        ErrorE[i] = E[i]
    for i in range(ilen):
        ErrorFlux[ilen + i] = Fluxm[ilen - i - 1]
        ErrorE[ilen + i] = E[ilen - i - 1]
    ErrorFlux[-1] = Fluxp[0]
    ErrorE[-1] = E[0]
    #Actually make the plot
    plt.figure()
    plt.title(pars.PlotName.split("/")[-1])#.replace('_','\_'))
    name = pars.PlotName.split("/")[-1]
    plt.loglog()
    plt.xlabel(r"Energy (MeV)")
    plt.ylabel(r"$\mathrm{E^2\ dN/dE}\ \mathrm{(erg\ cm^{-2} s^{-1})}$")
    plt.plot(E,SED,"-r",label='LAT model')
    plt.plot(ErrorE,ErrorFlux,"-r")
    #Plot points
    NEbin = int(config['Ebin']['NumEnergyBins'])
    if NEbin > 0:
        Epoint, Fluxpoint, EpointErrm, EpointErrp, FluxpointErrm, FluxpointErrp, uplim = GetDataPoints(config,pars,ignore_missing_bins) #collect data points
        plot_errorbar_withuls(Epoint,EpointErrm,EpointErrp,Fluxpoint,FluxpointErrm,FluxpointErrp,uplim)
    #Set meaningful axes limits
    xlim = plt.xlim()
    ylim = plt.ylim()
    xlim = (max([20,xlim[0]]),min([2e6,xlim[1]]))
    ylim = (max([1e-14,ylim[0]]),min([1e-8,ylim[1]]))
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.legend(fontsize='small',ncol=1,\
               loc=3,numpoints=1)#,framealpha=0.75)
    #save the canvas
    plt.tight_layout()
    # FIX: keyword typo "bbox_inch" -> "bbox_inches" (compare the other
    # savefig calls in this module).
    plt.savefig("%s.png" %filebase, dpi=150, facecolor='w', edgecolor='w',
                orientation='portrait', papertype=None, format=None,
                transparent=False, bbox_inches=None, pad_inches=0.1,
                frameon=None)
def PlotUL(pars,config,ULFlux,Index):
    """Plot an upper-limit power law in SED representation.

    The curve is a power law of spectral index -Index whose integral flux
    between pars.Emin and pars.Emax equals ULFlux, drawn with
    down-pointing arrows at both ends, then saved to "<spec file>.png".
    """
    # Sample the energy axis and convert the normalised power law to
    # E^2 dN/dE in erg units.
    energies = np.logspace(np.log10(pars.Emin), np.log10(pars.Emax), pars.N)
    one_minus_index = -Index + 1
    norm = (np.power(pars.Emax, one_minus_index) - np.power(pars.Emin, one_minus_index))
    sed_curve = MEV_TO_ERG * energies ** 2 * one_minus_index * ULFlux * np.power(energies, -Index) / norm
    # Axes and the limit curve itself
    plt.xlabel(r"E [MeV]")
    plt.ylabel(r"$\mathrm{E^2\ dN/dE}\ \mathrm{(erg\ cm^{-2} s^{-1})}$")
    plt.loglog()
    plt.plot(energies, sed_curve, "-", color='black')
    # Down-arrows at the first and last sampled energy. matplotlib < 1.4
    # draws the uplims/lolims arrows inverted (fixed by matplotlib PR #2452),
    # so pick the flag according to the installed version.
    arrow_x = [energies[0], energies[-1]]
    arrow_y = [sed_curve[0], sed_curve[-1]]
    arrow_len = [sed_curve[0] * 0.8, sed_curve[-1] * 0.8]
    if LooseVersion(matplotlib.__version__) < LooseVersion("1.4.0"):
        plt.errorbar(arrow_x, arrow_y, yerr=arrow_len, fmt='.', color='black', ls='None', lolims=[1, 1])
    else:
        plt.errorbar(arrow_x, arrow_y, yerr=arrow_len, fmt='.', color='black', ls='None', uplims=[1, 1])
    # Save the plot next to the spectrum files.
    outbase = utils._SpecFileName(config)
    plt.tight_layout()
    plt.savefig(outbase + '.png', dpi=150, facecolor='w', edgecolor='w',
                orientation='portrait', papertype=None, format=None,
                transparent=False, bbox_inches=None, pad_inches=0.1,
                frameon=None)
def plot_sed_fromconfig(config, ignore_missing_bins=False):
    """Plot the SED butterfly or an upper limit for the target of *config*.

    config may be a path or an already-parsed config; it is normalised
    through get_config. When the fit TS exceeds the configured threshold
    the full SED is drawn via PlotSED, otherwise an upper-limit diagram
    is attempted via PlotUL.
    """
    config = get_config(config)
    utils.mkdir_p(config["out"] + "/Spectrum")

    srcname = config['target']['name']
    Emin = config['energy']['emin']
    Emax = config['energy']['emax']
    filename = utils._SpecFileName(config)

    Param = Params(srcname, Emin=Emin, Emax=Emax, PlotName=filename)
    Result = utils.ReadResult(config)

    # if the TS > ts limit plot the butterfly, if not draw UL
    if Result["TS"] > config['UpperLimit']['TSlimit']:
        PlotSED(config, Param, ignore_missing_bins)
    else:
        try:
            PlotUL(Param, config, Result['Ulvalue'],
                   config['UpperLimit']['SpectralIndex'])
        except Exception as exc:
            # Fixed: was a bare `except:` that silently hid the real cause
            # (typically a missing 'Ulvalue' when no UL was computed).
            print("Not able to plot an upper limit in a SED diagram. "
                  "UL computed? ({0})".format(exc))
|
|
#!/usr/bin/env python2.7
"""Check CFC - Check Compile Flow Consistency
This is a compiler wrapper for testing that code generation is consistent with
different compilation processes. It checks that code is not unduly affected by
compiler options or other changes which should not have side effects.
To use:
-Ensure that the compiler under test (i.e. clang, clang++) is on the PATH
-On Linux copy this script to the name of the compiler
e.g. cp check_cfc.py clang && cp check_cfc.py clang++
-On Windows use setup.py to generate check_cfc.exe and copy that to clang.exe
and clang++.exe
-Enable the desired checks in check_cfc.cfg (in the same directory as the
wrapper)
e.g.
[Checks]
dash_g_no_change = true
dash_s_no_change = false
-The wrapper can be run using its absolute path or added to PATH before the
compiler under test
e.g. export PATH=<path to check_cfc>:$PATH
-Compile as normal. The wrapper intercepts normal -c compiles and will return
non-zero if the check fails.
e.g.
$ clang -c test.cpp
Code difference detected with -g
--- /tmp/tmp5nv893.o
+++ /tmp/tmp6Vwjnc.o
@@ -1 +1 @@
- 0: 48 8b 05 51 0b 20 00 mov 0x200b51(%rip),%rax
+ 0: 48 39 3d 51 0b 20 00 cmp %rdi,0x200b51(%rip)
-To run LNT with Check CFC specify the absolute path to the wrapper to the --cc
and --cxx options
e.g.
lnt runtest nt --cc <path to check_cfc>/clang \\
--cxx <path to check_cfc>/clang++ ...
To add a new check:
-Create a new subclass of WrapperCheck
-Implement the perform_check() method. This should perform the alternate compile
and do the comparison.
-Add the new check to check_cfc.cfg. The check has the same name as the
subclass.
"""
from __future__ import print_function
import imp
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import ConfigParser
import io
import obj_diff
def is_windows():
    """Return True when the host platform is Windows."""
    return 'Windows' == platform.system()
class WrapperStepException(Exception):
    """Raised when a wrapper step other than the original compile fails.

    Carries the failing subprocess's captured stdout/stderr so the caller
    can print diagnostics.
    """
    def __init__(self, msg, stdout, stderr):
        self.msg = msg
        self.stdout = stdout
        self.stderr = stderr
class WrapperCheckException(Exception):
    """Raised when one of the consistency comparison checks fails."""
    def __init__(self, msg):
        # Human-readable description of the detected difference.
        self.msg = msg
def main_is_frozen():
    """Return True when running as a frozen (py2exe-style) executable."""
    if hasattr(sys, "frozen"):       # new py2exe
        return True
    if hasattr(sys, "importers"):    # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
def get_main_dir():
    """Return the directory holding this script or frozen executable."""
    # Frozen executables report their location via sys.executable.
    source = sys.executable if main_is_frozen() else sys.argv[0]
    return os.path.dirname(source)
def remove_dir_from_path(path_var, directory):
    """Return *path_var* (a PATH-style string) with *directory* removed.

    Entries are compared after normpath/normcase so spelling variants of
    the same directory are all filtered out.
    """
    target = os.path.normpath(os.path.normcase(directory))
    kept = [entry for entry in path_var.split(os.pathsep)
            if os.path.normpath(os.path.normcase(entry)) != target]
    return os.pathsep.join(kept)
def path_without_wrapper():
    """Return PATH with this program's own directory filtered out.

    Prevents the wrapper from recursively invoking itself when it runs
    the real compiler.
    """
    return remove_dir_from_path(os.environ['PATH'], get_main_dir())
def flip_dash_g(args):
    """Toggle -g: strip every occurrence if present, else append one."""
    if '-g' not in args:
        return args + ['-g']
    return [arg for arg in args if arg != '-g']
def derive_output_file(args):
    """Return '<input base>.o' for a single-input command, else None."""
    infile = get_input_file(args)
    if infile is None:
        return None
    return '{}.o'.format(os.path.splitext(infile)[0])
def get_output_file(args):
    """Return the output file named by -o in *args*, or None if absent."""
    expecting_value = False
    for arg in args:
        if expecting_value:
            # Previous token was a bare '-o'; this one is the file name.
            return arg
        if arg == '-o':
            expecting_value = True
        elif arg.startswith('-o'):
            # '-ofile' form: the name is attached to the flag.
            return arg[2:]
    # A trailing bare '-o' with no value would be a malformed command line.
    assert not expecting_value
    return None
def is_output_specified(args):
    """Return True when *args* names an output file via -o."""
    return get_output_file(args) is not None
def replace_output_file(args, new_name):
    """Swap the output file already named in *args* for *new_name*.

    Handles both '-o file' and '-ofile' forms; raises when no -o option
    exists (callers must check with is_output_specified first).
    """
    target_idx = None
    joined = False
    for idx, token in enumerate(args):
        if token == '-o':
            target_idx = idx + 1
            joined = False
        elif token.startswith('-o'):
            target_idx = idx
            joined = True
    if target_idx is None:
        raise Exception
    args[target_idx] = ('-o' + new_name) if joined else new_name
    return args
def add_output_file(args, output_file):
    """Return a copy of *args* with '-o <output_file>' appended."""
    return list(args) + ['-o', output_file]
def set_output_file(args, output_file):
    """Ensure *args* names *output_file* as output.

    Replaces an existing -o value when present, otherwise appends one.
    """
    if is_output_specified(args):
        return replace_output_file(args, output_file)
    return add_output_file(args, output_file)
# Extensions recognised as C/C++ source files.
gSrcFileSuffixes = ('.c', '.cpp', '.cxx', '.c++', '.cp', '.cc')

def get_input_file(args):
    """Return the single source-file argument, or None if zero or many."""
    sources = []
    for arg in args:
        candidate = arg
        # Strip any trailing quote characters before testing the suffix.
        while candidate.endswith(('"', "'")):
            candidate = candidate[:-1]
        if os.path.normcase(candidate).endswith(gSrcFileSuffixes):
            sources.append(arg)
    # Only a unique source file can be checked unambiguously.
    return sources[0] if len(sources) == 1 else None
def set_input_file(args, input_file):
    """Replace the recognised input file in *args* with *input_file*.

    Callers must only invoke this when get_input_file finds an input.
    """
    current = get_input_file(args)
    # Could not find input file
    assert current
    args[args.index(current)] = input_file
    return args
def is_normal_compile(args):
    """Return True for a plain -c compile of one recognised source file.

    Excludes bitcode output, compiler queries and dependency generation,
    none of which can be checked by disassembly comparison.
    """
    if '-c' not in args:
        return False
    # Bitcode cannot be disassembled in the same way.
    if '-flto' in args or '-emit-llvm' in args:
        return False
    # Version and help are queries of the compiler and override -c.
    if '--version' in args or '--help' in args:
        return False
    # Options to output dependency files for make.
    if '-M' in args or '-MM' in args:
        return False
    # The input must be recognised as a source file (this may be too
    # strong a restriction).
    return bool(get_input_file(args))
def run_step(command, my_env, error_on_failure):
    """Run one compilation step; raise WrapperStepException on failure."""
    # shell=True is required on Windows for Popen to honour PATH.
    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, env=my_env,
                            shell=is_windows())
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise WrapperStepException(error_on_failure, stdout, stderr)
def get_temp_file_name(suffix):
    """Create a closed temporary file with *suffix* and return its name.

    The caller is responsible for deleting the file.
    """
    with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tf:
        return tf.name
class WrapperCheck(object):
    """Base class for a consistency check.

    Subclass and override perform_check; subclasses are discovered by
    name from the [Checks] configuration section.
    """
    def __init__(self, output_file_a):
        # Baseline object file every alternate compile is compared against.
        self._output_file_a = output_file_a

    def perform_check(self, arguments, my_env):
        """Run the modified compilation and comparison. Must be overridden."""
        raise NotImplementedError("Please Implement this method")
class dash_g_no_change(WrapperCheck):
    def perform_check(self, arguments, my_env):
        """Check if different code is generated with/without the -g flag."""
        alt_object = get_temp_file_name('.o')

        # Re-run the compile with -g toggled, writing to a temp object.
        alt_command = set_output_file(flip_dash_g(list(arguments)), alt_object)
        run_step(alt_command, my_env, "Error compiling with -g")

        # compare_object_files returns the first disassembly diff, if any.
        difference = obj_diff.compare_object_files(self._output_file_a,
                                                   alt_object)
        if difference:
            raise WrapperCheckException(
                "Code difference detected with -g\n{}".format(difference))

        # Only remove the temp object when the comparison succeeded.
        os.remove(alt_object)
class dash_s_no_change(WrapperCheck):
    def perform_check(self, arguments, my_env):
        """Check if compiling to asm then assembling in separate steps results
        in different code than compiling to object directly."""
        alt_object = get_temp_file_name('.o')
        alt_command = set_output_file(arguments + ['-via-file-asm'], alt_object)
        run_step(alt_command, my_env,
                 "Error compiling with -via-file-asm")

        # Cheap exact comparison first; on mismatch, report the most
        # specific difference we can find (code, then debug info).
        if not obj_diff.compare_exact(self._output_file_a, alt_object):
            difference = obj_diff.compare_object_files(self._output_file_a,
                                                       alt_object)
            if difference:
                raise WrapperCheckException(
                    "Code difference detected with -S\n{}".format(difference))

            # Code is identical, compare debug info
            dbgdifference = obj_diff.compare_debug_info(self._output_file_a,
                                                        alt_object)
            if dbgdifference:
                raise WrapperCheckException(
                    "Debug info difference detected with -S\n{}".format(dbgdifference))

            raise WrapperCheckException("Object files not identical with -S\n")

        # Clean up temp file if comparison okay
        os.remove(alt_object)
if __name__ == '__main__':
    # Create configuration defaults from list of checks
    default_config = """
[Checks]
"""

    # Find all subclasses of WrapperCheck
    checks = [cls.__name__ for cls in vars()['WrapperCheck'].__subclasses__()]

    # Every known check defaults to disabled; check_cfc.cfg enables them.
    for c in checks:
        default_config += "{} = false\n".format(c)

    # NOTE: readfp over io.BytesIO of a str is Python-2 specific, matching
    # the python2.7 shebang of this script.
    config = ConfigParser.RawConfigParser()
    config.readfp(io.BytesIO(default_config))
    scriptdir = get_main_dir()
    # Read the check configuration sitting next to this wrapper, if present.
    config_path = os.path.join(scriptdir, 'check_cfc.cfg')
    try:
        config.read(os.path.join(config_path))
    except:
        print("Could not read config from {}, "
              "using defaults.".format(config_path))

    # Run the real compiler with this wrapper's directory removed from PATH
    # so the wrapper does not recursively invoke itself.
    my_env = os.environ.copy()
    my_env['PATH'] = path_without_wrapper()

    arguments_a = list(sys.argv)

    # Prevent infinite loop if called with absolute path.
    arguments_a[0] = os.path.basename(arguments_a[0])

    # Sanity check
    enabled_checks = [check_name
                      for check_name in checks
                      if config.getboolean('Checks', check_name)]
    checks_comma_separated = ', '.join(enabled_checks)
    print("Check CFC, checking: {}".format(checks_comma_separated))

    # A - original compilation
    output_file_orig = get_output_file(arguments_a)
    if output_file_orig is None:
        output_file_orig = derive_output_file(arguments_a)

    # Always run the original compile; its exit code is propagated verbatim.
    p = subprocess.Popen(arguments_a, env=my_env, shell=is_windows())
    p.communicate()
    if p.returncode != 0:
        sys.exit(p.returncode)

    if not is_normal_compile(arguments_a) or output_file_orig is None:
        # Bail out here if we can't apply checks in this case.
        # Does not indicate an error.
        # Maybe not straight compilation (e.g. -S or --version or -flto)
        # or maybe > 1 input files.
        sys.exit(0)

    # Sometimes we generate files which have very long names which can't be
    # read/disassembled. This will exit early if we can't find the file we
    # expected to be output.
    if not os.path.isfile(output_file_orig):
        sys.exit(0)

    # Copy output file to a temp file
    temp_output_file_orig = get_temp_file_name('.o')
    shutil.copyfile(output_file_orig, temp_output_file_orig)

    # Run checks, if they are enabled in config and if they are appropriate for
    # this command line.
    current_module = sys.modules[__name__]
    for check_name in checks:
        if config.getboolean('Checks', check_name):
            # Checks are looked up by name; each is a WrapperCheck subclass.
            class_ = getattr(current_module, check_name)
            checker = class_(temp_output_file_orig)
            try:
                checker.perform_check(arguments_a, my_env)
            except WrapperCheckException as e:
                # Check failure
                print("{} {}".format(get_input_file(arguments_a), e.msg), file=sys.stderr)

                # Remove file to comply with build system expectations (no
                # output file if failed)
                os.remove(output_file_orig)
                sys.exit(1)
            except WrapperStepException as e:
                # Compile step failure
                print(e.msg, file=sys.stderr)
                print("*** stdout ***", file=sys.stderr)
                print(e.stdout, file=sys.stderr)
                print("*** stderr ***", file=sys.stderr)
                print(e.stderr, file=sys.stderr)

                # Remove file to comply with build system expectations (no
                # output file if failed)
                os.remove(output_file_orig)
                sys.exit(1)
|
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
from GLWidget import GLWidget
from _GafferUI import ButtonEvent, ModifiableEvent, ContainerGadget, DragDropEvent, KeyEvent
# import lazily to improve startup of apps which don't use GL functionality
GL = Gaffer.lazyImport( "OpenGL.GL" )
IECoreGL = Gaffer.lazyImport( "IECoreGL" )
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
## The GadgetWidget class provides a means of
# hosting a Gadget within a Widget based interface.
class GadgetWidget( GafferUI.GLWidget ) :

	## The gadget may either be a ViewportGadget in which case it will be used in a call
	# to setViewportGadget, otherwise a suitable viewport will be created and the gadget will
	# be placed within it.
	def __init__( self, gadget=None, bufferOptions=set(), **kw ) :

		# NOTE(review): bufferOptions uses a mutable default (set()); safe only
		# while nothing mutates the shared default - confirm GLWidget.__init__
		# treats it as read-only.
		GLWidget.__init__( self, bufferOptions, **kw )

		# Take keyboard focus on click so key events reach the gadgets.
		self._qtWidget().setFocusPolicy( QtCore.Qt.ClickFocus )

		# Force the IECoreGL lazy loading to kick in /now/. Otherwise we can get IECoreGL objects
		# being returned from the GafferUIBindings without the appropriate boost::python converters
		# having been registered first.
		IECoreGL.Renderer

		self.__requestedDepthBuffer = self.BufferOptions.Depth in bufferOptions

		# Connections are kept on self so they live as long as the widget.
		self.__enterConnection = self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ) )
		self.__leaveConnection = self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ) )
		self.__keyPressConnection = self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
		self.__keyReleaseConnection = self.keyReleaseSignal().connect( Gaffer.WeakMethod( self.__keyRelease ) )
		self.__buttonPressConnection = self.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ) )
		self.__buttonReleaseConnection = self.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ) )
		self.__buttonDoubleClickConnection = self.buttonDoubleClickSignal().connect( Gaffer.WeakMethod( self.__buttonDoubleClick ) )
		self.__mouseMoveConnection = self.mouseMoveSignal().connect( Gaffer.WeakMethod( self.__mouseMove ) )
		self.__dragBeginConnection = self.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ) )
		self.__dragEnterConnection = self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
		self.__dragMoveConnection = self.dragMoveSignal().connect( Gaffer.WeakMethod( self.__dragMove ) )
		self.__dragLeaveConnection = self.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ) )
		self.__dropConnection = self.dropSignal().connect( Gaffer.WeakMethod( self.__drop ) )
		self.__dragEndConnection = self.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ) )
		self.__wheelConnection = self.wheelSignal().connect( Gaffer.WeakMethod( self.__wheel ) )

		# Wrap a plain gadget in a ViewportGadget when necessary.
		self.__viewportGadget = None
		if isinstance( gadget, GafferUI.ViewportGadget ) :
			self.setViewportGadget( gadget )
		else :
			self.setViewportGadget( GafferUI.ViewportGadget( gadget ) )

		self._qtWidget().installEventFilter( _eventFilter )

	## Returns the ViewportGadget used to render this Widget. You can
	# modify this freely to change the Gadgets being rendered.
	def getViewportGadget( self ) :

		return self.__viewportGadget

	## Sets the ViewportGadget used to render this Widget.
	def setViewportGadget( self, viewportGadget ) :

		assert( isinstance( viewportGadget, GafferUI.ViewportGadget ) )

		if viewportGadget.isSame( self.__viewportGadget ) :
			return

		self.__viewportGadget = viewportGadget
		# Redraw whenever the new viewport requests it; replacing the
		# connection drops the one to the previous viewport.
		self.__renderRequestConnection = self.__viewportGadget.renderRequestSignal().connect( Gaffer.WeakMethod( self.__renderRequest ) )
		size = self.size()
		if size.x and size.y :
			self.__viewportGadget.setViewport( size )

		self._redraw()

	def _resize( self, size ) :

		GafferUI.GLWidget._resize( self, size )
		if size.x and size.y :
			# avoid resizing if resolution has hit 0, as then
			# the reframing maths breaks down
			self.__viewportGadget.setViewport( size )

	def _draw( self ) :

		# Give listeners a chance to update state before rendering.
		self.__viewportGadget.preRenderSignal()( self.__viewportGadget )
		self.__viewportGadget.render()

	def __enter( self, widget ) :

		# Don't steal keyboard focus from text-entry widgets elsewhere in the UI.
		if not isinstance( QtGui.QApplication.focusWidget(), ( QtGui.QLineEdit, QtGui.QPlainTextEdit ) ) :
			self._qtWidget().setFocus()

	def __leave( self, widget ) :

		self._qtWidget().clearFocus()

	def __renderRequest( self, gadget ) :

		self._redraw()

	def __buttonPress( self, widget, event ) :

		# we get given button presses before they're given to the overlay items,
		# so we must ignore them so they can be used by the overlay.
		if self._qtWidget().itemAt( event.line.p0.x, event.line.p0.y ) is not None :
			return False

		# but if we're outside the overlay item then we should take the
		# keyboard focus back from the overlay.
		focusItem = self._qtWidget().scene().focusItem()
		if focusItem is not None :
			self._qtWidget().scene().clearFocus()
			if focusItem.widget().focusWidget() is not None :
				focusItem.widget().focusWidget().clearFocus()

		# The GL context must be current before the gadget handles the event.
		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.buttonPressSignal()( self.__viewportGadget, event )

	def __buttonRelease( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.buttonReleaseSignal()( self.__viewportGadget, event )

	def __buttonDoubleClick( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.buttonDoubleClickSignal()( self.__viewportGadget, event )

	def __mouseMove( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		self.__viewportGadget.mouseMoveSignal()( self.__viewportGadget, event )

		# we always return false so that any overlay items will get appropriate
		# move/enter/leave events, otherwise highlighting for buttons etc can go
		# awry.
		return False

	def __dragBegin( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.dragBeginSignal()( self.__viewportGadget, event )

	def __dragEnter( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.dragEnterSignal()( self.__viewportGadget, event )

	def __dragMove( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.dragMoveSignal()( self.__viewportGadget, event )

	def __dragLeave( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.dragLeaveSignal()( self.__viewportGadget, event )

	def __drop( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.dropSignal()( self.__viewportGadget, event )

	def __dragEnd( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.dragEndSignal()( self.__viewportGadget, event )

	def __keyPress( self, widget, event ) :

		# we get given keypresses before the graphicsview does, so we
		# need to make sure we don't stop them going to a focussed overlay widget.
		if self._qtWidget().scene().focusItem() is not None :
			if self._qtWidget().scene().focusItem().widget().focusWidget() is not None :
				return False

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.keyPressSignal()( self.__viewportGadget, event )

	def __keyRelease( self, widget, event ) :

		# See __keyPress : a focussed overlay widget gets the event instead.
		if self._qtWidget().scene().focusItem() is not None :
			if self._qtWidget().scene().focusItem().widget().focusWidget() is not None :
				return False

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.keyReleaseSignal()( self.__viewportGadget, event )

	def __wheel( self, widget, event ) :

		if not self._makeCurrent() :
			return False

		return self.__viewportGadget.wheelSignal()( self.__viewportGadget, event )
## Used to make the tooltips dependent on which gadget is under the mouse
class _EventFilter( QtCore.QObject ) :

	def __init__( self ) :

		QtCore.QObject.__init__( self )

	def eventFilter( self, qObject, qEvent ) :

		# Only tooltip events are intercepted; everything else passes through.
		if qEvent.type()==QtCore.QEvent.ToolTip :

			widget = GafferUI.Widget._owner( qObject )
			assert( isinstance( widget, GadgetWidget ) )
			if not widget._makeCurrent() :
				return False
			# Ask the viewport for a tooltip along the line of sight under
			# the cursor position.
			toolTip = widget.getViewportGadget().getToolTip(
				IECore.LineSegment3f(
					IECore.V3f( qEvent.x(), qEvent.y(), 1 ),
					IECore.V3f( qEvent.x(), qEvent.y(), 0 )
				)
			)
			QtGui.QToolTip.showText( qEvent.globalPos(), toolTip, qObject )
			return True

		return False

# this single instance is used by all widgets
_eventFilter = _EventFilter()
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
# TF1-style command-line flags shared by the input pipeline below.
FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")

# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24

# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_cifar10(filename_queue):
  """Read and parse one example from the CIFAR-10 binary files.

  Recommendation: call this N times for N-way read parallelism; each call
  creates an independent reader with its own file position.

  Args:
    filename_queue: A queue of strings with the filenames to read from.

  Returns:
    An object with fields:
      height, width, depth: record dimensions (32, 32, 3).
      key: scalar string Tensor naming the file and record number.
      label: int32 Tensor with the label in the range 0..9.
      uint8image: [height, width, depth] uint8 Tensor with the image data.
  """

  class CIFAR10Record(object):
    pass

  result = CIFAR10Record()

  # CIFAR-10 binary layout (http://www.cs.toronto.edu/~kriz/cifar.html):
  # one label byte followed by a depth-major 32x32x3 image, no header/footer.
  label_bytes = 1  # 2 for CIFAR-100
  result.height = 32
  result.width = 32
  result.depth = 3
  image_bytes = result.height * result.width * result.depth
  record_bytes = label_bytes + image_bytes

  # Fixed-length records; header_bytes/footer_bytes stay at their default 0.
  reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
  result.key, value = reader.read(filename_queue)

  # Reinterpret the raw record string as a flat uint8 vector.
  raw_record = tf.decode_raw(value, tf.uint8)

  # First byte is the label (uint8 -> int32).
  result.label = tf.cast(
      tf.slice(raw_record, [0], [label_bytes]), tf.int32)

  # Remaining bytes are the image: reshape the flat [depth*height*width]
  # data to [depth, height, width], then transpose to [height, width, depth].
  depth_major = tf.reshape(
      tf.slice(raw_record, [label_bytes], [image_bytes]),
      [result.depth, result.height, result.width])
  result.uint8image = tf.transpose(depth_major, [1, 2, 0])

  return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type.float32.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # 16 reader threads feed the example queue; capacity leaves headroom of
  # 3 batches above the minimum needed for good shuffling.
  num_preprocess_threads = 16
  queue_kwargs = dict(
      batch_size=batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * batch_size)
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        min_after_dequeue=min_queue_examples,
        **queue_kwargs)
  else:
    images, label_batch = tf.train.batch([image, label], **queue_kwargs)

  # Display the training images in the visualizer.
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size])
def inputs(eval_data, data_dir, batch_size):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.
      NOTE(review): batch_size is currently unused here - no batching is
      performed; confirm callers batch the result themselves.

  Returns:
    A single (unbatched) example:
      uint8image: [32, 32, 3] uint8 image tensor.
      label: int32 label tensor.
    NOTE(review): the previous docstring promised batched 4D tensors of
    IMAGE_SIZE images, which this function does not produce.
  """
  if not eval_data:
    # xrange: this file targets Python 2.
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
                 for i in xrange(1, 6)]
    # NOTE(review): num_examples_per_epoch is computed but never used here.
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
  else:
    filenames = [os.path.join(data_dir, 'test_batch.bin')]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

  # Fail fast when any expected data file is missing.
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = read_cifar10(filename_queue)
  return read_input.uint8image, read_input.label
def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  # NOTE(review): BUG - this calls distorted_inputs() itself with keyword
  # arguments the function does not accept, so it raises TypeError at
  # runtime. It presumably should delegate to a reader helper (e.g.
  # cifar10_input.distorted_inputs in the upstream CIFAR-10 tutorial) -
  # confirm the intended target before fixing.
  images, labels = distorted_inputs(data_dir=data_dir,
                                    batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels
def test_inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: image tensor(s) as produced by inputs().
    labels: label tensor(s) as produced by inputs().

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  binaries_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  images, labels = inputs(eval_data=eval_data,
                          data_dir=binaries_dir,
                          batch_size=FLAGS.batch_size)
  # Optionally down-cast for fp16 training.
  if FLAGS.use_fp16:
    return tf.cast(images, tf.float16), tf.cast(labels, tf.float16)
  return images, labels
# Module-level smoke test of the input pipeline on the held-out set.
# NOTE(review): eval_data is passed as the truthy string "test", not a bool -
# this selects the test set, but confirm the intent matches the documented
# bool parameter.
images, labels = test_inputs("test")
with tf.Session() as sess:
    # NOTE(review): tf.train.start_queue_runners() is never called, so this
    # sess.run will block forever waiting on the input queue - confirm.
    sess.run([images, labels])
    # Prints the Tensor object, not the evaluated label values.
    print(labels)
|
|
import sys
import codecs
import os.path
import warnings
from werkzeug.utils import cached_property
from browsepy.compat import range, PY_LEGACY # noqa
from browsepy.file import Node, File, Directory, \
underscore_replace, check_under_base
# Python 2 ships the module as ConfigParser; alias it to the Python 3 name.
if PY_LEGACY:
    import ConfigParser as configparser
else:
    import configparser

# Prefer SafeConfigParser where it still exists (deprecated on Python 3,
# later removed), otherwise fall back to the plain ConfigParser.
ConfigParserBase = (
    configparser.SafeConfigParser
    if hasattr(configparser, 'SafeConfigParser') else
    configparser.ConfigParser
)
class PLSFileParser(object):
    '''
    Thin ConfigParser wrapper whose get/getint accept a fallback value.

    Wrapping (rather than inheriting) is required because ConfigParser is
    an old-style class on Python 2.
    '''
    # Sentinel distinguishing "no fallback given" from a None fallback.
    NOT_SET = type('NotSetType', (object,), {})
    parser_class = (
        configparser.SafeConfigParser
        if hasattr(configparser, 'SafeConfigParser') else
        configparser.ConfigParser
    )

    def __init__(self, path):
        with warnings.catch_warnings():
            # We already know about SafeConfigParser deprecation!
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            self._parser = self.parser_class()
        self._parser.read(path)

    def _lookup(self, getter, section, key, fallback):
        # Shared fallback logic for both typed getters below.
        try:
            return getter(section, key)
        except (configparser.NoOptionError, ValueError):
            if fallback is self.NOT_SET:
                raise
            return fallback

    def getint(self, section, key, fallback=NOT_SET):
        '''Integer option lookup, returning *fallback* when missing/invalid.'''
        return self._lookup(self._parser.getint, section, key, fallback)

    def get(self, section, key, fallback=NOT_SET):
        '''String option lookup, returning *fallback* when missing/invalid.'''
        return self._lookup(self._parser.get, section, key, fallback)
class PlayableBase(File):
    # Every player-related extension (audio files and playlists alike)
    # mapped to its mimetype.
    extensions = {
        'mp3': 'audio/mpeg',
        'ogg': 'audio/ogg',
        'wav': 'audio/wav',
        'm3u': 'audio/x-mpegurl',
        'm3u8': 'audio/x-mpegurl',
        'pls': 'audio/x-scpls',
    }

    @classmethod
    def extensions_from_mimetypes(cls, mimetypes):
        '''Return the subset of cls.extensions whose mimetype is listed.'''
        wanted = frozenset(mimetypes)
        return {
            extension: mimetype
            for extension, mimetype in cls.extensions.items()
            if mimetype in wanted
        }

    @classmethod
    def detect(cls, node, os_sep=os.sep):
        '''Return the node's mimetype when its extension is known, else None.'''
        filename = node.path.rsplit(os_sep)[-1]
        if '.' not in filename:
            return None
        return cls.extensions.get(filename.rsplit('.')[-1], None)
class PlayableFile(PlayableBase):
    # Directly playable audio mimetypes, plus the extension map derived
    # from them and its mimetype -> extension inverse for the player.
    mimetypes = ['audio/mpeg', 'audio/ogg', 'audio/wav']
    extensions = PlayableBase.extensions_from_mimetypes(mimetypes)
    media_map = {mime: ext for ext, mime in extensions.items()}

    def __init__(self, **kwargs):
        # Pop playlist metadata before delegating the rest to File.
        self.duration = kwargs.pop('duration', None)
        self.title = kwargs.pop('title', None)
        super(PlayableFile, self).__init__(**kwargs)

    @property
    def title(self):
        # Fall back to the file name when no explicit title was given.
        return self._title or self.name

    @title.setter
    def title(self, new_title):
        self._title = new_title

    @property
    def media_format(self):
        # Extension the player should use for this file's mimetype.
        return self.media_map[self.type]
class PlayListFile(PlayableBase):
    '''Base class for playlist file formats (M3U, PLS).'''

    playable_class = PlayableFile
    mimetypes = ['audio/x-mpegurl', 'audio/x-mpegurl', 'audio/x-scpls']
    extensions = PlayableBase.extensions_from_mimetypes(mimetypes)

    @classmethod
    def from_urlpath(cls, path, app=None):
        '''Resolve *path* to the most specific playable node type.'''
        node = Node.from_urlpath(path, app)
        mimetype = node.mimetype
        if mimetype == PlayableDirectory.mimetype:
            return PlayableDirectory(node.path, node.app)
        if mimetype == M3UFile.mimetype:
            return M3UFile(node.path, node.app)
        if mimetype == PLSFile.mimetype:
            return PLSFile(node.path, node.app)
        return node

    def normalize_playable_path(self, path):
        '''Resolve a playlist entry to an absolute path inside the served
        tree; URLs pass through untouched, escaping paths yield None.'''
        if '://' in path:
            return path  # remote URL: leave as-is
        normalized = os.path.normpath(path)
        if not os.path.isabs(normalized):
            return os.path.join(self.parent.path, normalized)
        drive = os.path.splitdrive(self.path)[0]
        if drive and not os.path.splitdrive(normalized)[0]:
            # Windows: inherit the playlist's drive for drive-less paths.
            normalized = drive + normalized
        if check_under_base(normalized, self.app.config['directory_base']):
            return normalized
        return None

    def _entries(self):
        '''Empty generator; format-specific subclasses override this.'''
        return
        yield  # noqa

    def entries(self, sortkey=None, reverse=None):
        '''Yield only those parsed entries detected as playable files.'''
        for entry in self._entries():
            if PlayableFile.detect(entry):
                yield entry
class PLSFile(PlayListFile):
    '''PLS (INI-style) playlist file.'''

    ini_parser_class = PLSFileParser
    # Iteration cap used when the playlist omits NumberOfEntries.
    maxsize = getattr(sys, 'maxint', 0) or getattr(sys, 'maxsize', 0) or 2**32
    mimetype = 'audio/x-scpls'
    extensions = PlayableBase.extensions_from_mimetypes([mimetype])

    def _entries(self):
        '''Yield playable entries declared as File<N> options.'''
        parser = self.ini_parser_class(self.path)
        declared = parser.getint('playlist', 'NumberOfEntries', None)
        stop = self.maxsize if declared is None else declared + 1
        for index in range(1, stop):
            entry_path = parser.get('playlist', 'File%d' % index, None)
            if not entry_path:
                if declared:
                    # Size is declared: tolerate gaps in the numbering.
                    continue
                # Size unknown: the first gap ends the scan.
                break
            entry_path = self.normalize_playable_path(entry_path)
            if not entry_path:
                continue
            yield self.playable_class(
                path=entry_path,
                app=self.app,
                duration=parser.getint('playlist', 'Length%d' % index, None),
                title=parser.get('playlist', 'Title%d' % index, None),
            )
class M3UFile(PlayListFile):
    '''Extended M3U playlist file (.m3u / .m3u8).'''

    mimetype = 'audio/x-mpegurl'
    extensions = PlayableBase.extensions_from_mimetypes([mimetype])

    def _iter_lines(self):
        '''Yield non-empty, right-stripped lines, skipping the optional
        #EXTM3U header.'''
        prefix = '#EXTM3U\n'
        # .m3u8 is UTF-8 by definition; plain .m3u is treated as ASCII.
        encoding = 'utf-8' if self.path.endswith('.m3u8') else 'ascii'
        with codecs.open(
          self.path, 'r',
          encoding=encoding,
          errors=underscore_replace
          ) as f:
            if f.read(len(prefix)) != prefix:
                f.seek(0)
            for line in f:
                line = line.rstrip()
                if line:
                    yield line

    def _entries(self):
        '''Yield playable entries, attaching #EXTINF metadata to the
        entry line that follows it.'''
        data = {}
        for line in self._iter_lines():
            if line.startswith('#EXTINF:'):
                # '#EXTINF:<seconds>,<title>' describes the NEXT entry.
                # BUGFIX: the tag must be stripped before parsing the
                # duration (int('#EXTINF:123') raised ValueError and the
                # '-1' sentinel never matched), and the metadata line
                # itself must not be handed to normalize_playable_path.
                duration, title = line[len('#EXTINF:'):].split(',', 1)
                data['duration'] = None if duration == '-1' else int(duration)
                data['title'] = title
                continue
            # _iter_lines only yields non-empty lines, so no blank check
            # is needed here.
            path = self.normalize_playable_path(line)
            if path:
                yield self.playable_class(path=path, app=self.app, **data)
            # Drop metadata even when the path was rejected, so a stale
            # title/duration cannot leak onto a later entry.
            data.clear()
class PlayableDirectory(Directory):
    '''Directory view restricted to its playable files.'''

    file_class = PlayableFile
    name = ''

    @cached_property
    def parent(self):
        '''Plain Directory node for the same path.'''
        return Directory(self.path)

    @classmethod
    def detect(cls, node):
        '''Return this mimetype when *node* is a directory containing at
        least one playable file, otherwise None.'''
        if not node.is_directory:
            return None
        for child in node._listdir():
            if PlayableFile.detect(child):
                return cls.mimetype
        return None

    def entries(self, sortkey=None, reverse=None):
        '''Yield playable files from the directory listing.'''
        listdir = super(PlayableDirectory, self).listdir
        for child in listdir(sortkey=sortkey, reverse=reverse):
            if PlayableFile.detect(child):
                yield child
def detect_playable_mimetype(path, os_sep=os.sep):
    '''Return the playable mimetype for *path* based on its extension,
    or None when the extension is missing or unknown.'''
    filename = path.rsplit(os_sep)[-1]
    if '.' not in filename:
        return None
    return PlayableBase.extensions.get(filename.rsplit('.')[-1], None)
|
|
###############################################################################
# Universal Analytics for Python
# Copyright (c) 2013, Analytics Pros
#
# This project is free software, distributed under the BSD license.
# Analytics Pros offers consulting and integration services if your firm needs
# assistance in strategy, implementation, or auditing existing work.
###############################################################################
#
# reworked version
# migrated to Python 3
# and async/await
#
import urllib
import aiohttp
import asyncio
from urllib.parse import urlencode
import datetime
import hashlib
import logging
import time
import uuid
logger = logging.getLogger(__name__)
def generate_uuid(basedata=None):
    """ Provides a _random_ UUID with no input, or a UUID4-format MD5 checksum of any input data provided """
    if basedata is None:
        return str(uuid.uuid4())
    elif isinstance(basedata, (str, bytes)):
        # BUGFIX: hashlib requires bytes; under Python 3 md5(str) raised
        # TypeError. Encode text input, accept bytes directly.
        if isinstance(basedata, str):
            basedata = basedata.encode('utf-8')
        checksum = hashlib.md5(basedata).hexdigest()
        return '%8s-%4s-%4s-%4s-%12s' % (
            checksum[0:8], checksum[8:12], checksum[12:16], checksum[16:20], checksum[20:32])
class Time(datetime.datetime):
    """ Wrappers and convenience methods for processing various time representations """

    @classmethod
    def from_unix(cls, seconds, milliseconds=0):
        """ Produce a full |datetime.datetime| object from a Unix timestamp """
        # NOTE(review): interprets *seconds* as UTC (time.gmtime), while
        # to_unix below uses local time -- confirm this asymmetry is intended.
        parts = list(time.gmtime(seconds))[0:6]
        parts.append(milliseconds * 1000)  # microseconds
        return cls(*parts)

    @classmethod
    def to_unix(cls, timestamp):
        """ Wrapper over time module to produce Unix epoch time as a float """
        if not isinstance(timestamp, datetime.datetime):
            raise TypeError('Time.milliseconds expects a datetime object')
        return time.mktime(timestamp.timetuple())

    @classmethod
    def milliseconds_offset(cls, timestamp, now=None):
        """ Offset time (in milliseconds) from a |datetime.datetime| object to now """
        if isinstance(timestamp, (int, float)):
            base = timestamp
        else:
            # Sub-second precision is lost by timetuple(); add it back.
            base = cls.to_unix(timestamp)
            base += (timestamp.microsecond / 1000000)
        if now is None:
            now = time.time()
        return (now - base) * 1000
class HTTPRequest:
    """ URL Construction and request handling abstraction.
    This is not intended to be used outside this module.

    Automates mapping of persistent state (i.e. query parameters)
    onto transcient datasets for each query.
    """
    endpoint = 'https://www.google-analytics.com/collect'

    # Store properties for all requests
    def __init__(self, user_agent=None, *args, **opts):
        """
        :param user_agent: User-Agent header value; defaults to 'Bot Story'
        """
        self.user_agent = user_agent or 'Bot Story'
        # BUGFIX: send() reads self._session, but it was never initialized
        # here, so every send() raised AttributeError. It acts as an
        # optional externally-injected session (e.g. for tests); when None,
        # send() uses a throwaway per-call session.
        self._session = None

    @classmethod
    def fixUTF8(cls, data):  # Ensure proper encoding for UA's servers...
        """ Convert all string values to UTF-8 bytes (mutates *data* in place). """
        for key in data:
            if isinstance(data[key], str):
                data[key] = data[key].encode('utf-8')
        return data

    # Apply stored properties to the given dataset & POST to the configured endpoint
    def send_data(self, session, values):
        """ Issue the hit as a GET request; returns the aiohttp request context. """
        logger.debug('get')
        logger.debug(values)
        return session.get(self.endpoint + '?' + urlencode(self.fixUTF8(values)),
                           headers={
                               'User-Agent': self.user_agent
                           })

    # Apply stored properties to the given dataset & POST to the configured endpoint
    async def send(self, values):
        """ Transmit *values* to the endpoint, creating a session if none injected. """
        logging.debug('send')
        logging.debug(values)
        logging.debug('self.user_agent')
        logging.debug(self.user_agent)
        # ClientSession picks up the running loop itself; the explicit
        # asyncio.get_event_loop()/loop= dance is deprecated.
        async with aiohttp.ClientSession() as session:
            async with self.send_data(self._session or session, values) as resp:
                logging.debug('status')
                logging.debug(resp.status)
                logging.debug('resp.text()')
                logging.debug(await resp.text())
class HTTPPost(HTTPRequest):
    """ Variant of HTTPRequest delivering hits via HTTP POST. """

    def send_data(self, session, values):
        """ Issue the hit as a POST request; returns the aiohttp request context. """
        payload = urllib.parse.urlencode(values).encode('utf-8')
        logging.debug('binary_data')
        logging.debug(payload)
        return session.post(
            self.endpoint,
            data=payload,
            headers={'User-Agent': self.user_agent},
        )
class Tracker:
    """ Primary tracking interface for Universal Analytics """
    params = None
    # Class-level alias table: humane name -> (typecast, protocol parameter).
    parameter_alias = {}
    valid_hittypes = ('pageview', 'event', 'social', 'screenview', 'transaction', 'item', 'exception', 'timing')

    @classmethod
    def alias(cls, typemap, base, *names):
        """ Declare an alternate (humane) name for a measurement protocol parameter """
        cls.parameter_alias[base] = (typemap, base)
        for i in names:
            cls.parameter_alias[i] = (typemap, base)

    @classmethod
    def coerceParameter(cls, name, value=None):
        """ Resolve *name* via the alias table and coerce *value*.

        Names prefixed with '&' bypass the table and are passed through
        verbatim (value stringified). Unknown names raise KeyError.
        """
        if isinstance(name, str) and name[0] == '&':
            return name[1:], str(value)
        elif name in cls.parameter_alias:
            typecast, param_name = cls.parameter_alias.get(name)
            return param_name, typecast(value)
        else:
            raise KeyError('Parameter "{0}" is not recognized'.format(name))

    def payload(self, data):
        """ Yield coerced (param, value) pairs, silently dropping unknown keys. """
        for key, value in data.items():
            try:
                yield self.coerceParameter(key, value)
            except KeyError:
                continue

    # Positional-argument layout accepted by send() for each hittype.
    option_sequence = {
        'pageview': [(str, 'dp')],
        'event': [(str, 'ec'), (str, 'ea'), (str, 'el'), (int, 'ev')],
        'social': [(str, 'sn'), (str, 'sa'), (str, 'st')],
        'timing': [(str, 'utc'), (str, 'utv'), (str, 'utt'), (str, 'utl')]
    }

    @classmethod
    def consume_options(cls, data, hittype, args):
        """ Interpret sequential arguments related to known hittypes based on declared structures """
        opt_position = 0
        data['t'] = hittype  # integrate hit type parameter
        if hittype in cls.option_sequence:
            for expected_type, optname in cls.option_sequence[hittype]:
                if opt_position < len(args) and isinstance(args[opt_position], expected_type):
                    data[optname] = args[opt_position]
                    opt_position += 1

    @classmethod
    def hittime(cls, timestamp=None, age=None, milliseconds=None):
        """ Returns an integer representing the milliseconds offset for a given hit (relative to now) """
        if isinstance(timestamp, (int, float)):
            # BUGFIX: default milliseconds to 0 -- Time.from_unix multiplies
            # it, so forwarding None raised TypeError.
            return int(Time.milliseconds_offset(
                Time.from_unix(timestamp, milliseconds=milliseconds or 0)))
        if isinstance(timestamp, datetime.datetime):
            return int(Time.milliseconds_offset(timestamp))
        if isinstance(age, (int, float)):
            return int(age * 1000) + (milliseconds or 0)

    @property
    def account(self):
        """ Tracking id ('tid') this tracker reports hits to. """
        return self.params.get('tid', None)

    def __init__(self, account, name=None, client_id=None, hash_client_id=False, user_id=None, user_agent=None,
                 use_post=True):
        """
        :param account: GA property id, stored as 'tid'
        :param name: unused; kept for backward compatibility
        :param client_id: 'cid' value; a random UUID when omitted
        :param hash_client_id: when True, hash 'cid' on every send
        :param user_id: optional 'uid' value
        :param user_agent: User-Agent header for outgoing hits
        :param use_post: POST transport (default) vs GET when False
        """
        # for debug purpose
        self._session = None
        if use_post is False:
            self.http = HTTPRequest(user_agent=user_agent)
        else:
            self.http = HTTPPost(user_agent=user_agent)
        self.params = {'v': 1, 'tid': account}
        if client_id is None:
            client_id = generate_uuid()
        self.params['cid'] = client_id
        self.hash_client_id = hash_client_id
        if user_id is not None:
            self.params['uid'] = user_id

    def set_timestamp(self, data):
        """ Interpret time-related options, apply queue-time parameter as needed """
        if 'hittime' in data:  # an absolute timestamp
            data['qt'] = self.hittime(timestamp=data.pop('hittime', None))
        if 'hitage' in data:  # a relative age (in seconds)
            data['qt'] = self.hittime(age=data.pop('hitage', None))

    async def send(self, hittype, *args, **data):
        """ Transmit HTTP requests to Google Analytics using the measurement protocol """
        if hittype not in self.valid_hittypes:
            raise KeyError('Unsupported Universal Analytics Hit Type: {0}'.format(repr(hittype)))
        self.set_timestamp(data)
        self.consume_options(data, hittype, args)
        for item in args:  # process dictionary-object arguments of transcient data
            if isinstance(item, dict):
                for key, val in self.payload(item):
                    data[key] = val
        for k, v in self.params.items():  # update only absent parameters
            if k not in data:
                data[k] = v
        data = dict(self.payload(data))
        if self.hash_client_id:
            data['cid'] = generate_uuid(data['cid'])
        # Transmit the hit to Google...
        await self.http.send(data)

    # Setting persistent attibutes of the session/hit/etc (inc. custom dimensions/metrics)
    def set(self, name, value=None):
        """ Store persistent parameter(s); accepts a dict or a name/value pair. """
        if isinstance(name, dict):
            for key, value in name.items():
                try:
                    param, value = self.coerceParameter(key, value)
                    self.params[param] = value
                except KeyError:
                    pass
        elif isinstance(name, str):
            try:
                param, value = self.coerceParameter(name, value)
                self.params[param] = value
            except KeyError:
                pass

    def __getitem__(self, name):
        param, value = self.coerceParameter(name, None)
        return self.params.get(param, None)

    def __setitem__(self, name, value):
        param, value = self.coerceParameter(name, value)
        self.params[param] = value

    def __delitem__(self, name):
        param, value = self.coerceParameter(name, None)
        if param in self.params:
            del self.params[param]
def safe_unicode(obj):
    """ Safe conversion to the Unicode string version of the object """
    # BUGFIX: under Python 3, str(b'...') returns the bytes *repr*
    # (e.g. "b'x'") and never raises UnicodeDecodeError, so the original
    # except branch was dead and bytes values were mangled. Decode bytes
    # explicitly, falling back to replacement characters on invalid UTF-8
    # to preserve the original "never raise" behavior.
    if isinstance(obj, bytes):
        try:
            return obj.decode('utf-8')
        except UnicodeDecodeError:
            return obj.decode('utf-8', errors='replace')
    return str(obj)
# Declaring name mappings for Measurement Protocol parameters
MAX_CUSTOM_DEFINITIONS = 200
MAX_EC_LISTS = 11  # 1-based index
MAX_EC_PRODUCTS = 11  # 1-based index
MAX_EC_PROMOTIONS = 11  # 1-based index
Tracker.alias(int, 'v', 'protocol-version')
Tracker.alias(safe_unicode, 'cid', 'client-id', 'clientId', 'clientid')
Tracker.alias(safe_unicode, 'tid', 'trackingId', 'account')
Tracker.alias(safe_unicode, 'uid', 'user-id', 'userId', 'userid')
Tracker.alias(safe_unicode, 'uip', 'user-ip', 'userIp', 'ipaddr')
Tracker.alias(safe_unicode, 'ua', 'userAgent', 'userAgentOverride', 'user-agent')
Tracker.alias(safe_unicode, 'dp', 'page', 'path')
# BUGFIX: a missing comma concatenated 'pageTitle' 'page-title' into the
# single bogus alias 'pageTitlepage-title'.
Tracker.alias(safe_unicode, 'dt', 'title', 'pagetitle', 'pageTitle', 'page-title')
Tracker.alias(safe_unicode, 'dl', 'location')
Tracker.alias(safe_unicode, 'dh', 'hostname')
Tracker.alias(safe_unicode, 'sc', 'sessioncontrol', 'session-control', 'sessionControl')
Tracker.alias(safe_unicode, 'dr', 'referrer', 'referer')
Tracker.alias(int, 'qt', 'queueTime', 'queue-time')
Tracker.alias(safe_unicode, 't', 'hitType', 'hittype')
Tracker.alias(int, 'aip', 'anonymizeIp', 'anonIp', 'anonymize-ip')
# Campaign attribution
Tracker.alias(safe_unicode, 'cn', 'campaign', 'campaignName', 'campaign-name')
Tracker.alias(safe_unicode, 'cs', 'source', 'campaignSource', 'campaign-source')
Tracker.alias(safe_unicode, 'cm', 'medium', 'campaignMedium', 'campaign-medium')
Tracker.alias(safe_unicode, 'ck', 'keyword', 'campaignKeyword', 'campaign-keyword')
Tracker.alias(safe_unicode, 'cc', 'content', 'campaignContent', 'campaign-content')
Tracker.alias(safe_unicode, 'ci', 'campaignId', 'campaignID', 'campaign-id')
# Technical specs
Tracker.alias(safe_unicode, 'sr', 'screenResolution', 'screen-resolution', 'resolution')
Tracker.alias(safe_unicode, 'vp', 'viewport', 'viewportSize', 'viewport-size')
Tracker.alias(safe_unicode, 'de', 'encoding', 'documentEncoding', 'document-encoding')
Tracker.alias(int, 'sd', 'colors', 'screenColors', 'screen-colors')
Tracker.alias(safe_unicode, 'ul', 'language', 'user-language', 'userLanguage')
# Mobile app
Tracker.alias(safe_unicode, 'an', 'appName', 'app-name', 'app')
Tracker.alias(safe_unicode, 'cd', 'contentDescription', 'screenName', 'screen-name', 'content-description')
Tracker.alias(safe_unicode, 'av', 'appVersion', 'app-version', 'version')
Tracker.alias(safe_unicode, 'aid', 'appID', 'appId', 'application-id', 'app-id', 'applicationId')
Tracker.alias(safe_unicode, 'aiid', 'appInstallerId', 'app-installer-id')
# Ecommerce
Tracker.alias(safe_unicode, 'ta', 'affiliation', 'transactionAffiliation', 'transaction-affiliation')
Tracker.alias(safe_unicode, 'ti', 'transaction', 'transactionId', 'transaction-id')
Tracker.alias(float, 'tr', 'revenue', 'transactionRevenue', 'transaction-revenue')
Tracker.alias(float, 'ts', 'shipping', 'transactionShipping', 'transaction-shipping')
Tracker.alias(float, 'tt', 'tax', 'transactionTax', 'transaction-tax')
Tracker.alias(safe_unicode, 'cu', 'currency', 'transactionCurrency',
              'transaction-currency')  # Currency code, e.g. USD, EUR
Tracker.alias(safe_unicode, 'in', 'item-name', 'itemName')
Tracker.alias(float, 'ip', 'item-price', 'itemPrice')
Tracker.alias(float, 'iq', 'item-quantity', 'itemQuantity')
Tracker.alias(safe_unicode, 'ic', 'item-code', 'sku', 'itemCode')
Tracker.alias(safe_unicode, 'iv', 'item-variation', 'item-category', 'itemCategory', 'itemVariation')
# Events
Tracker.alias(safe_unicode, 'ec', 'event-category', 'eventCategory', 'category')
Tracker.alias(safe_unicode, 'ea', 'event-action', 'eventAction', 'action')
Tracker.alias(safe_unicode, 'el', 'event-label', 'eventLabel', 'label')
Tracker.alias(int, 'ev', 'event-value', 'eventValue', 'value')
Tracker.alias(int, 'ni', 'noninteractive', 'nonInteractive', 'noninteraction', 'nonInteraction')
# Social
Tracker.alias(safe_unicode, 'sa', 'social-action', 'socialAction')
Tracker.alias(safe_unicode, 'sn', 'social-network', 'socialNetwork')
Tracker.alias(safe_unicode, 'st', 'social-target', 'socialTarget')
# Exceptions
Tracker.alias(safe_unicode, 'exd', 'exception-description', 'exceptionDescription', 'exDescription')
Tracker.alias(int, 'exf', 'exception-fatal', 'exceptionFatal', 'exFatal')
# User Timing
Tracker.alias(safe_unicode, 'utc', 'timingCategory', 'timing-category')
Tracker.alias(safe_unicode, 'utv', 'timingVariable', 'timing-variable')
Tracker.alias(int, 'utt', 'time', 'timingTime', 'timing-time')
Tracker.alias(safe_unicode, 'utl', 'timingLabel', 'timing-label')
Tracker.alias(float, 'dns', 'timingDNS', 'timing-dns')
Tracker.alias(float, 'pdt', 'timingPageLoad', 'timing-page-load')
Tracker.alias(float, 'rrt', 'timingRedirect', 'timing-redirect')
Tracker.alias(safe_unicode, 'tcp', 'timingTCPConnect', 'timing-tcp-connect')
Tracker.alias(safe_unicode, 'srt', 'timingServerResponse', 'timing-server-response')
# Custom dimensions and metrics
# BUGFIX: Measurement Protocol custom definition indexes are 1-based
# (cd1..cd200); range(0, 200) registered a bogus cd0 and omitted cd200.
for i in range(1, MAX_CUSTOM_DEFINITIONS + 1):
    Tracker.alias(safe_unicode, 'cd{0}'.format(i), 'dimension{0}'.format(i))
    Tracker.alias(int, 'cm{0}'.format(i), 'metric{0}'.format(i))
# Enhanced Ecommerce
Tracker.alias(str, 'pa')  # Product action
Tracker.alias(str, 'tcc')  # Coupon code
Tracker.alias(safe_unicode, 'pal')  # Product action list
Tracker.alias(int, 'cos')  # Checkout step
Tracker.alias(str, 'col')  # Checkout step option
Tracker.alias(str, 'promoa')  # Promotion action
for product_index in range(1, MAX_EC_PRODUCTS):
    Tracker.alias(str, 'pr{0}id'.format(product_index))  # Product SKU
    Tracker.alias(safe_unicode, 'pr{0}nm'.format(product_index))  # Product name
    Tracker.alias(safe_unicode, 'pr{0}br'.format(product_index))  # Product brand
    Tracker.alias(safe_unicode, 'pr{0}ca'.format(product_index))  # Product category
    Tracker.alias(safe_unicode, 'pr{0}va'.format(product_index))  # Product variant
    Tracker.alias(str, 'pr{0}pr'.format(product_index))  # Product price
    Tracker.alias(int, 'pr{0}qt'.format(product_index))  # Product quantity
    Tracker.alias(str, 'pr{0}cc'.format(product_index))  # Product coupon code
    Tracker.alias(int, 'pr{0}ps'.format(product_index))  # Product position
    # 1-based custom definition indexes (see note above).
    for custom_index in range(1, MAX_CUSTOM_DEFINITIONS + 1):
        Tracker.alias(str, 'pr{0}cd{1}'.format(product_index, custom_index))  # Product custom dimension
        Tracker.alias(int, 'pr{0}cm{1}'.format(product_index, custom_index))  # Product custom metric
    for list_index in range(1, MAX_EC_LISTS):
        Tracker.alias(str, 'il{0}pi{1}id'.format(list_index, product_index))  # Product impression SKU
        Tracker.alias(safe_unicode, 'il{0}pi{1}nm'.format(list_index, product_index))  # Product impression name
        Tracker.alias(safe_unicode, 'il{0}pi{1}br'.format(list_index, product_index))  # Product impression brand
        Tracker.alias(safe_unicode, 'il{0}pi{1}ca'.format(list_index, product_index))  # Product impression category
        Tracker.alias(safe_unicode, 'il{0}pi{1}va'.format(list_index, product_index))  # Product impression variant
        Tracker.alias(int, 'il{0}pi{1}ps'.format(list_index, product_index))  # Product impression position
        Tracker.alias(int, 'il{0}pi{1}pr'.format(list_index, product_index))  # Product impression price
        for custom_index in range(1, MAX_CUSTOM_DEFINITIONS + 1):
            Tracker.alias(str, 'il{0}pi{1}cd{2}'.format(list_index, product_index,
                                                        custom_index))  # Product impression custom dimension
            Tracker.alias(int, 'il{0}pi{1}cm{2}'.format(list_index, product_index,
                                                        custom_index))  # Product impression custom metric
for list_index in range(1, MAX_EC_LISTS):
    Tracker.alias(safe_unicode, 'il{0}nm'.format(list_index))  # Product impression list name
for promotion_index in range(1, MAX_EC_PROMOTIONS):
    Tracker.alias(str, 'promo{0}id'.format(promotion_index))  # Promotion ID
    Tracker.alias(safe_unicode, 'promo{0}nm'.format(promotion_index))  # Promotion name
    Tracker.alias(str, 'promo{0}cr'.format(promotion_index))  # Promotion creative
    Tracker.alias(str, 'promo{0}ps'.format(promotion_index))  # Promotion position
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import quotas
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import context as context_maker
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
def quota_set(id):
    '''Build the expected default quota_set response body for project *id*.'''
    values = {'id': id, 'metadata_items': 128,
              'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1,
              'instances': 10, 'injected_files': 5, 'cores': 20,
              'injected_file_content_bytes': 10240,
              'security_groups': 10, 'security_group_rules': 20,
              'key_pairs': 100, 'injected_file_path_bytes': 255}
    return {'quota_set': values}
class QuotaSetsTest(test.TestCase):
    """Tests for the os-quota-sets controller: defaults, show, update
    and delete, for both admin and unprivileged contexts."""

    def setUp(self):
        super(QuotaSetsTest, self).setUp()
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = quotas.QuotaSetsController(self.ext_mgr)

    def test_format_quota_set(self):
        """_format_quota_set wraps raw limits under 'quota_set' with the id."""
        raw_quota_set = {
            'instances': 10,
            'cores': 20,
            'ram': 51200,
            'floating_ips': 10,
            'fixed_ips': -1,
            'metadata_items': 128,
            'injected_files': 5,
            'injected_file_path_bytes': 255,
            'injected_file_content_bytes': 10240,
            'security_groups': 10,
            'security_group_rules': 20,
            'key_pairs': 100}
        quota_set = self.controller._format_quota_set('1234', raw_quota_set)
        qs = quota_set['quota_set']
        self.assertEqual(qs['id'], '1234')
        self.assertEqual(qs['instances'], 10)
        self.assertEqual(qs['cores'], 20)
        self.assertEqual(qs['ram'], 51200)
        self.assertEqual(qs['floating_ips'], 10)
        self.assertEqual(qs['fixed_ips'], -1)
        self.assertEqual(qs['metadata_items'], 128)
        self.assertEqual(qs['injected_files'], 5)
        self.assertEqual(qs['injected_file_path_bytes'], 255)
        self.assertEqual(qs['injected_file_content_bytes'], 10240)
        self.assertEqual(qs['security_groups'], 10)
        self.assertEqual(qs['security_group_rules'], 20)
        self.assertEqual(qs['key_pairs'], 100)

    def test_quotas_defaults(self):
        """The defaults action returns the configured default quotas."""
        uri = '/v2/fake_tenant/os-quota-sets/fake_tenant/defaults'
        req = fakes.HTTPRequest.blank(uri)
        res_dict = self.controller.defaults(req, 'fake_tenant')
        expected = {'quota_set': {
            'id': 'fake_tenant',
            'instances': 10,
            'cores': 20,
            'ram': 51200,
            'floating_ips': 10,
            'fixed_ips': -1,
            'metadata_items': 128,
            'injected_files': 5,
            'injected_file_path_bytes': 255,
            'injected_file_content_bytes': 10240,
            'security_groups': 10,
            'security_group_rules': 20,
            'key_pairs': 100}}
        self.assertEqual(res_dict, expected)

    def test_quotas_show_as_admin(self):
        """Admins may view another project's quotas."""
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234',
                                      use_admin_context=True)
        res_dict = self.controller.show(req, 1234)
        self.assertEqual(res_dict, quota_set('1234'))

    def test_quotas_show_as_unauthorized_user(self):
        """Non-admins are forbidden from viewing other projects' quotas."""
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
                          req, 1234)

    def test_quotas_update_as_admin(self):
        """Admins may update quotas; the new values are echoed back."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        # NOTE: a duplicate 'fixed_ips': -1 entry was removed from this
        # literal; dict literals keep only the last value anyway.
        body = {'quota_set': {'instances': 50, 'cores': 50,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body)
        self.assertEqual(res_dict, body)

    def test_quotas_update_zero_value_as_admin(self):
        """Zero is accepted as a valid quota limit."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        # NOTE: the original literal listed 'fixed_ips' twice (0 then -1);
        # only the final -1 ever took effect, so the dead 0 was dropped.
        body = {'quota_set': {'instances': 0, 'cores': 0,
                              'ram': 0, 'floating_ips': 0,
                              'metadata_items': 0,
                              'injected_files': 0,
                              'injected_file_content_bytes': 0,
                              'injected_file_path_bytes': 0,
                              'security_groups': 0,
                              'security_group_rules': 0,
                              'key_pairs': 100, 'fixed_ips': -1}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body)
        self.assertEqual(res_dict, body)

    def test_quotas_update_as_user(self):
        """Non-admins are forbidden from updating quotas."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        body = {'quota_set': {'instances': 50, 'cores': 50,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_update_invalid_key(self):
        """Unknown quota keys ('instances2') are rejected."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        body = {'quota_set': {'instances2': -2, 'cores': -2,
                              'ram': -2, 'floating_ips': -2,
                              'metadata_items': -2, 'injected_files': -2,
                              'injected_file_content_bytes': -2}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_update_invalid_limit(self):
        """Limits below -1 are rejected."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        body = {'quota_set': {'instances': -2, 'cores': -2,
                              'ram': -2, 'floating_ips': -2, 'fixed_ips': -2,
                              'metadata_items': -2, 'injected_files': -2,
                              'injected_file_content_bytes': -2}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_update_empty_body(self):
        """An empty request body is rejected."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        body = {}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_update_invalid_value_json_fromat_empty_string(self):
        """An empty-string value (JSON) is ignored, keeping the old limit."""
        expected_resp = {'quota_set': {
            'instances': 50, 'cores': 50,
            'ram': 51200, 'floating_ips': 10,
            'fixed_ips': -1, 'metadata_items': 128,
            'injected_files': 5,
            'injected_file_content_bytes': 10240,
            'injected_file_path_bytes': 255,
            'security_groups': 10,
            'security_group_rules': 20,
            'key_pairs': 100}}
        # when PUT JSON format with empty string for quota
        body = {'quota_set': {'instances': 50, 'cores': 50,
                              'ram': '', 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        res_dict = self.controller.update(req, 'update_me', body)
        self.assertEqual(res_dict, expected_resp)

    def test_quotas_update_invalid_value_xml_fromat_empty_string(self):
        """An empty element (XML deserializes to {}) is ignored as well."""
        expected_resp = {'quota_set': {
            'instances': 50, 'cores': 50,
            'ram': 51200, 'floating_ips': 10,
            'fixed_ips': -1, 'metadata_items': 128,
            'injected_files': 5,
            'injected_file_content_bytes': 10240,
            'injected_file_path_bytes': 255,
            'security_groups': 10,
            'security_group_rules': 20,
            'key_pairs': 100}}
        # when PUT XML format with empty string for quota
        body = {'quota_set': {'instances': 50, 'cores': 50,
                              'ram': {}, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        res_dict = self.controller.update(req, 'update_me', body)
        self.assertEqual(res_dict, expected_resp)

    def test_quotas_update_invalid_value_non_int(self):
        """Non-integer values are rejected with 400."""
        # when PUT non integer value
        # NOTE(review): 'test' here is the imported nova test *module*, not
        # a string -- presumably any non-int triggers HTTPBadRequest, but
        # the intent (likely the string 'test') should be confirmed.
        body = {'quota_set': {'instances': test, 'cores': 50,
                              'ram': {}, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_update_invalid_value_with_float(self):
        """Float values are rejected with 400."""
        # when PUT non integer value
        body = {'quota_set': {'instances': 50.5, 'cores': 50,
                              'ram': {}, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_update_invalid_value_with_unicode(self):
        """Non-numeric unicode values are rejected with 400."""
        # when PUT non integer value
        body = {'quota_set': {'instances': u'\u30aa\u30fc\u30d7\u30f3',
                              'cores': 50,
                              'ram': {}, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_delete_quotas_when_extension_not_loaded(self):
        """DELETE is a 404 when os-extended-quotas is not loaded."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(False)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, 1234)

    def test_quotas_delete_as_unauthorized_user(self):
        """Non-admins are forbidden from deleting quotas."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          req, 1234)

    def test_quotas_delete_as_admin(self):
        """Admin DELETE destroys the project's quotas and returns 202."""
        context = context_maker.get_admin_context()
        self.req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.req.environ['nova.context'] = context
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.mox.StubOutWithMock(quota.QUOTAS,
                                 "destroy_all_by_project")
        quota.QUOTAS.destroy_all_by_project(context, 1234)
        self.mox.ReplayAll()
        res = self.controller.delete(self.req, 1234)
        self.mox.VerifyAll()
        self.assertEqual(res.status_int, 202)
class QuotaXMLSerializerTest(test.TestCase):
    """Round-trip tests for the quota set XML template and deserializer."""

    def setUp(self):
        super(QuotaXMLSerializerTest, self).setUp()
        self.serializer = quotas.QuotaTemplate()
        self.deserializer = wsgi.XMLDeserializer()

    def test_serializer(self):
        """Serializing emits <quota_set id=...> with one child per value."""
        exemplar = dict(quota_set=dict(
            id='project_id',
            metadata_items=10,
            injected_file_path_bytes=255,
            injected_file_content_bytes=20,
            ram=50,
            floating_ips=60,
            fixed_ips=-1,
            instances=70,
            injected_files=80,
            security_groups=10,
            security_group_rules=20,
            key_pairs=100,
            cores=90))
        text = self.serializer.serialize(exemplar)
        tree = etree.fromstring(text)
        self.assertEqual('quota_set', tree.tag)
        self.assertEqual('project_id', tree.get('id'))
        # 'id' is rendered as an attribute, not a child element, hence -1.
        self.assertEqual(len(exemplar['quota_set']) - 1, len(tree))
        for child in tree:
            self.assertIn(child.tag, exemplar['quota_set'])
            self.assertEqual(int(child.text), exemplar['quota_set'][child.tag])

    def test_deserializer(self):
        """Deserializing quota XML yields string-valued quota entries."""
        exemplar = dict(quota_set=dict(
            metadata_items='10',
            injected_file_content_bytes='20',
            ram='50',
            floating_ips='60',
            fixed_ips='-1',
            instances='70',
            injected_files='80',
            security_groups='10',
            security_group_rules='20',
            key_pairs='100',
            cores='90'))
        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<quota_set>'
                  '<metadata_items>10</metadata_items>'
                  '<injected_file_content_bytes>20'
                  '</injected_file_content_bytes>'
                  '<ram>50</ram>'
                  '<floating_ips>60</floating_ips>'
                  '<fixed_ips>-1</fixed_ips>'
                  '<instances>70</instances>'
                  '<injected_files>80</injected_files>'
                  '<security_groups>10</security_groups>'
                  '<security_group_rules>20</security_group_rules>'
                  '<key_pairs>100</key_pairs>'
                  '<cores>90</cores>'
                  '</quota_set>')
        result = self.deserializer.deserialize(intext)['body']
        self.assertEqual(result, exemplar)
# Canned quota data shared by the fake_get_quotas stub below.  Some tests
# mutate the 'limit' values in place, so this must stay a module-level dict.
fake_quotas = {'ram': {'limit': 51200,
                       'in_use': 12800,
                       'reserved': 12800},
               'cores': {'limit': 20,
                         'in_use': 10,
                         'reserved': 5},
               'instances': {'limit': 100,
                             'in_use': 0,
                             'reserved': 0}}


def fake_get_quotas(self, context, id, user_id=None, usages=False):
    """Stub for QuotaSetsController._get_quotas backed by fake_quotas.

    Returns the full per-resource usage dicts when usages=True, otherwise
    a flat mapping of resource name to its limit.  `context`, `id` and
    `user_id` are accepted for signature compatibility but ignored.
    """
    if usages:
        return fake_quotas
    return {k: v['limit'] for k, v in fake_quotas.items()}
class ExtendedQuotasTest(test.TestCase):
    """Tests for quota updates interacting with current resource usage."""

    def setUp(self):
        super(ExtendedQuotasTest, self).setUp()
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = quotas.QuotaSetsController(self.ext_mgr)

    def test_quotas_update_exceed_in_used(self):
        """Lowering a limit below current usage fails without force."""
        # fake_get_quotas reports cores in_use=10 + reserved=5, so a new
        # limit of 10 is below what is already consumed.
        body = {'quota_set': {'cores': 10}}

        self.stubs.Set(quotas.QuotaSetsController, '_get_quotas',
                       fake_get_quotas)
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_quotas_force_update_exceed_in_used(self):
        """With force='True' limits may be set below current usage."""
        self.stubs.Set(quotas.QuotaSetsController, '_get_quotas',
                       fake_get_quotas)
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        expected = {'quota_set': {'ram': 25600, 'instances': 200, 'cores': 10}}
        body = {'quota_set': {'ram': 25600,
                              'instances': 200,
                              'cores': 10,
                              'force': 'True'}}
        # Mutate the shared fake_quotas so the stub reflects the new limits
        # when the controller reads them back.
        fake_quotas.get('ram')['limit'] = 25600
        fake_quotas.get('cores')['limit'] = 10
        fake_quotas.get('instances')['limit'] = 200
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        res_dict = self.controller.update(req, 'update_me', body)
        self.assertEqual(res_dict, expected)
class UserQuotasTest(test.TestCase):
    """Tests for per-user quotas (os-user-quotas extension)."""

    def setUp(self):
        super(UserQuotasTest, self).setUp()
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = quotas.QuotaSetsController(self.ext_mgr)

    def test_user_quotas_show_as_admin(self):
        """Admin can GET a specific user's quotas via ?user_id=."""
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1',
                                      use_admin_context=True)
        res_dict = self.controller.show(req, 1234)

        self.assertEqual(res_dict, quota_set('1234'))

    def test_user_quotas_show_as_unauthorized_user(self):
        """Non-admin GET of a user's quotas is forbidden."""
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
                          req, 1234)

    def test_user_quotas_update_as_admin(self):
        """Admin PUT on a user's quotas echoes the updated quota set."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        body = {'quota_set': {'instances': 10, 'cores': 20,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url, use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body)

        self.assertEqual(res_dict, body)

    def test_user_quotas_update_as_user(self):
        """Non-admin PUT on a user's quotas is forbidden."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        body = {'quota_set': {'instances': 10, 'cores': 20,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          req, 'update_me', body)

    def test_user_quotas_update_exceed_project(self):
        """A per-user quota may not exceed the project-level quota."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        body = {'quota_set': {'instances': 20}}
        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url, use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body)

    def test_delete_user_quotas_when_extension_not_loaded(self):
        """DELETE returns 404 when os-user-quotas is not loaded."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(False)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, 1234)

    def test_user_quotas_delete_as_unauthorized_user(self):
        """Non-admin DELETE of a user's quotas is forbidden."""
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          req, 1234)

    def test_user_quotas_delete_as_admin(self):
        """Admin DELETE destroys the user's quotas and returns 202."""
        context = context_maker.get_admin_context()
        url = '/v2/fake4/os-quota-sets/1234?user_id=1'
        self.req = fakes.HTTPRequest.blank(url)
        self.req.environ['nova.context'] = context
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        # Expect destruction scoped to both project and user.
        self.mox.StubOutWithMock(quota.QUOTAS,
                                 "destroy_all_by_project_and_user")
        quota.QUOTAS.destroy_all_by_project_and_user(context, 1234, '1')
        self.mox.ReplayAll()
        res = self.controller.delete(self.req, 1234)
        self.mox.VerifyAll()
        self.assertEqual(res.status_int, 202)
|
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
import comm
import zipfile
import shutil
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """Packaging tests for the crosswalk-pkg tool (android platform).

    The original implementation duplicated the working-directory setup and
    the generated-APK verification loop in every test; those are factored
    into private helpers below.  Command strings are preserved exactly.
    """

    def _enter_clean_workdir(self):
        # Every test builds inside a fresh org.xwalk.test directory under
        # the configured crosswalk path.
        comm.setUp()
        os.chdir(comm.XwalkPath)
        comm.clear("org.xwalk.test")
        os.mkdir("org.xwalk.test")
        os.chdir('org.xwalk.test')

    def _base_cmd(self, extra_opts=""):
        # Common crosswalk-pkg invocation using the local crosswalk zip.
        # extra_opts (e.g. " --release=true", " -s") is inserted between
        # the --android and --crosswalk options, matching the original
        # command layout.
        return (comm.HOST_PREFIX + comm.PackTools +
                "crosswalk-pkg --platforms=android --android=" +
                comm.ANDROID_MODE + extra_opts +
                " --crosswalk=" + comm.crosswalkzip)

    def _assert_apks_generated(self):
        # Embedded mode is expected to produce two ABI apks (x86 + arm,
        # with "64" in the name on 64-bit builds); shared mode produces a
        # single "shared" apk.
        apks = os.listdir(os.getcwd())
        apk_count = 0
        if comm.MODE != " --android-shared":
            for apk in apks:
                if not apk.endswith(".apk"):
                    continue
                if "x86" in apk:
                    if comm.BIT == "64":
                        self.assertIn("64", apk)
                    apk_count = apk_count + 1
                if "arm" in apk:
                    if comm.BIT == "64":
                        self.assertIn("64", apk)
                    apk_count = apk_count + 1
            self.assertEquals(apk_count, 2)
        else:
            for apk in apks:
                if apk.endswith(".apk") and "shared" in apk:
                    apk_count = apk_count + 1
            self.assertEquals(apk_count, 1)

    def _check_build_ok(self, app_args, extra_opts=""):
        # Run crosswalk-pkg, verify apks, install/run the app, clean up.
        self._enter_clean_workdir()
        return_code = os.system(self._base_cmd(extra_opts) + app_args)
        self._assert_apks_generated()
        comm.run(self)
        comm.clear("org.xwalk.test")
        self.assertEquals(return_code, 0)

    def _check_build_fails(self, app_args):
        # Run crosswalk-pkg and expect a non-zero exit status.
        self._enter_clean_workdir()
        return_code = os.system(self._base_cmd() + app_args)
        comm.clear("org.xwalk.test")
        self.assertNotEquals(return_code, 0)

    def _check_channel_build(self, channel):
        # Build against a release channel (stable/beta/canary) and verify
        # the resolved crosswalk version appears in the tool output.
        self._enter_clean_workdir()
        cmd = comm.HOST_PREFIX + comm.PackTools + \
            "crosswalk-pkg --platforms=android --android=" + \
            comm.ANDROID_MODE + " --crosswalk=" + channel + \
            ' --targets="' + comm.BIT + '" ' + \
            comm.ConstPath + "/../testapp/create_package_basic/"
        (return_code, output) = comm.getstatusoutput(cmd)
        version = comm.check_crosswalk_version(self, channel)
        self._assert_apks_generated()
        comm.run(self)
        comm.clear("org.xwalk.test")
        self.assertEquals(return_code, 0)
        self.assertIn(version, output[0])

    def _check_skip_option(self, extra_opts, expected_msg):
        # Run a basic build with extra option(s) and assert on the host
        # setup-check message in the tool output.
        self._enter_clean_workdir()
        cmd = self._base_cmd(extra_opts) + " " + \
            comm.ConstPath + "/../testapp/create_package_basic/"
        (return_code, output) = comm.getstatusoutput(cmd)
        comm.clear("org.xwalk.test")
        self.assertEquals(return_code, 0)
        self.assertIn(expected_msg, output[0])

    def test_create_package_relative_path(self):
        self._check_build_ok(" ../../testapp/create_package_basic/")

    def test_create_package_non_exist_relative_path(self):
        self._check_build_fails(" ../../testapp/non_exist_path/")

    def test_create_package_non_exist_path(self):
        self._check_build_fails(
            " " + comm.ConstPath + "/../testapp/non_exist_path/")

    def test_create_package_missing_path(self):
        # No application path at all.
        self._check_build_fails("")

    def test_create_package_missing_manifest(self):
        self._check_build_fails(
            " " + comm.ConstPath + "/../testapp/start_url/")

    def test_create_package_missing_manifest_relative_path(self):
        self._check_build_fails(" ../../testapp/start_url/")

    def test_create_package_filepath(self):
        # Passing a file (not a directory) must fail.
        self._check_build_fails(
            " " + comm.ConstPath +
            "/../testapp/create_package_basic/manifest.json")

    def test_create_package_relative_filepath(self):
        self._check_build_fails(
            " ../../testapp/create_package_basic/manifest.json")

    def test_create_package_missing_icon_startUrl(self):
        self._check_build_ok(
            " " + comm.ConstPath +
            "/../testapp/create_package_missing_icon_startUrl/")

    def test_create_package_stable(self):
        self._check_channel_build("stable")

    def test_create_package_beta(self):
        self._check_channel_build("beta")

    def test_create_package_canary(self):
        self._check_channel_build("canary")

    def test_build_package_release(self):
        self._check_build_ok(
            " " + comm.ConstPath + "/../testapp/create_package_basic/",
            " --release=true")

    def test_create_package_reading_manifest(self):
        """--manifest JSON on the command line is written to manifest.json."""
        self._enter_clean_workdir()
        manifest_path = comm.ConstPath + "/../testapp/start_url/manifest.json"
        # Start from a clean slate so the file must be (re)created.
        if os.path.exists(manifest_path):
            os.remove(manifest_path)
        # Quoting of the inline JSON differs between cmd.exe and POSIX
        # shells (SHELL_FLAG distinguishes them).
        if comm.SHELL_FLAG == "False":
            cmd = self._base_cmd() + ' --manifest "{ """xwalk_package_id""": """org.xwalk.test""", """start_url""": """start.html""" }" ' + comm.ConstPath + "/../testapp/start_url/"
        else:
            cmd = self._base_cmd() + " --manifest '{ \"xwalk_package_id\": \"org.xwalk.test\", \"start_url\": \"start.html\" }' " + comm.ConstPath + "/../testapp/start_url/"
        return_code = os.system(cmd)
        with open(manifest_path) as json_file:
            data = json.load(json_file)
        self._assert_apks_generated()
        comm.run(self)
        comm.clear("org.xwalk.test")
        os.remove(manifest_path)
        self.assertEquals(return_code, 0)
        self.assertEquals(data['xwalk_package_id'].strip(os.linesep),
                          "org.xwalk.test")

    def test_extensions_without_jsFile(self):
        """Packaging succeeds when an extension lacks its .js stub."""
        self._enter_clean_workdir()
        ext_dir = comm.XwalkPath + "/org.xwalk.test/extension_permission/"
        shutil.copytree(comm.ConstPath + "/../testapp/extension_permission/",
                        ext_dir)
        os.remove(ext_dir + "contactextension/contactextension.js")
        return_code = os.system(
            self._base_cmd() + " " + comm.XwalkPath +
            "/org.xwalk.test/extension_permission/")
        self._assert_apks_generated()
        comm.run(self)
        comm.clear("org.xwalk.test")
        self.assertEquals(return_code, 0)

    def test_external_extensions_way(self):
        """--keep preserves the project tree with extensions copied in."""
        self._enter_clean_workdir()
        cmd = comm.HOST_PREFIX + comm.PackTools + \
            "crosswalk-pkg --platforms=android --android=" + \
            comm.ANDROID_MODE + " --keep --crosswalk=canary " + \
            comm.ConstPath + "/../testapp/extension_permission/"
        (return_code, output) = comm.getstatusoutput(cmd)
        # The kept project path is the last token of the tool output.
        project_dir = output[0].split(
            " * " + os.linesep)[-1].split(' ')[-1].strip(os.linesep)
        comm.clear("org.xwalk.test")
        self.assertEquals(return_code, 0)
        assets = project_dir + "/prj/android/assets/"
        self.assertNotIn("extensions-config.json", os.listdir(assets))
        self.assertIn("contactextension",
                      os.listdir(assets + "xwalk-extensions/"))
        self.assertIn("contactextension.js",
                      os.listdir(assets + "xwalk-extensions/contactextension/"))
        self.assertIn("contactextension.json",
                      os.listdir(assets + "xwalk-extensions/contactextension/"))

    def test_create_package_skip_dummy_using_s_option(self):
        self._check_skip_option(" -s", "Skipping host setup check")

    def test_create_package_skip_dummy_using_skip_check_option(self):
        self._check_skip_option(" --skip-check", "Skipping host setup check")

    def test_create_package_skip_dummy_default_in_subprocess(self):
        # Skipping is the default when run through getstatusoutput.
        self._check_skip_option("", "Skipping host setup check")

    def test_create_package_skip_dummy_no(self):
        # "-s no" explicitly re-enables the host setup check.
        self._check_skip_option(" -s no", "Checking host setup")

    def test_create_package_default_non_interactive(self):
        """With output redirected, the host setup check is skipped."""
        self._enter_clean_workdir()
        cmd = self._base_cmd() + " " + comm.ConstPath + \
            "/../testapp/create_package_basic/ > 1.log 2>&1"
        comm.getstatusoutput(cmd)
        # Inspect the captured log rather than the live output.
        (return_code, output) = comm.getstatusoutput("cat 1.log")
        comm.clear("org.xwalk.test")
        comm.clear("1.log")
        self.assertEquals(return_code, 0)
        self.assertIn("Skipping host setup check", output[0])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Postprocessing for anchor-based detection."""
import functools
from typing import List, Tuple
from absl import logging
import tensorflow as tf
from tensorflow_examples.lite.model_maker.third_party.efficientdet import nms_np
from tensorflow_examples.lite.model_maker.third_party.efficientdet import utils
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import anchors
T = tf.Tensor  # a shortcut for typing check.
# Offset added to 0-based class indices in the final detections; output
# class ids are 1-based (see nms()).
CLASS_OFFSET = 1

# TFLite-specific constants.
TFLITE_MAX_CLASSES_PER_DETECTION = 1
TFLITE_DETECTION_POSTPROCESS_FUNC = 'TFLite_Detection_PostProcess'
# TFLite fast NMS == postprocess_global (less accurate)
# TFLite regular NMS == postprocess_per_class
TFLITE_USE_REGULAR_NMS = False
def to_list(inputs):
  """Normalize `inputs` to a list.

  Dicts are flattened to their values in sorted-key order (for a
  deterministic ordering); lists are returned unchanged; tuples are
  converted to lists.  The original implementation silently returned
  None for any other type, which hid caller bugs.

  Args:
    inputs: a dict, list, or tuple.

  Returns:
    A list with the same elements.

  Raises:
    ValueError: if `inputs` is not a dict, list, or tuple.
  """
  if isinstance(inputs, dict):
    return [inputs[k] for k in sorted(inputs.keys())]
  if isinstance(inputs, list):
    return inputs
  if isinstance(inputs, tuple):
    return list(inputs)
  raise ValueError(
      'inputs must be a dict, list, or tuple, got %s' % type(inputs).__name__)
def batch_map_fn(map_fn, inputs, *args):
  """Apply map_fn at batch dimension.

  If the static batch size is known, unrolls a Python loop over the batch
  and re-stacks the results; otherwise falls back to tf.vectorized_map.

  Args:
    map_fn: callable applied to the list of per-example slices of `inputs`.
    inputs: a list of batched tensors, or a list/tuple whose first element
      is itself a list/tuple (then its length is used as the batch size).
    *args: extra positional arguments.
      NOTE(review): these are forwarded only on the dynamic-batch
      (tf.vectorized_map) path, not in the unrolled loop — confirm intended.

  Returns:
    A list of tensors stacked along the batch dimension.
  """
  if isinstance(inputs[0], (list, tuple)):
    batch_size = len(inputs[0])
  else:
    # Static (possibly None) batch size from the first tensor's shape.
    batch_size = inputs[0].shape.as_list()[0]

  if not batch_size:
    # handle dynamic batch size: tf.vectorized_map is faster than tf.map_fn.
    return tf.vectorized_map(map_fn, inputs, *args)

  outputs = []
  for i in range(batch_size):
    outputs.append(map_fn([x[i] for x in inputs]))
  # Transpose [batch][field] -> [field][batch] and stack each field.
  return [tf.stack(y) for y in zip(*outputs)]
def clip_boxes(boxes: T, image_size: int) -> T:
  """Clip boxes to fit the image size."""
  # `* 2` is tuple repetition, not scaling: parse_image_size presumably
  # yields (height, width), so this builds (h, w, h, w) to match the
  # [y_min, x_min, y_max, x_max] box layout used in this module.
  image_size = utils.parse_image_size(image_size) * 2
  return tf.clip_by_value(boxes, [0], image_size)
def merge_class_box_level_outputs(params, cls_outputs: List[T],
                                  box_outputs: List[T]) -> Tuple[T, T]:
  """Concatenates class and box of all levels into one tensor.

  Args:
    params: a dict of parameters; reads 'min_level', 'max_level',
      'num_classes' and 'data_format'.
    cls_outputs: per-level class logits, each [N, H, W, num_classes * A]
      (or channels-first, see below).
    box_outputs: per-level box outputs, each [N, H, W, 4 * A].

  Returns:
    A tuple (cls, box) with all levels flattened and concatenated along
    the anchor dimension: [N, total_anchors, num_classes] and
    [N, total_anchors, 4].
  """
  cls_outputs_all, box_outputs_all = [], []
  batch_size = tf.shape(cls_outputs[0])[0]
  for level in range(0, params['max_level'] - params['min_level'] + 1):
    if params['data_format'] == 'channels_first':
      # Convert NCHW -> NHWC before flattening the spatial dims.
      cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])
      box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])
    # Collapse H, W and anchors into a single per-level anchor axis.
    cls_outputs_all.append(
        tf.reshape(cls_outputs[level], [batch_size, -1, params['num_classes']]))
    box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4]))
  return tf.concat(cls_outputs_all, 1), tf.concat(box_outputs_all, 1)
def topk_class_boxes(params, cls_outputs: T,
                     box_outputs: T) -> Tuple[T, T, T, T]:
  """Pick the topk class and box outputs.

  Args:
    params: a dict of parameters; reads 'num_classes' and
      nms_configs['max_nms_inputs'].
    cls_outputs: class logits with shape [N, num_anchors, num_classes].
    box_outputs: box outputs with shape [N, num_anchors, 4].

  Returns:
    A tuple (cls_outputs_topk, box_outputs_topk, classes, indices):
    per-anchor scores, the corresponding boxes, the selected class index
    per kept entry, and the anchor index each entry came from.
  """
  batch_size = tf.shape(cls_outputs)[0]
  num_classes = params['num_classes']

  max_nms_inputs = params['nms_configs'].get('max_nms_inputs', 0)
  if max_nms_inputs > 0:
    # Prune anchors and detections to only keep max_nms_inputs.
    # Due to some issues, top_k is currently slow in graph model.
    logging.info('use max_nms_inputs for pre-nms topk.')
    # Flatten (anchor, class) pairs and take the global top-k scores.
    cls_outputs_reshape = tf.reshape(cls_outputs, [batch_size, -1])
    _, cls_topk_indices = tf.math.top_k(
        cls_outputs_reshape, k=max_nms_inputs, sorted=False)
    # Recover anchor index and class index from the flattened index.
    indices = cls_topk_indices // num_classes
    classes = cls_topk_indices % num_classes
    cls_indices = tf.stack([indices, classes], axis=2)

    cls_outputs_topk = tf.gather_nd(cls_outputs, cls_indices, batch_dims=1)
    box_outputs_topk = tf.gather_nd(
        box_outputs, tf.expand_dims(indices, 2), batch_dims=1)
  else:
    logging.info('use max_reduce for pre-nms topk.')
    # Keep all anchors, but for each anchor, just keep the max probability
    # over classes.
    cls_outputs_idx = tf.math.argmax(cls_outputs, axis=-1, output_type=tf.int32)
    num_anchors = tf.shape(cls_outputs)[1]

    classes = cls_outputs_idx
    # Every anchor is kept, so indices is simply 0..num_anchors-1 per batch.
    indices = tf.tile(
        tf.expand_dims(tf.range(num_anchors), axis=0), [batch_size, 1])
    cls_outputs_topk = tf.reduce_max(cls_outputs, -1)
    box_outputs_topk = box_outputs
  return cls_outputs_topk, box_outputs_topk, classes, indices
def pre_nms(params, cls_outputs, box_outputs, topk=True):
  """Detection post processing before nms.

  It takes the multi-level class and box predictions from network, merge them
  into unified tensors, and compute boxes, scores, and classes.

  Args:
    params: a dict of parameters.
    cls_outputs: a list of tensors for classes, each tensor denotes a level of
      logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors].
    topk: if True, select topk before nms (mainly to speed up nms).

  Returns:
    A tuple of (boxes, scores, classes); `classes` is None when topk=False.
  """
  # get boxes by apply bounding box regression to anchors.
  eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
                                 params['num_scales'], params['aspect_ratios'],
                                 params['anchor_scale'], params['image_size'])

  cls_outputs, box_outputs = merge_class_box_level_outputs(
      params, cls_outputs, box_outputs)

  if topk:
    # select topK purely based on scores before NMS, in order to speed up nms.
    cls_outputs, box_outputs, classes, indices = topk_class_boxes(
        params, cls_outputs, box_outputs)
    # Only the anchors that survived top-k are needed for decoding.
    anchor_boxes = tf.gather(eval_anchors.boxes, indices)
  else:
    anchor_boxes = eval_anchors.boxes
    classes = None

  boxes = anchors.decode_box_outputs(box_outputs, anchor_boxes)
  # convert logits to scores.
  scores = tf.math.sigmoid(cls_outputs)
  return boxes, scores, classes
def nms(params, boxes: T, scores: T, classes: T,
        padded: bool) -> Tuple[T, T, T, T]:
  """Non-maximum suppression.

  Args:
    params: a dict of parameters.
    boxes: a tensor with shape [N, 4], where N is the number of boxes. Box
      format is [y_min, x_min, y_max, x_max].
    scores: a tensor with shape [N].
    classes: a tensor with shape [N].
    padded: a bool value indicating whether the results are padded.

  Returns:
    A tuple (boxes, scores, classes, valid_lens), where valid_lens is a scalar
    denoting the valid length of boxes/scores/classes outputs.
  """
  nms_configs = params['nms_configs']
  method = nms_configs['method']
  if not method or method == 'hard':
    # Hard NMS: a zero sigma disables the soft-NMS score decay.
    sigma = 0.0
    iou_thresh = nms_configs['iou_thresh'] or 0.5
    score_thresh = nms_configs['score_thresh'] or float('-inf')
  elif method == 'gaussian':
    # Soft NMS with gaussian score decay; the IoU cutoff is disabled (1.0).
    sigma = nms_configs['sigma'] or 0.5
    iou_thresh = 1.0
    score_thresh = nms_configs['score_thresh'] or 0.001
  else:
    raise ValueError('Inference has invalid nms method {}'.format(method))

  # TF API's sigma is twice as the paper's value, so here we divide it by 2:
  # https://github.com/tensorflow/tensorflow/issues/40253.
  nms_top_idx, nms_scores, nms_valid_lens = tf.raw_ops.NonMaxSuppressionV5(
      boxes=boxes,
      scores=scores,
      max_output_size=nms_configs['max_output_size'],
      iou_threshold=iou_thresh,
      score_threshold=score_thresh,
      soft_nms_sigma=(sigma / 2),
      pad_to_max_output_size=padded)

  nms_boxes = tf.gather(boxes, nms_top_idx)
  nms_classes = tf.cast(
      tf.gather(classes, nms_top_idx) + CLASS_OFFSET, boxes.dtype)
  return nms_boxes, nms_scores, nms_classes, nms_valid_lens
def postprocess_combined(params, cls_outputs, box_outputs, image_scales=None):
  """Post processing with combined NMS.

  Leverage the tf combined NMS. It is fast on TensorRT, but slow on CPU/GPU.

  Args:
    params: a dict of parameters.
    cls_outputs: a list of tensors for classes, each tensor denotes a level of
      logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].
    image_scales: scaling factor for the final image and bounding boxes.

  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
  """
  cls_outputs = to_list(cls_outputs)
  box_outputs = to_list(box_outputs)
  # Don't filter any outputs because combined_nms needs the raw information.
  boxes, scores, _ = pre_nms(params, cls_outputs, box_outputs, topk=False)

  max_output_size = params['nms_configs']['max_output_size']
  score_thresh = params['nms_configs']['score_thresh'] or float('-inf')
  # Fix: honor the configured IoU threshold. Previously iou_threshold was not
  # passed, so TF's default (0.5) was always used and nms_configs['iou_thresh']
  # was silently ignored — inconsistent with nms(). Fall back to 0.5 when
  # unset, matching nms()'s behavior.
  iou_thresh = params['nms_configs']['iou_thresh'] or 0.5
  nms_boxes, nms_scores, nms_classes, nms_valid_len = (
      tf.image.combined_non_max_suppression(
          tf.expand_dims(boxes, axis=2),
          scores,
          max_output_size,
          max_output_size,
          iou_threshold=iou_thresh,
          score_threshold=score_thresh,
          clip_boxes=False))
  nms_classes += CLASS_OFFSET

  nms_boxes = clip_boxes(nms_boxes, params['image_size'])
  if image_scales is not None:
    # Map boxes back to the original (pre-resize) image coordinate frame.
    scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
    nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
  return nms_boxes, nms_scores, nms_classes, nms_valid_len
def tflite_nms_implements_signature(params):
  """`experimental_implements` signature for TFLite's custom NMS op.

  This signature encodes the arguments to correctly initialize TFLite's custom
  post-processing op in the MLIR converter.
  For details on `experimental_implements` see here:
  https://www.tensorflow.org/api_docs/python/tf/function

  Args:
    params: a dict of parameters.

  Returns:
    String encoding of a map from attribute keys to values.
  """
  scale_value = 1.0
  nms_configs = params['nms_configs']
  iou_thresh = nms_configs['iou_thresh'] or 0.5
  score_thresh = nms_configs['score_thresh'] or float('-inf')
  max_detections = params['tflite_max_detections']

  # (key, proto type tag, value) triples in the order the custom op expects.
  attr_specs = [
      ('max_detections', 'i', max_detections),
      ('max_classes_per_detection', 'i', TFLITE_MAX_CLASSES_PER_DETECTION),
      ('use_regular_nms', 'b', str(TFLITE_USE_REGULAR_NMS).lower()),
      ('nms_score_threshold', 'f', score_thresh),
      ('nms_iou_threshold', 'f', iou_thresh),
      ('y_scale', 'f', scale_value),
      ('x_scale', 'f', scale_value),
      ('h_scale', 'f', scale_value),
      ('w_scale', 'f', scale_value),
      ('num_classes', 'i', params['num_classes']),
  ]
  value_fmt = {'i': '%d', 'b': '%s', 'f': '%f'}
  parts = ['name: "%s"' % TFLITE_DETECTION_POSTPROCESS_FUNC]
  for key, tag, value in attr_specs:
    template = 'attr { key: "%s" value { ' + tag + ': ' + value_fmt[tag] + ' } }'
    parts.append(template % (key, value))
  return ' '.join(parts)
def tflite_pre_nms(params, cls_outputs, box_outputs):
  """Pre-NMS that is compatible with TFLite's custom NMS op.

  For details, see tensorflow/lite/kernels/detection_postprocess.cc

  Args:
    params: a dict of parameters.
    cls_outputs: a list of tensors for classes, each tensor denotes a level of
      logits with shape [1, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [1, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].

  Returns:
    boxes: boxes encoded as {y_center, x_center, height, width}
    scores: scores converted from `cls_outputs` logits using sigmoid
    anchors: normalized anchors encoded as {y_center, x_center, height, width}
  """
  cls_outputs, box_outputs = merge_class_box_level_outputs(
      params, to_list(cls_outputs), to_list(box_outputs))
  eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
                                 params['num_scales'], params['aspect_ratios'],
                                 params['anchor_scale'], params['image_size'])

  # TODO(b/175166514): Consider computing Top-K boxes & anchors here. We don't
  # do this currently since the resultant graph does not support TFLite
  # delegates well. `topk_class_boxes` won't work as-is, since the outputs
  # will need to be modified appropriately for TFLite op's consumption.

  # TFLite's object detection APIs require normalized anchors.
  height, width = utils.parse_image_size(params['image_size'])
  normalize_factor = tf.constant([height, width, height, width],
                                 dtype=tf.float32)
  normalized_anchors = eval_anchors.boxes / normalize_factor
  decoded_anchors = anchors.decode_anchors_to_centersize(
      box_outputs, normalized_anchors)

  # Logits -> probabilities; box outputs stay raw for the TFLite op to decode.
  scores = tf.math.sigmoid(cls_outputs)
  return box_outputs, scores, decoded_anchors
def postprocess_tflite(params, cls_outputs, box_outputs):
  """Post processing for conversion to TFLite.

  Mathematically same as postprocess_global, except that the last portion of
  the TF graph constitutes a dummy `tf.function` that contains an annotation
  for conversion to TFLite's custom NMS op. Using this custom op allows
  features like post-training quantization & accelerator support.
  NOTE: This function does NOT return a valid output, and is only meant to
  generate a SavedModel for TFLite conversion via MLIR.
  For TFLite op details, see tensorflow/lite/kernels/detection_postprocess.cc

  Args:
    params: a dict of parameters.
    cls_outputs: a list of tensors for classes, each tensor denotes a level of
      logits with shape [1, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [1, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].

  Returns:
    A (dummy) tuple of (boxes, scores, classes, valid_len).
  """
  box_outputs, scores, decoded_anchors = tflite_pre_nms(params, cls_outputs,
                                                        box_outputs)

  # There is no TF equivalent for TFLite's custom post-processing op.
  # So we add an 'empty' composite function here, that is legalized to the
  # custom op with MLIR.
  # For details, see:
  # tensorflow/compiler/mlir/lite/utils/nms_utils.cc
  @tf.function(experimental_implements=tflite_nms_implements_signature(params))
  # pylint: disable=g-unused-argument,unused-argument
  def dummy_post_processing(box_encodings, class_predictions, anchor_boxes):
    # Scalar placeholders only: the body is replaced by the custom op during
    # MLIR conversion, so just the annotated signature matters.
    boxes = tf.constant(0.0, dtype=tf.float32, name='boxes')
    scores = tf.constant(0.0, dtype=tf.float32, name='scores')
    classes = tf.constant(0.0, dtype=tf.float32, name='classes')
    num_detections = tf.constant(0.0, dtype=tf.float32, name='num_detections')
    return boxes, classes, scores, num_detections

  # NOTE(review): the dummy outputs are returned reversed; since all four are
  # placeholder scalars, only the `experimental_implements` annotation matters
  # to the converter — confirm ordering against the TFLite op if ever consumed.
  return dummy_post_processing(box_outputs, scores, decoded_anchors)[::-1]
def postprocess_global(params, cls_outputs, box_outputs, image_scales=None):
  """Post processing with global NMS.

  A fast but less accurate version of NMS. The idea is to treat the scores for
  different classes in a unified way, and perform NMS globally for all classes.

  Args:
    params: a dict of parameters.
    cls_outputs: a list of tensors for classes, each tensor denotes a level of
      logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].
    image_scales: scaling factor for the final image and bounding boxes.

  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
  """
  boxes, scores, classes = pre_nms(params, to_list(cls_outputs),
                                   to_list(box_outputs))

  # Run padded per-image NMS across the batch dimension.
  def single_batch_fn(element):
    return nms(params, element[0], element[1], element[2], True)

  nms_boxes, nms_scores, nms_classes, nms_valid_len = batch_map_fn(
      single_batch_fn, [boxes, scores, classes])
  nms_boxes = clip_boxes(nms_boxes, params['image_size'])
  if image_scales is not None:
    # Map boxes back to the original (pre-resize) image coordinate frame.
    scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
    nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
  return nms_boxes, nms_scores, nms_classes, nms_valid_len
def per_class_nms(params, boxes, scores, classes, image_scales=None):
  """Per-class nms, a utility for postprocess_per_class.

  Args:
    params: a dict of parameters.
    boxes: A tensor with shape [N, K, 4], where N is batch_size, K is num_boxes.
      Box format is [y_min, x_min, y_max, x_max].
    scores: A tensor with shape [N, K].
    classes: A tensor with shape [N, K].
    image_scales: scaling factor for the final image and bounding boxes.

  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
  """
  def single_batch_fn(element):
    """A mapping function for a single batch."""
    boxes_i, scores_i, classes_i = element[0], element[1], element[2]
    nms_boxes_cls, nms_scores_cls, nms_classes_cls = [], [], []
    nms_valid_len_cls = []
    for cid in range(params['num_classes']):
      # Gather only the candidates predicted as class `cid`.
      indices = tf.where(tf.equal(classes_i, cid))
      # NOTE(review): this emptiness check relies on a statically known shape;
      # with a dynamic shape (graph mode) indices.shape[0] may be None and the
      # class would not be skipped — confirm intended execution mode.
      if indices.shape[0] == 0:
        continue
      classes_cls = tf.gather_nd(classes_i, indices)
      boxes_cls = tf.gather_nd(boxes_i, indices)
      scores_cls = tf.gather_nd(scores_i, indices)
      # Unpadded NMS per class; per-class results are concatenated below.
      nms_boxes, nms_scores, nms_classes, nms_valid_len = nms(
          params, boxes_cls, scores_cls, classes_cls, False)
      nms_boxes_cls.append(nms_boxes)
      nms_scores_cls.append(nms_scores)
      nms_classes_cls.append(nms_classes)
      nms_valid_len_cls.append(nms_valid_len)

    # Pad zeros and select topk.
    max_output_size = params['nms_configs'].get('max_output_size', 100)
    # Padding guarantees at least `max_output_size` entries so the top_k call
    # below is valid even when fewer boxes survived NMS.
    nms_boxes_cls = tf.pad(
        tf.concat(nms_boxes_cls, 0), [[0, max_output_size], [0, 0]])
    nms_scores_cls = tf.pad(
        tf.concat(nms_scores_cls, 0), [[0, max_output_size]])
    nms_classes_cls = tf.pad(
        tf.concat(nms_classes_cls, 0), [[0, max_output_size]])
    nms_valid_len_cls = tf.stack(nms_valid_len_cls)

    # Keep the overall top-k detections across all classes (padded entries
    # score 0 and sink to the bottom).
    _, indices = tf.math.top_k(nms_scores_cls, k=max_output_size, sorted=True)

    return tuple((
        tf.gather(nms_boxes_cls, indices),
        tf.gather(nms_scores_cls, indices),
        tf.gather(nms_classes_cls, indices),
        tf.minimum(max_output_size, tf.reduce_sum(nms_valid_len_cls))))

  # end of single_batch_fn

  nms_boxes, nms_scores, nms_classes, nms_valid_len = batch_map_fn(
      single_batch_fn, [boxes, scores, classes])
  if image_scales is not None:
    # Map boxes back to the original (pre-resize) image coordinate frame.
    scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
    nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
  return nms_boxes, nms_scores, nms_classes, nms_valid_len
def postprocess_per_class(params, cls_outputs, box_outputs, image_scales=None):
  """Post processing with per class NMS.

  An accurate but relatively slow version of NMS. The idea is to perform NMS
  for each class, and then combine them.

  Args:
    params: a dict of parameters.
    cls_outputs: a list of tensors for classes, each tensor denotes a level of
      logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].
    image_scales: scaling factor for the final image and bounding boxes.

  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
  """
  boxes, scores, classes = pre_nms(params, to_list(cls_outputs),
                                   to_list(box_outputs))
  return per_class_nms(params, boxes, scores, classes, image_scales)
def generate_detections_from_nms_output(nms_boxes_bs,
                                        nms_classes_bs,
                                        nms_scores_bs,
                                        image_ids,
                                        original_image_widths=None,
                                        flip=False):
  """Generating [id, x, y, w, h, score, class] from NMS outputs."""
  image_ids_bs = tf.cast(tf.expand_dims(image_ids, -1), nms_scores_bs.dtype)
  # Broadcast the per-image id to every detection row.
  id_col = image_ids_bs * tf.ones_like(nms_scores_bs)
  y_min = nms_boxes_bs[:, :, 0]
  x_min = nms_boxes_bs[:, :, 1]
  y_max = nms_boxes_bs[:, :, 2]
  x_max = nms_boxes_bs[:, :, 3]
  if flip:
    # Horizontal flip: each x edge maps to (image width - opposite edge).
    columns = [
        id_col,
        original_image_widths - x_max,
        y_min,
        original_image_widths - x_min,
        y_max,
        nms_scores_bs,
        nms_classes_bs,
    ]
  else:
    columns = [
        id_col,
        x_min,
        y_min,
        x_max,
        y_max,
        nms_scores_bs,
        nms_classes_bs,
    ]
  return tf.stack(columns, axis=-1, name='detections')
def generate_detections(params,
                        cls_outputs,
                        box_outputs,
                        image_scales,
                        image_ids,
                        flip=False,
                        pre_class_nms=True):
  """A legacy interface for generating [id, x, y, w, h, score, class].

  Args:
    params: a dict of parameters.
    cls_outputs: a list of class prediction tensors, one per level.
    box_outputs: a list of box prediction tensors, one per level.
    image_scales: scaling factor for the final image and bounding boxes.
    image_ids: per-image ids placed in the first output column.
    flip: if True, mirror x coordinates (horizontal-flip test-time aug).
    pre_class_nms: if True use per-class NMS, otherwise global NMS.
      NOTE(review): name reads like a typo of `per_class_nms`, but it is part
      of the public interface, so it is kept unchanged.

  Returns:
    A [batch, max_output_size, 7] detections tensor.
  """
  _, width = utils.parse_image_size(params['image_size'])
  original_image_widths = tf.expand_dims(image_scales, -1) * width

  if params['nms_configs'].get('pyfunc', True):
    # numpy based soft-nms gives better accuracy than the tensorflow builtin
    # the reason why is unknown
    detections_bs = []
    boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
    for index in range(boxes.shape[0]):
      nms_configs = params['nms_configs']
      # Run the numpy per-class NMS for one image via tf.numpy_function.
      detections = tf.numpy_function(
          functools.partial(nms_np.per_class_nms, nms_configs=nms_configs), [
              boxes[index],
              scores[index],
              classes[index],
              tf.slice(image_ids, [index], [1]),
              tf.slice(image_scales, [index], [1]),
              params['num_classes'],
              nms_configs['max_output_size'],
          ], tf.float32)

      if flip:
        # Columns appear to be [id, x_min, y_min, x_max, y_max, score, class]
        # — confirm against nms_np.per_class_nms output format.
        detections = tf.stack([
            detections[:, 0],
            # the mirrored location of the left edge is the image width
            # minus the position of the right edge
            original_image_widths[index] - detections[:, 3],
            detections[:, 2],
            # the mirrored location of the right edge is the image width
            # minus the position of the left edge
            original_image_widths[index] - detections[:, 1],
            detections[:, 4],
            detections[:, 5],
            detections[:, 6],
        ], axis=-1)
      detections_bs.append(detections)
    return tf.stack(detections_bs, axis=0, name='detections')

  if pre_class_nms:
    postprocess = postprocess_per_class
  else:
    postprocess = postprocess_global
  nms_boxes_bs, nms_scores_bs, nms_classes_bs, _ = postprocess(
      params, cls_outputs, box_outputs, image_scales)
  return generate_detections_from_nms_output(nms_boxes_bs, nms_classes_bs,
                                             nms_scores_bs, image_ids,
                                             original_image_widths, flip)
def transform_detections(detections):
  """Transforms detections from [id, x1, y1, x2, y2, score, class] form to
  [id, x, y, w, h, score, class] form."""
  ids = detections[:, :, 0]
  x_min = detections[:, :, 1]
  y_min = detections[:, :, 2]
  # Convert corner coordinates into width/height.
  widths = detections[:, :, 3] - x_min
  heights = detections[:, :, 4] - y_min
  scores = detections[:, :, 5]
  classes = detections[:, :, 6]
  return tf.stack([ids, x_min, y_min, widths, heights, scores, classes],
                  axis=-1)
|
|
# -*- coding: utf-8 -*-
import functools
import inspect
import uuid
import marshmallow as ma
import sqlalchemy as sa
from marshmallow import fields, validate
from sqlalchemy.dialects import mssql, mysql, postgresql
from .exceptions import ModelConversionError
from .fields import Related
def _is_field(value):
return (
isinstance(value, type) and
issubclass(value, fields.Field)
)
def _has_default(column):
return (
column.default is not None or
column.server_default is not None or
_is_auto_increment(column)
)
def _is_auto_increment(column):
return (
column.table is not None and
column is column.table._autoincrement_column
)
def _postgres_array_factory(converter, data_type):
    """Build a partially-applied List field for a Postgres ARRAY column,
    resolving the item field class from the array's item type."""
    item_field_cls = converter._get_field_class_for_data_type(data_type.item_type)
    return functools.partial(fields.List, item_field_cls)
def _should_exclude_field(column, fields=None, exclude=None):
if fields and column.key not in fields:
return True
if exclude and column.key in exclude:
return True
return False
class ModelConverter(object):
    """Class that converts a SQLAlchemy model into a dictionary of corresponding
    marshmallow `Fields <marshmallow.fields.Field>`.
    """

    # Maps SQLAlchemy / dialect column types to marshmallow field classes.
    # A value may also be a factory callable taking (converter, data_type),
    # e.g. _postgres_array_factory.
    SQLA_TYPE_MAPPING = {
        sa.Enum: fields.Field,
        postgresql.BIT: fields.Integer,
        postgresql.UUID: fields.UUID,
        postgresql.MACADDR: fields.String,
        postgresql.INET: fields.String,
        postgresql.JSON: fields.Raw,
        postgresql.JSONB: fields.Raw,
        postgresql.HSTORE: fields.Raw,
        postgresql.ARRAY: _postgres_array_factory,
        mysql.BIT: fields.Integer,
        mysql.YEAR: fields.Integer,
        mysql.SET: fields.List,
        mysql.ENUM: fields.Field,
        mssql.BIT: fields.Integer,
    }
    # sa.JSON only exists on newer SQLAlchemy versions.
    if hasattr(sa, 'JSON'):
        SQLA_TYPE_MAPPING[sa.JSON] = fields.Raw

    # Whether a relationship direction yields a list-like (x-to-many) value.
    DIRECTION_MAPPING = {
        'MANYTOONE': False,
        'MANYTOMANY': True,
        'ONETOMANY': True,
    }

    def __init__(self, schema_cls=None):
        # Optional marshmallow Schema subclass whose TYPE_MAPPING overrides
        # the default python-type -> field-class mapping.
        self.schema_cls = schema_cls

    @property
    def type_mapping(self):
        # python type -> marshmallow field class, taken from the bound schema
        # when one was supplied, otherwise marshmallow's default mapping.
        if self.schema_cls:
            return self.schema_cls.TYPE_MAPPING
        else:
            return ma.Schema.TYPE_MAPPING

    def fields_for_model(self, model, include_fk=False, fields=None, exclude=None, base_fields=None,
                         dict_cls=dict):
        """Return a dict_cls of field_name -> Field for *model*'s mapped
        properties, honoring include/exclude lists and `base_fields` overrides.
        """
        result = dict_cls()
        base_fields = base_fields or {}
        for prop in model.__mapper__.iterate_properties:
            if _should_exclude_field(prop, fields=fields, exclude=exclude):
                continue
            if hasattr(prop, 'columns'):
                if not include_fk:
                    # Only skip a column if there is no overridden column
                    # which does not have a Foreign Key.
                    for column in prop.columns:
                        if not column.foreign_keys:
                            break
                    else:
                        continue
            # An explicit override in base_fields wins over conversion.
            field = base_fields.get(prop.key) or self.property2field(prop)
            if field:
                result[prop.key] = field
        return result

    def fields_for_table(self, table, include_fk=False, fields=None, exclude=None, base_fields=None,
                        dict_cls=dict):
        """Return a dict_cls of field_name -> Field for *table*'s columns,
        honoring include/exclude lists and `base_fields` overrides.
        """
        result = dict_cls()
        base_fields = base_fields or {}
        for column in table.columns:
            if _should_exclude_field(column, fields=fields, exclude=exclude):
                continue
            if not include_fk and column.foreign_keys:
                continue
            # An explicit override in base_fields wins over conversion.
            field = base_fields.get(column.key) or self.column2field(column)
            if field:
                result[column.key] = field
        return result

    def property2field(self, prop, instance=True, field_class=None, **kwargs):
        """Convert a SQLAlchemy property to a Field instance (or, when
        ``instance=False``, the resolved Field class)."""
        field_class = field_class or self._get_field_class_for_property(prop)
        if not instance:
            return field_class
        field_kwargs = self._get_field_kwargs_for_property(prop)
        field_kwargs.update(kwargs)
        ret = field_class(**field_kwargs)
        if (
            hasattr(prop, 'direction') and
            self.DIRECTION_MAPPING[prop.direction.name] and
            prop.uselist is True
        ):
            # x-to-many relationships are wrapped in a List field.
            ret = fields.List(ret, **kwargs)
        return ret

    def column2field(self, column, instance=True, **kwargs):
        """Convert a SQLAlchemy Column to a Field instance (or, when
        ``instance=False``, the resolved Field class)."""
        field_class = self._get_field_class_for_column(column)
        if not instance:
            return field_class
        field_kwargs = self.get_base_kwargs()
        self._add_column_kwargs(field_kwargs, column)
        field_kwargs.update(kwargs)
        return field_class(**field_kwargs)

    def field_for(self, model, property_name, **kwargs):
        """Convert the named mapped property of *model* to a Field instance."""
        prop = model.__mapper__.get_property(property_name)
        return self.property2field(prop, **kwargs)

    def _get_field_class_for_column(self, column):
        # Column conversion is driven entirely by the column's type.
        return self._get_field_class_for_data_type(column.type)

    def _get_field_class_for_data_type(self, data_type):
        """Resolve a marshmallow field class for a SQLAlchemy type instance,
        trying SQLA_TYPE_MAPPING first, then the type's python_type, then
        recursing into a wrapped ``impl`` type (e.g. TypeDecorator).

        Raises:
            ModelConversionError: if no field class can be determined.
        """
        field_cls = None
        types = inspect.getmro(type(data_type))
        # First search for a field class from self.SQLA_TYPE_MAPPING
        for col_type in types:
            if col_type in self.SQLA_TYPE_MAPPING:
                field_cls = self.SQLA_TYPE_MAPPING[col_type]
                if callable(field_cls) and not _is_field(field_cls):
                    # The mapping value is a factory, not a Field class.
                    field_cls = field_cls(self, data_type)
                break
        else:
            # Try to find a field class based on the column's python_type
            try:
                python_type = data_type.python_type
            except NotImplementedError:
                python_type = None
            if python_type in self.type_mapping:
                field_cls = self.type_mapping[python_type]
            else:
                # Wrapped types (e.g. TypeDecorator) delegate to .impl.
                if hasattr(data_type, 'impl'):
                    return self._get_field_class_for_data_type(data_type.impl)
                raise ModelConversionError(
                    'Could not find field column of type {0}.'.format(types[0]))
        return field_cls

    def _get_field_class_for_property(self, prop):
        # Relationship properties have a `direction`; everything else is
        # converted from its first column.
        if hasattr(prop, 'direction'):
            field_cls = Related
        else:
            column = prop.columns[0]
            field_cls = self._get_field_class_for_column(column)
        return field_cls

    def _get_field_kwargs_for_property(self, prop):
        """Collect Field constructor kwargs for a SQLAlchemy property."""
        kwargs = self.get_base_kwargs()
        if hasattr(prop, 'columns'):
            column = prop.columns[0]
            self._add_column_kwargs(kwargs, column)
        if hasattr(prop, 'direction'):  # Relationship property
            self._add_relationship_kwargs(kwargs, prop)
        if getattr(prop, 'doc', None):  # Useful for documentation generation
            kwargs['description'] = prop.doc
        return kwargs

    def _add_column_kwargs(self, kwargs, column):
        """Add keyword arguments to kwargs (in-place) based on the passed in
        `Column <sqlalchemy.schema.Column>`.
        """
        if column.nullable:
            kwargs['allow_none'] = True
        # Required only when the DB cannot supply a value itself.
        kwargs['required'] = not column.nullable and not _has_default(column)

        if hasattr(column.type, 'enums'):
            kwargs['validate'].append(
                validate.OneOf(choices=column.type.enums))

        # Add a length validator if a max length is set on the column
        # Skip UUID columns
        if hasattr(column.type, 'length'):
            try:
                python_type = column.type.python_type
            except (AttributeError, NotImplementedError):
                python_type = None
            if not python_type or not issubclass(python_type, uuid.UUID):
                kwargs['validate'].append(
                    validate.Length(max=column.type.length))

        if hasattr(column.type, 'scale'):
            # Decimal-like types: marshmallow's `places` mirrors SQL scale.
            kwargs['places'] = getattr(column.type, 'scale', None)

    def _add_relationship_kwargs(self, kwargs, prop):
        """Add keyword arguments to kwargs (in-place) based on the passed in
        relationship `Property`.
        """
        nullable = True
        for pair in prop.local_remote_pairs:
            if not pair[0].nullable:
                if prop.uselist is True:
                    nullable = False
                break
        kwargs.update({
            'allow_none': nullable,
            'required': not nullable,
        })

    def get_base_kwargs(self):
        # Fresh list per field so validators are never shared across fields.
        return {
            'validate': []
        }
default_converter = ModelConverter()
fields_for_model = default_converter.fields_for_model
"""Generate a dict of field_name: `marshmallow.fields.Field` pairs for the
given model.
:param model: The SQLAlchemy model
:param bool include_fk: Whether to include foreign key fields in the output.
:return: dict of field_name: Field instance pairs
"""
property2field = default_converter.property2field
"""Convert a SQLAlchemy `Property` to a field instance or class.
:param Property prop: SQLAlchemy Property.
:param bool instance: If `True`, return `Field` instance, computing relevant kwargs
from the given property. If `False`, return the `Field` class.
:param kwargs: Additional keyword arguments to pass to the field constructor.
:return: A `marshmallow.fields.Field` class or instance.
"""
column2field = default_converter.column2field
"""Convert a SQLAlchemy `Column <sqlalchemy.schema.Column>` to a field instance or class.
:param sqlalchemy.schema.Column column: SQLAlchemy Column.
:param bool instance: If `True`, return `Field` instance, computing relevant kwargs
from the given property. If `False`, return the `Field` class.
:return: A `marshmallow.fields.Field` class or instance.
"""
field_for = default_converter.field_for
"""Convert a property for a mapped SQLAlchemy class to a marshmallow `Field`.
Example: ::
date_created = field_for(Author, 'date_created', dump_only=True)
author = field_for(Book, 'author')
:param type model: A SQLAlchemy mapped class.
:param str property_name: The name of the property to convert.
:param kwargs: Extra keyword arguments to pass to `property2field`
:return: A `marshmallow.fields.Field` class or instance.
"""
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from decimal import Decimal
import random
from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
satoshi_round,
)
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
REDEEM_SCRIPT_1 = CScript([OP_1, OP_DROP])
REDEEM_SCRIPT_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_2), OP_EQUAL])

# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
# SCRIPT_SIG[0] spends P2SH_1 and SCRIPT_SIG[1] spends P2SH_2, so the list
# can be indexed by output position (see small_txpuzzle_randfee/split_inputs).
SCRIPT_SIG = [CScript([OP_TRUE, REDEEM_SCRIPT_1]), CScript([OP_TRUE, REDEEM_SCRIPT_2])]
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
    """Create and send a transaction with a random fee.

    The transaction pays to a trivial P2SH script, and assumes that its inputs
    are of the same form.
    The function takes a list of confirmed outputs and unconfirmed outputs
    and attempts to use the confirmed list first for its inputs.
    It adds the newly created outputs to the unconfirmed list.
    Returns (raw transaction, fee)."""
    # It's best to exponentially distribute our random fees
    # because the buckets are exponentially spaced.
    # Exponentially distributed from 1-128 * fee_increment
    rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
    # Total fee ranges from min_fee to min_fee + 127*fee_increment
    fee = min_fee - fee_increment + satoshi_round(rand_fee)
    tx = CTransaction()
    total_in = Decimal("0.00000000")
    # Greedily consume confirmed outputs first...
    while total_in <= (amount + fee) and len(conflist) > 0:
        t = conflist.pop(0)
        total_in += t["amount"]
        tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
    if total_in <= amount + fee:
        # ...then fall back to unconfirmed outputs (chaining off the mempool).
        while total_in <= (amount + fee) and len(unconflist) > 0:
            t = unconflist.pop(0)
            total_in += t["amount"]
            tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
        if total_in <= amount + fee:
            raise RuntimeError("Insufficient funds: need %d, have %d" % (amount + fee, total_in))
    # vout 0 is change to P2SH_1, vout 1 is the payment to P2SH_2 — the same
    # layout this function appends to unconflist below, keeping inputs uniform.
    tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
    tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
    # These transactions don't need to be signed, but we still have to insert
    # the ScriptSig that will satisfy the ScriptPubKey.
    # NOTE(review): indexing SCRIPT_SIG by prevout.n assumes every spent output
    # follows the vout 0 -> P2SH_1 / vout 1 -> P2SH_2 layout produced here and
    # in split_inputs — confirm if inputs from other sources are ever used.
    for inp in tx.vin:
        inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
    txid = from_node.sendrawtransaction(hexstring=ToHex(tx), maxfeerate=0)
    unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee})
    unconflist.append({"txid": txid, "vout": 1, "amount": amount})
    return (ToHex(tx), fee)
def split_inputs(from_node, txins, txouts, initial_split=False):
    """Generate a lot of inputs so we can generate a ton of transactions.

    This function takes an input from txins, and creates and sends a transaction
    which splits the value into 2 outputs which are appended to txouts.
    Previously this was designed to be small inputs so they wouldn't have
    a high coin age when the notion of priority still existed."""
    prevtxout = txins.pop()
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
    # Split roughly in half, leaving 0.00001 BTC behind as the fee.
    half_change = satoshi_round(prevtxout["amount"] / 2)
    rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
    # vout 0 -> P2SH_1, vout 1 -> P2SH_2: matches SCRIPT_SIG's index layout.
    tx.vout.append(CTxOut(int(half_change * COIN), P2SH_1))
    tx.vout.append(CTxOut(int(rem_change * COIN), P2SH_2))
    # If this is the initial split we actually need to sign the transaction
    # Otherwise we just need to insert the proper ScriptSig
    if (initial_split):
        completetx = from_node.signrawtransactionwithwallet(ToHex(tx))["hex"]
    else:
        tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
        completetx = ToHex(tx)
    # maxfeerate=0 disables the absurd-fee sanity check for these odd txs.
    txid = from_node.sendrawtransaction(hexstring=completetx, maxfeerate=0)
    txouts.append({"txid": txid, "vout": 0, "amount": half_change})
    txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
def check_raw_estimates(node, fees_seen):
    """Call estimaterawfee and verify that the estimates meet certain invariants."""
    delta = 1.0e-6  # account for rounding error
    lower, upper = min(fees_seen), max(fees_seen)
    for target in range(1, 26):
        for estimate in node.estimaterawfee(target).values():
            feerate = float(estimate["feerate"])
            assert_greater_than(feerate, 0)
            # Every estimate must fall within the range of fees actually paid.
            if feerate + delta < lower or feerate - delta > upper:
                raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                     % (feerate, lower, upper))
def check_smart_estimates(node, fees_seen):
    """Call estimatesmartfee and verify that the estimates meet certain invariants."""
    delta = 1.0e-6  # account for rounding error
    last_feerate = float(max(fees_seen))
    all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
    for i, e in enumerate(all_smart_estimates):  # estimate is for i+1
        feerate = float(e["feerate"])
        assert_greater_than(feerate, 0)
        # Every estimate must fall within the range of fees actually paid.
        if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
            raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                 % (feerate, min(fees_seen), max(fees_seen)))
        # Estimates must not increase as the confirmation target grows.
        if feerate - delta > last_feerate:
            raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
                                 % (feerate, last_feerate))
        last_feerate = feerate

        if i == 0:
            # The target=1 request is expected to answer for 2 blocks
            # (presumably the estimator's minimum; confirm against the RPC).
            assert_equal(e["blocks"], 2)
        else:
            # The answered target may be shorter than requested, never longer.
            assert_greater_than_or_equal(i + 1, e["blocks"])
def check_estimates(node, fees_seen):
    """Run both the raw and smart fee-estimate invariant checks on *node*."""
    for checker in (check_raw_estimates, check_smart_estimates):
        checker(node, fees_seen)
class EstimateFeeTest(BitcoinTestFramework):
    def set_test_params(self):
        """Configure 3 nodes whose miners have progressively smaller block weights."""
        self.num_nodes = 3
        # mine non-standard txs (e.g. txs with "dust" outputs)
        # Force fSendTrickle to true (via whitelist.noban)
        # Node 0: default block weight; node 1: 68k; node 2: 32k.
        self.extra_args = [
            ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1"],
            ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1", "-blockmaxweight=68000"],
            ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1", "-blockmaxweight=32000"],
        ]
    def skip_test_if_missing_module(self):
        # The test spends from the node wallet (signrawtransactionwithwallet,
        # listunspent), so skip when bitcoind is built without wallet support.
        self.skip_if_no_wallet()
    def setup_network(self):
        """
        We'll setup the network to have 3 nodes that all mine with different parameters.
        But first we need to use one node to create a lot of outputs
        which we will use to generate our transactions.
        """
        # Nodes are added but deliberately not connected here; run_test starts
        # and connects them individually after the input-splitting phase.
        self.add_nodes(3, extra_args=self.extra_args)
        # Use node0 to mine blocks for input splitting
        # Node1 mines small blocks but that are bigger than the expected transaction rate.
        # NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight,
        # (68k weight is room enough for 120 or so transactions)
        # Node2 is a stingy miner, that
        # produces too small blocks (room for only 55 or so transactions)
        self.start_nodes()
        self.import_deterministic_coinbase_privkeys()
        # Stop again so run_test can restart node0 alone for splitting.
        self.stop_nodes()
    def transact_and_mine(self, numblocks, mining_node):
        """Send random-fee transactions and have *mining_node* mine *numblocks*
        blocks, recording each transaction's fee rate in self.fees_per_kb and
        keeping self.confutxo/self.memutxo in sync with what got confirmed.
        """
        min_fee = Decimal("0.00001")
        # We will now mine numblocks blocks generating on average 100 transactions between each block
        # We shuffle our confirmed txout set before each set of transactions
        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
        # resorting to tx's that depend on the mempool when those run out
        for _ in range(numblocks):
            random.shuffle(self.confutxo)
            for _ in range(random.randrange(100 - 50, 100 + 50)):
                # Send from node 1 or 2, never the mining-setup node 0.
                from_index = random.randint(1, 2)
                (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
                                                      self.memutxo, Decimal("0.005"), min_fee, min_fee)
                # Hex string length / 2 = bytes; record the fee rate in BTC/kB.
                tx_kbytes = (len(txhex) // 2) / 1000.0
                self.fees_per_kb.append(float(fee) / tx_kbytes)
            self.sync_mempools(wait=.1)
            mined = mining_node.getblock(mining_node.generate(1)[0], True)["tx"]
            self.sync_blocks(wait=.1)
            # update which txouts are confirmed
            newmem = []
            for utx in self.memutxo:
                if utx["txid"] in mined:
                    self.confutxo.append(utx)
                else:
                    newmem.append(utx)
            self.memutxo = newmem
    def run_test(self):
        """Split coinbases into many tiny utxos on node0, then alternate
        phases of transaction generation with a too-small miner (node2) and
        a just-big-enough miner (node1), checking node1's fee estimates
        after each phase and once more after the mempool is drained.
        """
        self.log.info("This test is time consuming, please be patient")
        self.log.info("Splitting inputs so we can generate tx's")
        # Start node0
        self.start_node(0)
        self.txouts = []
        self.txouts2 = []
        # Split a coinbase into two transaction puzzle outputs
        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
        # Mine
        while len(self.nodes[0].getrawmempool()) > 0:
            self.nodes[0].generate(1)
        # Repeatedly split those 2 outputs, doubling twice for each rep
        # Use txouts to monitor the available utxo, since these won't be tracked in wallet
        reps = 0
        while reps < 5:
            # Double txouts to txouts2
            while len(self.txouts) > 0:
                split_inputs(self.nodes[0], self.txouts, self.txouts2)
            while len(self.nodes[0].getrawmempool()) > 0:
                self.nodes[0].generate(1)
            # Double txouts2 to txouts
            while len(self.txouts2) > 0:
                split_inputs(self.nodes[0], self.txouts2, self.txouts)
            while len(self.nodes[0].getrawmempool()) > 0:
                self.nodes[0].generate(1)
            reps += 1
        self.log.info("Finished splitting")
        # Now we can connect the other nodes, didn't want to connect them earlier
        # so the estimates would not be affected by the splitting transactions
        self.start_node(1)
        self.start_node(2)
        self.connect_nodes(1, 0)
        self.connect_nodes(0, 2)
        self.connect_nodes(2, 1)
        self.sync_all()
        self.fees_per_kb = []
        self.memutxo = []
        self.confutxo = self.txouts  # Start with the set of confirmed txouts after splitting
        self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
        for _ in range(2):
            self.log.info("Creating transactions and mining them with a block size that can't keep up")
            # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
            self.transact_and_mine(10, self.nodes[2])
            check_estimates(self.nodes[1], self.fees_per_kb)
            self.log.info("Creating transactions and mining them at a block size that is just big enough")
            # Generate transactions while mining 10 more blocks, this time with node1
            # which mines blocks with capacity just above the rate that transactions are being created
            self.transact_and_mine(10, self.nodes[1])
            check_estimates(self.nodes[1], self.fees_per_kb)
        # Finish by mining a normal-sized block:
        while len(self.nodes[1].getrawmempool()) > 0:
            self.nodes[1].generate(1)
        self.sync_blocks(self.nodes[0:3], wait=.1)
        self.log.info("Final estimates after emptying mempools")
        check_estimates(self.nodes[1], self.fees_per_kb)
# Standard functional-test entry point.
if __name__ == '__main__':
    EstimateFeeTest().main()
# --- end of fee estimation test; wav2vec 2.0 model module follows ---
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.distributed import fsdp_wrap
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
GumbelVectorQuantizer,
LayerNorm,
MultiheadAttention,
RelPositionalEncoding,
SamePad,
TransposeLast,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.conformer_layer import ConformerWav2Vec2EncoderLayer
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import buffered_arange, index_put, is_xla_tensor
from .utils import pad_to_multiple
# Normalization modes supported by the conv feature extractor.
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
# Distributions available for sampling mask span lengths (see compute_mask_indices).
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
# Encoder layer implementations selectable via config.
LAYER_TYPE_CHOICES = ChoiceEnum(["transformer", "conformer"])
@dataclass
class Wav2Vec2Config(FairseqDataclass):
    """Configuration for the wav2vec 2.0 model (feature extractor, encoder,
    quantizer, masking, negative sampling and positional embeddings).

    Fix over previous revision: corrected typos in several ``help`` strings
    ("tarnsformer", "compute_mask_indicesh", "is <= 0", and garbled grammar
    in the require_same_masks help). No defaults or field names changed.
    """

    extractor_mode: EXTRACTOR_MODE_CHOICES = field(
        default="default",
        metadata={
            "help": "mode for feature extractor. default has a single group norm with d "
            "groups in the first conv block, whereas layer_norm has layer norms in "
            "every block (meant to use with normalize=True)"
        },
    )
    encoder_layers: int = field(
        default=12, metadata={"help": "num encoder layers in the transformer"}
    )
    encoder_embed_dim: int = field(
        default=768, metadata={"help": "encoder embedding dimension"}
    )
    encoder_ffn_embed_dim: int = field(
        default=3072, metadata={"help": "encoder embedding dimension for FFN"}
    )
    encoder_attention_heads: int = field(
        default=12, metadata={"help": "num encoder attention heads"}
    )
    activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
        default="gelu", metadata={"help": "activation function to use"}
    )
    layer_type: LAYER_TYPE_CHOICES = field(
        default="transformer", metadata={"help": "layer type in encoder"}
    )
    # dropouts
    dropout: float = field(
        default=0.1, metadata={"help": "dropout probability for the transformer"}
    )
    attention_dropout: float = field(
        default=0.1, metadata={"help": "dropout probability for attention weights"}
    )
    activation_dropout: float = field(
        default=0.0, metadata={"help": "dropout probability after activation in FFN"}
    )
    encoder_layerdrop: float = field(
        default=0.0, metadata={"help": "probability of dropping a transformer layer"}
    )
    dropout_input: float = field(
        default=0.0,
        metadata={"help": "dropout to apply to the input (after feat extr)"},
    )
    dropout_features: float = field(
        default=0.0,
        metadata={"help": "dropout to apply to the features (after feat extr)"},
    )
    final_dim: int = field(
        default=0,
        metadata={
            "help": "project final representations and targets to this many dimensions."
            "set to encoder_embed_dim if <= 0"
        },
    )
    layer_norm_first: bool = field(
        default=False, metadata={"help": "apply layernorm first in the transformer"}
    )
    conv_feature_layers: str = field(
        default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
        metadata={
            "help": "string describing convolutional feature extraction layers in form of a python list that contains "
            "[(dim, kernel_size, stride), ...]"
        },
    )
    conv_bias: bool = field(
        default=False, metadata={"help": "include bias in conv encoder"}
    )
    logit_temp: float = field(
        default=0.1, metadata={"help": "temperature to divide logits by"}
    )
    quantize_targets: bool = field(
        default=False, metadata={"help": "use quantized targets"}
    )
    quantize_input: bool = field(
        default=False, metadata={"help": "use quantized inputs"}
    )
    same_quantizer: bool = field(
        default=False, metadata={"help": "use same quantizer for inputs and targets"}
    )
    target_glu: bool = field(
        default=False, metadata={"help": "adds projection + glu to targets"}
    )
    feature_grad_mult: float = field(
        default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
    )
    quantizer_depth: int = field(
        default=1,
        metadata={"help": "number of quantizer layers"},
    )
    quantizer_factor: int = field(
        default=3,
        metadata={
            "help": "dimensionality increase for inner quantizer layers (if depth > 1)"
        },
    )
    latent_vars: int = field(
        default=320,
        metadata={"help": "number of latent variables V in each group of the codebook"},
    )
    latent_groups: int = field(
        default=2,
        metadata={"help": "number of groups G of latent variables in the codebook"},
    )
    latent_dim: int = field(
        default=0,
        metadata={
            "help": "if > 0, uses this dimensionality for latent variables. "
            "otherwise uses final_dim / latent_groups"
        },
    )
    # masking
    mask_length: int = field(default=10, metadata={"help": "mask length"})
    mask_prob: float = field(
        default=0.65, metadata={"help": "probability of replacing a token with mask"}
    )
    mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
        default="static", metadata={"help": "how to choose mask length"}
    )
    mask_other: float = field(
        default=0,
        metadata={
            "help": "secondary mask argument (used for more complex distributions), "
            "see help in compute_mask_indices"
        },
    )
    no_mask_overlap: bool = field(
        default=False, metadata={"help": "whether to allow masks to overlap"}
    )
    mask_min_space: int = field(
        default=1,
        metadata={"help": "min space between spans (if no overlap is enabled)"},
    )
    require_same_masks: bool = field(
        default=True,
        metadata={
            "help": "whether the number of masked timesteps must be the same across all "
            "examples in a batch"
        },
    )
    mask_dropout: float = field(
        default=0.0,
        metadata={"help": "percent of masks to unmask for each sample"},
    )
    # channel masking
    mask_channel_length: int = field(
        default=10, metadata={"help": "length of the mask for features (channels)"}
    )
    mask_channel_prob: float = field(
        default=0.0, metadata={"help": "probability of replacing a feature with 0"}
    )
    mask_channel_before: bool = False
    mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
        default="static",
        metadata={"help": "how to choose mask length for channel masking"},
    )
    mask_channel_other: float = field(
        default=0,
        metadata={
            "help": "secondary mask argument (used for more complex distributions), "
            "see help in compute_mask_indices"
        },
    )
    no_mask_channel_overlap: bool = field(
        default=False, metadata={"help": "whether to allow channel masks to overlap"}
    )
    mask_channel_min_space: int = field(
        default=1,
        metadata={"help": "min space between spans (if no overlap is enabled)"},
    )
    # negative selection
    num_negatives: int = field(
        default=100,
        metadata={"help": "number of negative examples from the same sample"},
    )
    negatives_from_everywhere: bool = field(
        default=False,
        metadata={"help": "sample negatives from everywhere, not just masked states"},
    )
    cross_sample_negatives: int = field(
        default=0, metadata={"help": "number of negative examples from the any sample"}
    )
    codebook_negatives: int = field(
        default=0, metadata={"help": "number of negative examples codebook"}
    )
    # positional embeddings
    conv_pos: int = field(
        default=128,
        metadata={"help": "number of filters for convolutional positional embeddings"},
    )
    conv_pos_groups: int = field(
        default=16,
        metadata={"help": "number of groups for convolutional positional embedding"},
    )
    pos_conv_depth: int = field(
        default=1,
        metadata={"help": "depth of positional encoder network"},
    )
    latent_temp: Tuple[float, float, float] = field(
        default=(2, 0.5, 0.999995),
        metadata={
            "help": "temperature for latent variable sampling. "
            "can be tuple of 3 values (start, end, decay)"
        },
    )
    max_positions: int = field(default=100000, metadata={"help": "Max positions"})
    checkpoint_activations: bool = field(
        default=False,
        metadata={"help": "recompute activations and save memory for extra compute"},
    )
    # FP16 optimization
    required_seq_len_multiple: int = field(
        default=2,
        metadata={
            "help": "pad the input to encoder such that the sequence length is divisible by multiple"
        },
    )
    crop_seq_to_multiple: int = field(
        default=1,
        metadata={
            "help": "crop convolutional feature extractor output such that the sequence length is divisible by multiple"
        },
    )
    # Conformer
    depthwise_conv_kernel_size: int = field(
        default=31,
        metadata={
            "help": "depthwise-conv-kernel-size for convolution in conformer layer"
        },
    )
    attn_type: str = field(
        default="",
        metadata={"help": "if espnet use ESPNET MHA"},
    )
    pos_enc_type: str = field(
        default="abs",
        metadata={"help": "Positional encoding type to use in conformer"},
    )
    fp16: bool = field(default=False, metadata={"help": "If fp16 is being used"})
@register_model("wav2vec2", dataclass=Wav2Vec2Config)
class Wav2Vec2Model(BaseFairseqModel):
    """wav2vec 2.0 pre-training model: a convolutional feature extractor
    followed by a transformer (or conformer) context encoder, trained with a
    contrastive task over (optionally quantized) latent representations.
    """
    def __init__(self, cfg: Wav2Vec2Config):
        super().__init__()
        self.cfg = cfg
        # NOTE(review): eval() of a config-supplied string; safe only for
        # trusted configs -- never feed untrusted input here.
        feature_enc_layers = eval(cfg.conv_feature_layers)
        # embedding dim of extractor output = dim of the last conv layer
        self.embed = feature_enc_layers[-1][0]
        self.feature_extractor = ConvFeatureExtractionModel(
            conv_layers=feature_enc_layers,
            dropout=0.0,
            mode=cfg.extractor_mode,
            conv_bias=cfg.conv_bias,
        )
        # project extractor features to the encoder dim, unless they already
        # match or the input quantizer will handle the projection
        self.post_extract_proj = (
            nn.Linear(self.embed, cfg.encoder_embed_dim)
            if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input
            else None
        )
        self.crop_seq_to_multiple = cfg.crop_seq_to_multiple
        # time masking parameters
        self.mask_prob = cfg.mask_prob
        self.mask_selection = cfg.mask_selection
        self.mask_other = cfg.mask_other
        self.mask_length = cfg.mask_length
        self.no_mask_overlap = cfg.no_mask_overlap
        self.mask_min_space = cfg.mask_min_space
        # channel masking parameters
        self.mask_channel_prob = cfg.mask_channel_prob
        self.mask_channel_before = cfg.mask_channel_before
        self.mask_channel_selection = cfg.mask_channel_selection
        self.mask_channel_other = cfg.mask_channel_other
        self.mask_channel_length = cfg.mask_channel_length
        self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
        self.mask_channel_min_space = cfg.mask_channel_min_space
        self.dropout_input = nn.Dropout(cfg.dropout_input)
        self.dropout_features = nn.Dropout(cfg.dropout_features)
        self.feature_grad_mult = cfg.feature_grad_mult
        self.quantizer = None
        self.input_quantizer = None
        # negative-sampling configuration for the contrastive loss
        self.n_negatives = cfg.num_negatives
        self.cross_sample_negatives = cfg.cross_sample_negatives
        self.codebook_negatives = cfg.codebook_negatives
        self.negatives_from_everywhere = cfg.negatives_from_everywhere
        self.logit_temp = cfg.logit_temp
        final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
        if cfg.quantize_targets:
            vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
            self.quantizer = GumbelVectorQuantizer(
                dim=self.embed,
                num_vars=cfg.latent_vars,
                temp=cfg.latent_temp,
                groups=cfg.latent_groups,
                combine_groups=False,
                vq_dim=vq_dim,
                time_first=True,
                weight_proj_depth=cfg.quantizer_depth,
                weight_proj_factor=cfg.quantizer_factor,
            )
            self.project_q = nn.Linear(vq_dim, final_dim)
        else:
            self.project_q = nn.Linear(self.embed, final_dim)
        if cfg.quantize_input:
            if cfg.same_quantizer and self.quantizer is not None:
                # share the target quantizer for the inputs
                vq_dim = final_dim
                self.input_quantizer = self.quantizer
            else:
                vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim
                self.input_quantizer = GumbelVectorQuantizer(
                    dim=self.embed,
                    num_vars=cfg.latent_vars,
                    temp=cfg.latent_temp,
                    groups=cfg.latent_groups,
                    combine_groups=False,
                    vq_dim=vq_dim,
                    time_first=True,
                    weight_proj_depth=cfg.quantizer_depth,
                    weight_proj_factor=cfg.quantizer_factor,
                )
            self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
        # learned embedding that replaces masked timesteps
        self.mask_emb = nn.Parameter(
            torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
        )
        encoder_cls = TransformerEncoder
        if cfg.layer_type == "conformer" and cfg.pos_enc_type in ["rel_pos", "rope"]:
            encoder_cls = ConformerEncoder
        self.encoder = encoder_cls(cfg)
        self.layer_norm = LayerNorm(self.embed)
        self.target_glu = None
        if cfg.target_glu:
            self.target_glu = nn.Sequential(
                nn.Linear(final_dim, final_dim * 2), nn.GLU()
            )
        self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        super().upgrade_state_dict_named(state_dict, name)
        return state_dict
    @classmethod
    def build_model(cls, cfg: Wav2Vec2Config, task=None):
        """Build a new model instance."""
        return cls(cfg)
    def apply_mask(
        self,
        x,
        padding_mask,
        mask_indices=None,
        mask_channel_indices=None,
    ):
        """Apply time and/or channel masking to x (B x T x C).

        Returns the masked tensor and the boolean time-mask indices (or None
        when time masking is disabled). Precomputed indices may be supplied
        to reuse a mask across calls.
        """
        B, T, C = x.shape
        # optional channel masking applied *before* time masking, zeroing
        # whole feature channels in place
        if self.mask_channel_prob > 0 and self.mask_channel_before:
            mask_channel_indices = compute_mask_indices(
                (B, C),
                None,
                self.mask_channel_prob,
                self.mask_channel_length,
                self.mask_channel_selection,
                self.mask_channel_other,
                no_overlap=self.no_mask_channel_overlap,
                min_space=self.mask_channel_min_space,
            )
            mask_channel_indices = (
                torch.from_numpy(mask_channel_indices)
                .to(x.device)
                .unsqueeze(1)
                .expand(-1, T, -1)
            )
            x[mask_channel_indices] = 0
        if self.mask_prob > 0:
            if mask_indices is None:
                mask_indices = compute_mask_indices(
                    (B, T),
                    padding_mask,
                    self.mask_prob,
                    self.mask_length,
                    self.mask_selection,
                    self.mask_other,
                    min_masks=2,
                    no_overlap=self.no_mask_overlap,
                    min_space=self.mask_min_space,
                    require_same_masks=self.cfg.require_same_masks,
                    mask_dropout=self.cfg.mask_dropout,
                )
                mask_indices = torch.from_numpy(mask_indices).to(x.device)
            # replace masked timesteps with the learned mask embedding
            x = index_put(x, mask_indices, self.mask_emb)
        else:
            mask_indices = None
        # channel masking applied *after* time masking sets channels to 0
        if self.mask_channel_prob > 0 and not self.mask_channel_before:
            if mask_channel_indices is None:
                mask_channel_indices = compute_mask_indices(
                    (B, C),
                    None,
                    self.mask_channel_prob,
                    self.mask_channel_length,
                    self.mask_channel_selection,
                    self.mask_channel_other,
                    no_overlap=self.no_mask_channel_overlap,
                    min_space=self.mask_channel_min_space,
                )
                mask_channel_indices = (
                    torch.from_numpy(mask_channel_indices)
                    .to(x.device)
                    .unsqueeze(1)
                    .expand(-1, T, -1)
                )
            x = index_put(x, mask_channel_indices, 0)
        return x, mask_indices
    def sample_negatives(self, y, num, padding_count=None):
        """Sample negative examples for the contrastive task from y (B x T x C).

        Returns (negatives of shape N x B x num x C, flat indices).
        NOTE(review): when both negative counts are 0 this returns a single
        empty tensor, not a tuple -- callers that unpack two values would
        fail in that case.
        """
        if self.n_negatives == 0 and self.cross_sample_negatives == 0:
            return y.new(0)
        bsz, tsz, fsz = y.shape
        y = y.view(-1, fsz)  # BTC => (BxT)C
        # FIXME: what happens if padding_count is specified?
        cross_high = tsz * bsz
        high = tsz - (padding_count or 0)
        with torch.no_grad():
            assert high > 1, f"{bsz,tsz,fsz}"
            if self.n_negatives > 0:
                tszs = (
                    buffered_arange(num)
                    .unsqueeze(-1)
                    .expand(-1, self.n_negatives)
                    .flatten()
                )
                neg_idxs = torch.randint(
                    low=0, high=high - 1, size=(bsz, self.n_negatives * num)
                )
                # shift indices >= the positive's own index so the positive
                # is never drawn as its own negative
                neg_idxs[neg_idxs >= tszs] += 1
            if self.cross_sample_negatives > 0:
                tszs = (
                    buffered_arange(num)
                    .unsqueeze(-1)
                    .expand(-1, self.cross_sample_negatives)
                    .flatten()
                )
                cross_neg_idxs = torch.randint(
                    low=0,
                    high=cross_high - 1,
                    size=(bsz, self.cross_sample_negatives * num),
                )
                cross_neg_idxs[cross_neg_idxs >= tszs] += 1
        if self.n_negatives > 0:
            # offset per-sample indices into the flattened (B*T) dimension
            neg_idxs = neg_idxs + (torch.arange(bsz).unsqueeze(1) * high)
        else:
            neg_idxs = cross_neg_idxs
        if self.cross_sample_negatives > 0 and self.n_negatives > 0:
            neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
        negs = y[neg_idxs.view(-1)]
        negs = negs.view(
            bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
        ).permute(
            2, 0, 1, 3
        )  # to NxBxTxC
        return negs, neg_idxs
    def compute_preds(self, x, y, negatives):
        """Cosine-similarity logits of predictions x against the positive y
        (row 0) and the negatives (rows 1..N), scaled by logit_temp.
        Negatives identical to the positive are masked out with -inf
        (or a large negative constant on XLA).
        """
        neg_is_pos = (y == negatives).all(-1)
        y = y.unsqueeze(0)
        targets = torch.cat([y, negatives], dim=0)
        logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1)
        logits = logits / self.logit_temp
        logits = logits.type_as(x)
        if is_xla_tensor(logits) or neg_is_pos.any():
            if not hasattr(self, "_inftensor"):
                # XLA cannot hold -inf; use a very large negative value there
                fillval = -float(2**30)
                self._inftensor = (
                    torch.tensor(fillval).to(x.device)
                    if is_xla_tensor(logits)
                    else float("-inf")
                )
            logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
        return logits
    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """
        def _conv_out_length(input_length, kernel_size, stride):
            # standard conv output-length formula (no padding, dilation 1)
            return torch.floor((input_length - kernel_size) / stride + 1)
        conv_cfg_list = eval(self.cfg.conv_feature_layers)
        for i in range(len(conv_cfg_list)):
            input_lengths = _conv_out_length(
                input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
            )
        return input_lengths.to(torch.long)
    def forward(
        self,
        source,
        padding_mask=None,
        mask=True,
        features_only=False,
        layer=None,
        mask_indices=None,
        mask_channel_indices=None,
        padding_count=None,
    ):
        """Run the full pre-training forward pass.

        With features_only=True, returns encoder features and padding mask
        without the contrastive machinery. Otherwise returns a dict with the
        contrastive logits ("x"), the feature penalty and, when a quantizer
        is used, its perplexity/temperature diagnostics.
        """
        # feature_grad_mult scales (or with 0, blocks) gradients through the
        # conv feature extractor
        if self.feature_grad_mult > 0:
            features = self.feature_extractor(source)
            if self.feature_grad_mult != 1.0:
                features = GradMultiply.apply(features, self.feature_grad_mult)
        else:
            with torch.no_grad():
                features = self.feature_extractor(source)
        # L2 penalty on the raw feature magnitudes
        features_pen = features.float().pow(2).mean()
        features = features.transpose(1, 2)
        features = self.layer_norm(features)
        unmasked_features = features.clone()
        if padding_mask is not None and padding_mask.any():
            input_lengths = (1 - padding_mask.long()).sum(-1)
            # apply conv formula to get real output_lengths
            output_lengths = self._get_feat_extract_output_lengths(input_lengths)
            padding_mask = torch.zeros(
                features.shape[:2], dtype=features.dtype, device=features.device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            padding_mask[
                (
                    torch.arange(padding_mask.shape[0], device=padding_mask.device),
                    output_lengths - 1,
                )
            ] = 1
            padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
        else:
            padding_mask = None
        # crop so the sequence length is a multiple of crop_seq_to_multiple
        time_steps_to_drop = features.size(1) % self.crop_seq_to_multiple
        if time_steps_to_drop != 0:
            features = features[:, :-time_steps_to_drop]
            unmasked_features = unmasked_features[:, :-time_steps_to_drop]
            if padding_mask is not None:
                padding_mask = padding_mask[:, :-time_steps_to_drop]
        if self.post_extract_proj is not None:
            features = self.post_extract_proj(features)
        features = self.dropout_input(features)
        unmasked_features = self.dropout_features(unmasked_features)
        num_vars = None
        code_ppl = None
        prob_ppl = None
        curr_temp = None
        if self.input_quantizer:
            q = self.input_quantizer(features, produce_targets=False)
            features = q["x"]
            num_vars = q["num_vars"]
            code_ppl = q["code_perplexity"]
            prob_ppl = q["prob_perplexity"]
            curr_temp = q["temp"]
            features = self.project_inp(features)
        if mask:
            x, mask_indices = self.apply_mask(
                features,
                padding_mask,
                mask_indices=mask_indices,
                mask_channel_indices=mask_channel_indices,
            )
            if not is_xla_tensor(x) and mask_indices is not None:
                # tpu-comment: reducing the size in a dynamic way causes
                # too many recompilations on xla.
                y = unmasked_features[mask_indices].view(
                    unmasked_features.size(0), -1, unmasked_features.size(-1)
                )
            else:
                y = unmasked_features
        else:
            x = features
            y = unmasked_features
            mask_indices = None
        x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer)
        if features_only:
            return {
                "x": x,
                "padding_mask": padding_mask,
                "features": unmasked_features,
                "layer_results": layer_results,
            }
        if self.quantizer:
            if self.negatives_from_everywhere:
                # quantize everything and sample negatives from all positions
                q = self.quantizer(unmasked_features, produce_targets=False)
                y = q["x"]
                num_vars = q["num_vars"]
                code_ppl = q["code_perplexity"]
                prob_ppl = q["prob_perplexity"]
                curr_temp = q["temp"]
                y = self.project_q(y)
                negs, _ = self.sample_negatives(
                    y,
                    mask_indices[0].sum(),
                    padding_count=padding_count,
                )
                y = y[mask_indices].view(y.size(0), -1, y.size(-1))
            else:
                q = self.quantizer(y, produce_targets=False)
                y = q["x"]
                num_vars = q["num_vars"]
                code_ppl = q["code_perplexity"]
                prob_ppl = q["prob_perplexity"]
                curr_temp = q["temp"]
                y = self.project_q(y)
                negs, _ = self.sample_negatives(
                    y,
                    y.size(1),
                    padding_count=padding_count,
                )
            if self.codebook_negatives > 0:
                cb_negs = self.quantizer.sample_from_codebook(
                    y.size(0) * y.size(1), self.codebook_negatives
                )
                cb_negs = cb_negs.view(
                    self.codebook_negatives, y.size(0), y.size(1), -1
                )  # order doesnt matter
                cb_negs = self.project_q(cb_negs)
                negs = torch.cat([negs, cb_negs], dim=0)
        else:
            y = self.project_q(y)
            if self.negatives_from_everywhere:
                negs, _ = self.sample_negatives(
                    unmasked_features,
                    y.size(1),
                    padding_count=padding_count,
                )
                negs = self.project_q(negs)
            else:
                negs, _ = self.sample_negatives(
                    y,
                    y.size(1),
                    padding_count=padding_count,
                )
        if not is_xla_tensor(x):
            # tpu-comment: reducing the size in a dynamic way causes
            # too many recompilations on xla.
            x = x[mask_indices].view(x.size(0), -1, x.size(-1))
        if self.target_glu:
            y = self.target_glu(y)
            negs = self.target_glu(negs)
        x = self.final_proj(x)
        x = self.compute_preds(x, y, negs)
        result = {
            "x": x,
            "padding_mask": padding_mask,
            "features_pen": features_pen,
        }
        if prob_ppl is not None:
            result["prob_perplexity"] = prob_ppl
            result["code_perplexity"] = code_ppl
            result["num_vars"] = num_vars
            result["temp"] = curr_temp
        return result
    def quantize(self, x):
        """Extract features from raw audio x and return quantizer indices."""
        assert self.quantizer is not None
        x = self.feature_extractor(x)
        x = x.transpose(1, 2)
        x = self.layer_norm(x)
        return self.quantizer.forward_idx(x)
    def extract_features(self, source, padding_mask, mask=False, layer=None):
        """Convenience wrapper around forward(features_only=True)."""
        res = self.forward(
            source, padding_mask, mask=mask, features_only=True, layer=layer
        )
        return res
    def get_logits(self, net_output):
        """Flatten contrastive logits to (B*T) x (1 + num_negatives)."""
        logits = net_output["x"]
        logits = logits.transpose(0, 2)
        logits = logits.reshape(-1, logits.size(-1))
        return logits
    def get_targets(self, sample, net_output, expand_steps=True):
        """Contrastive targets: the positive is always at index 0."""
        x = net_output["x"]
        return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)
    def get_extra_losses(self, net_output):
        """Auxiliary losses: codebook diversity penalty and feature penalty."""
        pen = []
        if "prob_perplexity" in net_output:
            pen.append(
                (net_output["num_vars"] - net_output["prob_perplexity"])
                / net_output["num_vars"]
            )
        if "features_pen" in net_output:
            pen.append(net_output["features_pen"])
        return pen
    def remove_pretraining_modules(self, last_layer=None):
        """Drop modules only needed for pre-training (e.g. before fine-tuning);
        optionally truncate the encoder after last_layer."""
        self.quantizer = None
        self.project_q = None
        self.target_glu = None
        self.final_proj = None
        if last_layer is not None:
            self.encoder.layers = nn.ModuleList(
                l for i, l in enumerate(self.encoder.layers) if i <= last_layer
            )
class ConvFeatureExtractionModel(nn.Module):
    """Stack of 1d convolution blocks mapping raw waveform (B x T) to
    features (B x C x T'), with optional layer/group normalization."""
    def __init__(
        self,
        conv_layers: List[Tuple[int, int, int]],
        dropout: float = 0.0,
        mode: str = "default",
        conv_bias: bool = False,
    ):
        super().__init__()
        assert mode in {"default", "layer_norm"}
        def block(
            n_in,
            n_out,
            k,
            stride,
            is_layer_norm=False,
            is_group_norm=False,
            conv_bias=False,
        ):
            def make_conv():
                conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
                nn.init.kaiming_normal_(conv.weight)
                return conv
            assert (
                is_layer_norm and is_group_norm
            ) == False, "layer norm and group norm are exclusive"
            if is_layer_norm:
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    nn.Sequential(
                        TransposeLast(),
                        # NOTE: `dim` is not a parameter -- it is read from the
                        # enclosing loop below at call time (late-binding
                        # closure); block() is only called inside that loop.
                        Fp32LayerNorm(dim, elementwise_affine=True),
                        TransposeLast(),
                    ),
                    nn.GELU(),
                )
            elif is_group_norm:
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    Fp32GroupNorm(dim, dim, affine=True),
                    nn.GELU(),
                )
            else:
                return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
        in_d = 1
        self.conv_layers = nn.ModuleList()
        for i, cl in enumerate(conv_layers):
            assert len(cl) == 3, "invalid conv definition: " + str(cl)
            (dim, k, stride) = cl
            self.conv_layers.append(
                block(
                    in_d,
                    dim,
                    k,
                    stride,
                    is_layer_norm=mode == "layer_norm",
                    # "default" mode: group norm in the first block only
                    is_group_norm=mode == "default" and i == 0,
                    conv_bias=conv_bias,
                )
            )
            in_d = dim
    def forward(self, x):
        # BxT -> BxCxT
        x = x.unsqueeze(1)
        for conv in self.conv_layers:
            x = conv(x)
        return x
def make_conv_pos(e, k, g):
    """Build the convolutional positional-embedding module.

    Args:
        e: embedding dimension (conv channels in == out).
        k: kernel size of the 1d convolution.
        g: number of convolution groups.

    Returns:
        nn.Sequential of weight-normed Conv1d -> SamePad -> GELU.
    """
    conv = nn.Conv1d(
        e,
        e,
        kernel_size=k,
        padding=k // 2,
        groups=g,
    )
    # wav2vec 2.0 init: std derived from kernel size and embedding dim
    # (the dropout term is kept at 0, mirroring the reference code)
    dropout = 0
    std = math.sqrt((4 * (1.0 - dropout)) / (k * e))
    nn.init.normal_(conv.weight, mean=0, std=std)
    nn.init.constant_(conv.bias, 0)
    conv = nn.utils.weight_norm(conv, name="weight", dim=2)
    return nn.Sequential(conv, SamePad(k), nn.GELU())
class TransformerEncoder(nn.Module):
    """Transformer context encoder with convolutional positional embeddings,
    layerdrop, and optional padding to a sequence-length multiple.

    Fix over previous revision: ``max_positions()`` read ``self.args``, which
    ``__init__`` never assigned, so calling it raised AttributeError (only the
    ConformerEncoder subclass set ``self.args``). ``args`` is now stored.
    """
    def build_encoder_layer(self, args: Wav2Vec2Config):
        """Build one encoder layer according to args.layer_type, optionally
        wrapped for FSDP and activation checkpointing."""
        if args.layer_type == "transformer":
            layer = TransformerSentenceEncoderLayer(
                embedding_dim=self.embedding_dim,
                ffn_embedding_dim=args.encoder_ffn_embed_dim,
                num_attention_heads=args.encoder_attention_heads,
                dropout=self.dropout,
                attention_dropout=args.attention_dropout,
                activation_dropout=args.activation_dropout,
                activation_fn=args.activation_fn,
                layer_norm_first=args.layer_norm_first,
            )
        elif args.layer_type == "conformer":
            layer = ConformerWav2Vec2EncoderLayer(
                embed_dim=self.embedding_dim,
                ffn_embed_dim=args.encoder_ffn_embed_dim,
                attention_heads=args.encoder_attention_heads,
                dropout=args.dropout,
                depthwise_conv_kernel_size=args.depthwise_conv_kernel_size,
                activation_fn="swish",
                attn_type=args.attn_type,
                use_fp16=args.fp16,
                pos_enc_type="abs",
            )
        layer = fsdp_wrap(layer)
        if args.checkpoint_activations:
            layer = checkpoint_wrapper(layer)
        return layer
    def __init__(self, args: Wav2Vec2Config):
        super().__init__()
        # stored so max_positions() can read args.max_positions
        self.args = args
        self.dropout = args.dropout
        self.embedding_dim = args.encoder_embed_dim
        self.required_seq_len_multiple = args.required_seq_len_multiple
        pos_conv_depth = getattr(args, "pos_conv_depth", 1)
        if pos_conv_depth > 1:
            num_layers = args.pos_conv_depth
            k = max(3, args.conv_pos // num_layers)
            def make_conv_block(e, k, g, l):
                # stack of l conv blocks, each with SamePad + LayerNorm + GELU
                return nn.Sequential(
                    *[
                        nn.Sequential(
                            nn.Conv1d(
                                e,
                                e,
                                kernel_size=k,
                                padding=k // 2,
                                groups=g,
                            ),
                            SamePad(k),
                            TransposeLast(),
                            LayerNorm(e, elementwise_affine=False),
                            TransposeLast(),
                            nn.GELU(),
                        )
                        for _ in range(l)
                    ]
                )
            self.pos_conv = make_conv_block(
                self.embedding_dim, k, args.conv_pos_groups, num_layers
            )
        else:
            self.pos_conv = make_conv_pos(
                self.embedding_dim,
                args.conv_pos,
                args.conv_pos_groups,
            )
        self.layers = nn.ModuleList(
            [self.build_encoder_layer(args) for _ in range(args.encoder_layers)]
        )
        self.layer_norm_first = args.layer_norm_first
        self.layer_norm = LayerNorm(self.embedding_dim)
        self.layerdrop = args.encoder_layerdrop
        self.apply(init_bert_params)
    def forward(self, x, padding_mask=None, layer=None):
        """Encode x (B x T x C); optionally stop at encoder layer ``layer``."""
        x, layer_results = self.extract_features(x, padding_mask, layer)
        # when extracting a specific layer, skip the final norm
        if self.layer_norm_first and layer is None:
            x = self.layer_norm(x)
        return x, layer_results
    def extract_features(
        self,
        x,
        padding_mask=None,
        tgt_layer=None,
        min_layer=0,
    ):
        """Run positional conv + encoder layers, returning (x, layer_results).

        When tgt_layer is set, processing stops after that layer and its
        output is returned; layer_results collects (x, attn, inner) tuples
        for layers >= min_layer.
        """
        if padding_mask is not None:
            x = index_put(x, padding_mask, 0)
        x_conv = self.pos_conv(x.transpose(1, 2))
        x_conv = x_conv.transpose(1, 2)
        x = x + x_conv
        if not self.layer_norm_first:
            x = self.layer_norm(x)
        # pad to the sequence length dimension
        x, pad_length = pad_to_multiple(
            x, self.required_seq_len_multiple, dim=-2, value=0
        )
        if pad_length > 0 and padding_mask is None:
            padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool)
            padding_mask[:, -pad_length:] = True
        else:
            padding_mask, _ = pad_to_multiple(
                padding_mask, self.required_seq_len_multiple, dim=-1, value=True
            )
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        layer_results = []
        r = None
        for i, layer in enumerate(self.layers):
            # layerdrop: randomly skip layers during training
            dropout_probability = np.random.random() if self.layerdrop > 0 else 1
            if not self.training or (dropout_probability > self.layerdrop):
                x, (z, lr) = layer(
                    x, self_attn_padding_mask=padding_mask, need_weights=False
                )
                if i >= min_layer:
                    layer_results.append((x, z, lr))
            if i == tgt_layer:
                r = x
                break
        if r is not None:
            x = r
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        # undo paddding
        if pad_length > 0:
            x = x[:, :-pad_length]
            def undo_pad(a, b, c):
                return (
                    a[:-pad_length],
                    b[:-pad_length] if b is not None else b,
                    c[:-pad_length],
                )
            layer_results = [undo_pad(*u) for u in layer_results]
        return x, layer_results
    def max_positions(self):
        """Maximum output length supported by the encoder."""
        return self.args.max_positions
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        return state_dict
class ConformerEncoder(TransformerEncoder):
    """Conformer variant of the context encoder supporting relative or rotary
    positional encodings ("rel_pos" / "rope")."""
    def build_encoder_layer(self, args):
        """Build one conformer layer using the configured pos_enc_type."""
        layer = ConformerWav2Vec2EncoderLayer(
            embed_dim=self.embedding_dim,
            ffn_embed_dim=args.encoder_ffn_embed_dim,
            attention_heads=args.encoder_attention_heads,
            dropout=args.dropout,
            depthwise_conv_kernel_size=args.depthwise_conv_kernel_size,
            activation_fn="swish",
            attn_type=args.attn_type,
            pos_enc_type=args.pos_enc_type,
            use_fp16=args.fp16,  # only used for rope
        )
        layer = fsdp_wrap(layer)
        if args.checkpoint_activations:
            layer = checkpoint_wrapper(layer)
        return layer
    def __init__(self, args):
        # NOTE(review): super().__init__ already builds layers via the
        # overridden build_encoder_layer; they are rebuilt below, so the
        # first construction is discarded -- wasteful but harmless.
        super().__init__(args)
        self.args = args
        self.dropout = args.dropout
        self.embedding_dim = args.encoder_embed_dim
        self.pos_enc_type = args.pos_enc_type
        max_source_positions = self.max_positions()
        if self.pos_enc_type == "rel_pos":
            self.embed_positions = RelPositionalEncoding(
                max_source_positions, self.embedding_dim
            )
        elif self.pos_enc_type == "rope":
            # rotary embeddings are applied inside the attention layers
            self.embed_positions = None
        else:
            raise Exception("Unsupported positional encoding type")
        self.layers = nn.ModuleList(
            [self.build_encoder_layer(args) for _ in range(args.encoder_layers)]
        )
        self.layer_norm_first = args.layer_norm_first
        self.layer_norm = LayerNorm(self.embedding_dim)
        self.layerdrop = args.encoder_layerdrop
        self.apply(init_bert_params)
    def extract_features(self, x, padding_mask=None, tgt_layer=None):
        """Run the conformer layers, returning (x, layer_results);
        layer_results is only populated when tgt_layer is set."""
        if padding_mask is not None:
            x = index_put(x, padding_mask, 0)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # B X T X C here
        position_emb = None
        if self.pos_enc_type == "rel_pos":
            position_emb = self.embed_positions(x)
        if not self.layer_norm_first:
            x = self.layer_norm(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        layer_results = []
        r = None
        for i, layer in enumerate(self.layers):
            # NOTE(review): unlike TransformerEncoder.extract_features, the
            # RNG is drawn even when layerdrop == 0, so the two encoders
            # consume the numpy RNG stream differently -- confirm intended.
            dropout_probability = np.random.random()
            if not self.training or (dropout_probability > self.layerdrop):
                x, z = layer(
                    x,
                    self_attn_padding_mask=padding_mask,
                    need_weights=False,
                    position_emb=position_emb,
                )
            if tgt_layer is not None:
                layer_results.append((x, z))
            if i == tgt_layer:
                r = x
                break
        if r is not None:
            x = r
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        return x, layer_results
class TransformerSentenceEncoderLayer(nn.Module):
    """
    Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
    models.

    Supports both pre-LN (``layer_norm_first=True``) and post-LN ordering of
    the self-attention and feed-forward sublayers.
    """

    def __init__(
        self,
        embedding_dim: float = 768,
        ffn_embedding_dim: float = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = "relu",
        layer_norm_first: bool = False,
    ) -> None:
        super().__init__()
        # Initialize parameters
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout

        # Initialize blocks
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.self_attn = MultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            self_attention=True,
        )

        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(self.activation_dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.layer_norm_first = layer_norm_first

        # layer norm associated with the self attention layer
        self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)

        # layer norm associated with the position wise feed-forward NN
        self.final_layer_norm = LayerNorm(self.embedding_dim)

    def forward(
        self,
        x: torch.Tensor,
        self_attn_mask: torch.Tensor = None,
        self_attn_padding_mask: torch.Tensor = None,
        need_weights: bool = False,
        att_args=None,
    ):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.

        Returns ``(x, (attn, layer_result))`` where ``layer_result`` is the
        FFN output before the final dropout/residual.
        """
        residual = x

        if self.layer_norm_first:
            x = self.self_attn_layer_norm(x)
            x, attn = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
                attn_mask=self_attn_mask,
                need_weights=False,
            )
            x = self.dropout1(x)
            x = residual + x

            residual = x
            x = self.final_layer_norm(x)
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)

            layer_result = x

            x = self.dropout3(x)
            x = residual + x
        else:
            x, attn = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
                # FIX: the post-LN path previously omitted attn_mask, so a
                # caller-supplied self_attn_mask was silently ignored here
                # while the pre-LN path honored it. Forward it consistently.
                attn_mask=self_attn_mask,
                need_weights=False,
            )
            x = self.dropout1(x)
            x = residual + x
            x = self.self_attn_layer_norm(x)

            residual = x
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)

            layer_result = x

            x = self.dropout3(x)
            x = residual + x
            x = self.final_layer_norm(x)

        return x, (attn, layer_result)
|
|
"""Plugin for plex media server (www.plexapp.com)."""
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import re
import logging
import os
from datetime import datetime
from os.path import basename
from socket import gethostbyname
from xml.dom.minidom import parseString
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests
log = logging.getLogger('plex')
class InputPlex(object):
    """
    Uses a plex media server (www.plexapp.com) tv section as an input.
    'section'            Required parameter, numerical (/library/sections/<num>) or section name.
    'selection'          Can be set to different keys:
                         - all                : Default
                         - unwatched          :
                         - recentlyAdded      :
                         - recentlyViewed     :
                         - recentlyViewedShows: Series only.
                         'all' and 'recentlyViewedShows' will only produce a list of show names while the other three will produce
                         filename and download url.
    'token'              Plex access token, used to connect to PMS
    'username'           Myplex (http://my.plexapp.com) username, used to connect to shared PMS'.
    'password'           Myplex (http://my.plexapp.com) password, used to connect to shared PMS'.
    'server'             Host/IP of PMS to connect to.
    'lowercase_title'    Convert filename (title) to lower case.
    'strip_non_alpha'    Sanitize filename (title), stripping all non-alphanumeric letters.
                         Better to turn off in case of non-english titles.
    'strip_year'         Remove year from title, ex: Show Name (2012) 01x01 => Show Name 01x01.
                         Movies will have year added to their filename unless this is set.
    'strip_parens'       Remove information in parens from title, ex: Show Name (UK)(2012) 01x01 => Show Name 01x01.
    'original_filename'  Use filename stored in PMS instead of transformed name. lowercase_title and strip_year
                         will be ignored.
    'unwatched_only'     Request only unwatched media from PMS.
    'fetch'              What to download, can be set to the following values:
                         - file               The file itself, default.
                         - art                Series or movie art as configured in PMS
                         - cover              Series cover for series, movie cover for movies.
                         - thumb              Episode thumbnail, series only.
                         - season_cover       Season cover, series only. If used in movies, movie cover will be set.

    Default parameters:
        server           : localhost
        port             : 32400
        selection        : all
        lowercase_title  : no
        strip_non_alpha  : yes
        strip_year       : yes
        strip_parens     : no
        original_filename: no
        unwatched_only   : no
        fetch            : file

    Example:

        plex:
          server: 192.168.1.23
          section: 3
          selection: recentlyAdded
          fetch: series_art
    """

    schema = {
        'type': 'object',
        'properties': {
            'server': {'type': 'string', 'default': '127.0.0.1'},
            'port': {'type': 'integer', 'default': 32400},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'token': {'type': 'string'},
            'section': {'type': ['string', 'integer']},
            'selection': {'type': 'string', 'default': 'all'},
            'lowercase_title': {'type': 'boolean', 'default': False},
            'strip_non_alpha': {'type': 'boolean', 'default': True},
            'strip_year': {'type': 'boolean', 'default': True},
            'strip_parens': {'type': 'boolean', 'default': False},
            'original_filename': {'type': 'boolean', 'default': False},
            'unwatched_only': {'type': 'boolean', 'default': False},
            'fetch': {
                'type': 'string',
                'default': 'file',
                'enum': ['file', 'art', 'cover', 'thumb', 'season_cover'],
            },
        },
        'required': ['section'],
        'not': {
            'anyOf': [{'required': ['token', 'username']}, {'required': ['token', 'password']}]
        },
        'error_not': 'Cannot specify `username` and `password` with `token`',
        'dependencies': {'username': ['password'], 'password': ['username']},
        'additionalProperties': False,
    }

    def prepare_config(self, config):
        """Normalize config: remember the original server value and resolve
        the hostname to an IP."""
        config['plexserver'] = config['server']
        config = self.plex_format_server(config)
        return config

    def plex_get_globalaccesstoken(self, config):
        """Sign in to myplex with username/password and return the global
        authentication token."""
        header = {'X-Plex-Client-Identifier': 'flexget'}
        try:
            r = requests.post(
                'https://my.plexapp.com/users/sign_in.xml',
                auth=(config['username'], config['password']),
                headers=header,
            )
        except requests.RequestException as error:
            raise plugin.PluginError('Could not log in to myplex! Error: %s' % error)
        if 'Invalid email' in r.text:
            raise plugin.PluginError('Myplex: invalid username and/or password!')
        dom = parseString(r.text)
        globalaccesstoken = dom.getElementsByTagName('authentication-token')[
            0
        ].firstChild.nodeValue
        if not globalaccesstoken:
            raise plugin.PluginError('Myplex: could not find a server!')
        else:
            log.debug('Myplex: Got global accesstoken: %s', globalaccesstoken)
        return globalaccesstoken

    def plex_get_accesstoken(self, config, globalaccesstoken=""):
        """Resolve the per-server access token for config['server'], signing
        in first if no global token was supplied."""
        accesstoken = None
        if not globalaccesstoken:
            globalaccesstoken = self.plex_get_globalaccesstoken(config)
        if config['server'] in ('localhost', '127.0.0.1'):
            log.debug('Server using localhost. Global Token will be used')
            return globalaccesstoken
        try:
            r = requests.get(
                "https://my.plexapp.com/pms/servers?X-Plex-Token=%s" % globalaccesstoken
            )
        except requests.RequestException as e:
            raise plugin.PluginError(
                "Could not get servers from my.plexapp.com using "
                "authentication-token: %s. (%s)" % (globalaccesstoken, e)
            )
        dom = parseString(r.text)
        for node in dom.getElementsByTagName('Server'):
            if config['server'] in (
                node.getAttribute('address'),
                node.getAttribute('localAddresses'),
            ):
                accesstoken = node.getAttribute('accessToken')
                log.debug("Got plextoken: %s", accesstoken)
        if not accesstoken:
            raise plugin.PluginError('Could not retrieve accesstoken for %s.' % config['server'])
        else:
            return accesstoken

    def plex_format_server(self, config):
        """Replace a hostname in config['server'] with its resolved IP."""
        if gethostbyname(config['server']) != config['server']:
            config['server'] = gethostbyname(config['server'])
        return config

    def plex_section_is_int(self, section):
        """True when the section is already a numeric library key."""
        return isinstance(section, int)

    def on_task_input(self, task, config):
        """Query the PMS section and build entries for the selected media."""
        config = self.prepare_config(config)
        urlconfig = {}
        urlappend = "?"
        entries = []
        if (
            config['unwatched_only']
            and config['section'] != 'recentlyViewedShows'
            and config['section'] != 'all'
        ):
            urlconfig['unwatched'] = '1'
        if config.get('token'):
            accesstoken = config['token']
            log.debug("Using accesstoken: %s", accesstoken)
            urlconfig['X-Plex-Token'] = accesstoken
        elif config.get('username'):
            accesstoken = self.plex_get_accesstoken(config)
            log.debug("Got accesstoken: %s", accesstoken)
            urlconfig['X-Plex-Token'] = accesstoken

        for key in urlconfig:
            urlappend += '%s=%s&' % (key, urlconfig[key])

        # Translate a section *name* into its numerical key.
        if not self.plex_section_is_int(config['section']):
            try:
                path = "/library/sections/"
                r = requests.get(
                    "http://%s:%d%s%s" % (config['plexserver'], config['port'], path, urlappend)
                )
            except requests.RequestException as e:
                raise plugin.PluginError('Error retrieving source: %s' % e)
            dom = parseString(r.text.encode("utf-8"))
            for node in dom.getElementsByTagName('Directory'):
                if node.getAttribute('title') == config['section']:
                    config['section'] = int(node.getAttribute('key'))
        if not self.plex_section_is_int(config['section']):
            raise plugin.PluginError('Could not find section \'%s\'' % config['section'])

        log.debug(
            "Fetching http://%s:%d/library/sections/%s/%s%s",
            config['server'],
            config['port'],
            config['section'],
            config['selection'],
            urlappend,
        )
        try:
            path = "/library/sections/%s/%s" % (config['section'], config['selection'])
            r = requests.get(
                "http://%s:%d%s%s" % (config['plexserver'], config['port'], path, urlappend)
            )
        except requests.RequestException as e:
            raise plugin.PluginError(
                'There is no section with number %d. (%s)' % (config['section'], e)
            )
        dom = parseString(r.text.encode("utf-8"))
        plexsectionname = dom.getElementsByTagName('MediaContainer')[0].getAttribute('title1')
        viewgroup = dom.getElementsByTagName('MediaContainer')[0].getAttribute('viewGroup')

        log.debug("Plex section \"%s\" is a \"%s\" section", plexsectionname, viewgroup)
        if viewgroup != "movie" and viewgroup != "show" and viewgroup != "episode":
            raise plugin.PluginError("Section is neither a movie nor tv show section!")

        # Choose which DOM nodes/attributes carry the title and artwork
        # depending on the section type.
        domroot = "Directory"
        titletag = "title"
        if viewgroup == "episode":
            domroot = "Video"
            titletag = "grandparentTitle"
            thumbtag = "thumb"
            arttag = "art"
            seasoncovertag = "parentThumb"
            covertag = "grandparentThumb"
        elif viewgroup == "movie":
            domroot = "Video"
            titletag = "title"
            arttag = "art"
            seasoncovertag = "thumb"
            covertag = "thumb"
            if config['fetch'] == "thumb":
                raise plugin.PluginError(
                    "Movie sections does not have any thumbnails to download!"
                )

        for node in dom.getElementsByTagName(domroot):
            e = Entry()
            e['plex_server'] = config['plexserver']
            e['plex_port'] = config['port']
            e['plex_section'] = config['section']
            e['plex_section_name'] = plexsectionname
            e['plex_episode_thumb'] = ''

            title = node.getAttribute(titletag)
            if config['strip_year']:
                title = re.sub(r'^(.*)\(\d{4}\)(.*)', r'\1\2', title)
            if config['strip_parens']:
                title = re.sub(r'\(.*?\)', r'', title)
                title = title.strip()
            if config['strip_non_alpha']:
                title = re.sub(r'[\(\)]', r'', title)
                title = re.sub(r'&', r'And', title)
                title = re.sub(r'[^A-Za-z0-9- \']', r'', title)
            if config['lowercase_title']:
                title = title.lower()

            if viewgroup == "show":
                e['title'] = title
                e['url'] = 'NULL'
                entries.append(e)
                # show ends here.
                continue

            e['plex_art'] = "http://%s:%d%s%s" % (
                config['server'],
                config['port'],
                node.getAttribute(arttag),
                urlappend,
            )
            e['plex_cover'] = "http://%s:%d%s%s" % (
                config['server'],
                config['port'],
                node.getAttribute(covertag),
                urlappend,
            )
            e['plex_season_cover'] = "http://%s:%d%s%s" % (
                config['server'],
                config['port'],
                node.getAttribute(seasoncovertag),
                urlappend,
            )

            if viewgroup == "episode":
                e['plex_thumb'] = "http://%s:%d%s%s" % (
                    config['server'],
                    config['port'],
                    node.getAttribute('thumb'),
                    urlappend,
                )
                e['series_name'] = title
                e['plex_ep_name'] = node.getAttribute('title')
                season = int(node.getAttribute('parentIndex'))
                if node.getAttribute('parentIndex') == node.getAttribute('year'):
                    # Date-based show: the "season" is the original air date.
                    season = node.getAttribute('originallyAvailableAt')
                    filenamemap = "%s_%s%s_%s_%s_%s.%s"
                    episode = ""
                    e['series_id_type'] = 'date'
                    e['series_date'] = season
                elif node.getAttribute('index'):
                    episode = int(node.getAttribute('index'))
                    filenamemap = "%s_%02dx%02d_%s_%s_%s.%s"
                    e['series_season'] = season
                    e['series_episode'] = episode
                    e['series_id_type'] = 'ep'
                    e['series_id'] = 'S%02dE%02d' % (season, episode)
                else:
                    log.debug(
                        "Could not get episode number for '%s' (Hint, ratingKey: %s)",
                        title,
                        node.getAttribute('ratingKey'),
                    )
                    # NOTE(review): `break` aborts the whole node loop on one
                    # bad episode; `continue` may be intended — TODO confirm.
                    break
            elif viewgroup == "movie":
                filenamemap = "%s_%s_%s_%s.%s"

            e['plex_year'] = node.getAttribute('year')
            e['plex_added'] = datetime.fromtimestamp(int(node.getAttribute('addedAt')))
            e['plex_duration'] = node.getAttribute('duration')
            e['plex_summary'] = node.getAttribute('summary')
            e['plex_userrating'] = node.getAttribute('userrating')
            e['plex_key'] = node.getAttribute('ratingKey')
            count = node.getAttribute('viewCount')
            offset = node.getAttribute('viewOffset')
            if count:
                e['plex_status'] = "seen"
            elif offset:
                e['plex_status'] = "inprogress"
            else:
                e['plex_status'] = "unwatched"

            for media in node.getElementsByTagName('Media'):
                entry = Entry(e)
                vcodec = media.getAttribute('videoCodec')
                acodec = media.getAttribute('audioCodec')
                if media.hasAttribute('title'):
                    entry['plex_media_title'] = media.getAttribute('title')
                if media.hasAttribute('optimizedForStreaming'):
                    entry['plex_stream_optimized'] = media.getAttribute('optimizedForStreaming')
                if config['fetch'] == "file" or not config['fetch']:
                    container = media.getAttribute('container')
                else:
                    # Artwork downloads are always jpg regardless of media.
                    container = "jpg"
                resolution = media.getAttribute('videoResolution') + "p"
                for part in media.getElementsByTagName('Part'):
                    if config['fetch'] == "file" or not config['fetch']:
                        key = part.getAttribute('key')
                    elif config['fetch'] == "art":
                        key = node.getAttribute(arttag)
                    elif config['fetch'] == "cover":
                        # FIX: previously used arttag here, so fetch=cover
                        # downloaded the art image instead of the cover.
                        key = node.getAttribute(covertag)
                    elif config['fetch'] == "season_cover":
                        key = node.getAttribute(seasoncovertag)
                    elif config['fetch'] == "thumb":
                        key = node.getAttribute(thumbtag)
                    # key = part.getAttribute('key')
                    duration = part.getAttribute('duration')
                    entry['plex_title'] = title
                    entry['title'] = title
                    if config['original_filename']:
                        filename, fileext = os.path.splitext(basename(part.getAttribute('file')))
                        if config['fetch'] != 'file':
                            filename += ".jpg"
                        else:
                            filename = "%s%s" % (filename, fileext)
                    else:
                        if viewgroup == "episode":
                            filename = filenamemap % (
                                title.replace(" ", "."),
                                season,
                                episode,
                                resolution,
                                vcodec,
                                acodec,
                                container,
                            )
                            entry['title'] = filename
                        elif viewgroup == "movie":
                            filename = filenamemap % (
                                title.replace(" ", "."),
                                resolution,
                                vcodec,
                                acodec,
                                container,
                            )
                            entry['title'] = filename
                    entry['plex_url'] = "http://%s:%d%s%s" % (
                        config['server'],
                        config['port'],
                        key,
                        urlappend,
                    )
                    entry['plex_path'] = key
                    entry['url'] = "http://%s:%d%s%s" % (
                        config['server'],
                        config['port'],
                        key,
                        urlappend,
                    )
                    entry['plex_duration'] = duration
                    entry['filename'] = filename
                    if key == "":
                        log.debug("Could not find anything in PMS to download. Next!")
                    else:
                        entries.append(entry)
        return entries
@event('plugin.register')
def register_plugin():
    # Register InputPlex under the plugin name 'plex' (task phase API v2).
    plugin.register(InputPlex, 'plex', api_ver=2)
|
|
# -*- coding: utf-8 -*-
#
# HMM Aligner
# Simon Fraser University
# NLP Lab
#
# This is the main programme of the HMM aligner
#
import sys
import os
import importlib
import argparse
import StringIO
import multiprocessing
from ConfigParser import SafeConfigParser
from loggers import logging, init_logger
from models.modelChecker import checkAlignmentModel
from fileIO import loadDataset, exportToFile, loadAlignment
__version__ = "0.6a"
if __name__ == '__main__':
    # Default values:
    config = {
        'dataDir': '',
        'sourceLanguage': '',
        'targetLanguage': '',
        'trainData': '',
        'trainDataTag': '',
        'trainAlignment': '',
        'testData': '',
        'testDataTag': '',
        'reference': '',
        'trainSize': sys.maxint,
        'testSize': sys.maxint,
        'iterations': 5,
        'model': "IBM1",
        'output': 'o.wa',
        'showFigure': 0,
        'intersect': False,
        'loadModel': "",
        'saveModel': "",
        'forceLoad': False
    }
    # Mapping from config-file option name -> config dict key, one mapping
    # per config-file section.
    configFileDataSection = {
        'DataDirectory': 'dataDir',
        'TargetLanguageSuffix': 'targetLanguage',
        'SourceLanguageSuffix': 'sourceLanguage',
    }
    configFileTrainSection = {
        'TextFilePrefix': 'trainData',
        'TagFilePrefix': 'trainDataTag',
        'AlignmentFileSuffix': 'trainAlignment'
    }
    configFileTestSection = {
        'TextFilePrefix': 'testData',
        'TagFilePrefix': 'testDataTag',
        'Reference': 'reference'
    }

    # Initialise logger
    init_logger('aligner.log')
    __logger = logging.getLogger('MAIN')

    # Dealing with arguments here
    if True:  # Adding arguments
        # Parsing the options
        ap = argparse.ArgumentParser(
            description="""SFU HMM Aligner %s""" % __version__)
        ap.add_argument(
            "-d", "--datadir", dest="dataDir",
            help="data directory")
        ap.add_argument(
            "--train", dest="trainData",
            help="prefix of training data file")
        ap.add_argument(
            "--test", dest="testData",
            help="prefix of testing data file")
        ap.add_argument(
            "--train-tag", dest="trainDataTag",
            help="prefix of training tag file")
        ap.add_argument(
            "--test-tag", dest="testDataTag",
            help="prefix of testing tag file")
        ap.add_argument(
            "--source", dest="sourceLanguage",
            help="suffix of source language")
        ap.add_argument(
            "--target", dest="targetLanguage",
            help="suffix of target language")
        ap.add_argument(
            "-a", "--alignment", dest="trainAlignment",
            help="suffix of alignment file")
        ap.add_argument(
            "-n", "--trainSize", dest="trainSize", type=int,
            help="Number of sentences to use for training")
        ap.add_argument(
            "-v", "--testSize", dest="testSize", type=int,
            help="Number of sentences to use for testing")
        ap.add_argument(
            "-i", "--iterations", dest="iterations", type=int,
            help="Number of iterations to train")
        ap.add_argument(
            "-m", "--model", dest="model",
            help="model to use, default is IBM1Old")
        ap.add_argument(
            "-r", "--reference", dest="reference",
            help="Location of reference file")
        ap.add_argument(
            "-o", "--outputToFile", dest="output",
            help="Path to output file")
        ap.add_argument(
            "-c", "--config", dest="config",
            help="Path to config file")
        ap.add_argument(
            "-s", "--saveModel", dest="saveModel",
            help="Where to save the model")
        ap.add_argument(
            "-l", "--loadModel", dest="loadModel",
            help="Specify the model file to load")
        ap.add_argument(
            "--forceLoad", dest="forceLoad", action='store_true',
            help="Ignore version and force loading model file")
        ap.add_argument(
            "--showFigure", dest="showFigure", type=int,
            help="Show figures for the first specified number of decodings")
        ap.add_argument(
            "--intersect", dest="intersect", action='store_true',
            help="Do intersection training.")
        args = ap.parse_args()

    # Process config file
    if args.config:
        # Check local config path
        if not os.path.isfile(args.config):
            __logger.error("The config file doesn't exist: %s\n" % args.config)
            sys.exit(1)
        # Initialise the config parser; os.environ allows ${VAR} interpolation
        __logger.info("Reading configurations from file: %s" % (args.config))
        cp = SafeConfigParser(os.environ)
        cp.read(args.config)
        # Process the contents of config file, empty values keep the defaults
        for key in configFileDataSection:
            if cp.get('General', key) != '':
                config[configFileDataSection[key]] = cp.get('General', key)
        for key in configFileTrainSection:
            if cp.get('TrainData', key) != '':
                config[configFileTrainSection[key]] = cp.get('TrainData', key)
        for key in configFileTestSection:
            if cp.get('TestData', key) != '':
                config[configFileTestSection[key]] = cp.get('TestData', key)

    # Reset default values to config file, then re-parse so explicit
    # command-line options win over config-file values.
    ap.set_defaults(**config)
    args = ap.parse_args()
    config.update(vars(args))

    # Load model
    __logger.info("Loading model: " + config['model'])
    Model = importlib.import_module("models." + config['model']).AlignmentModel
    if not checkAlignmentModel(Model):
        raise TypeError("Invalid Model class")
    aligner = Model()
    if "version" in vars(aligner):
        __logger.info("Model version: " + str(aligner.version))
    if config['intersect'] is True:
        alignerReverse = Model()

    # Load datasets
    if config['trainData'] != "":
        # Primary text files: <dataDir>/<prefix>.<language suffix>
        trainSourceFiles = [os.path.expanduser(
            "%s.%s" % (os.path.join(config['dataDir'], config['trainData']),
                       config['sourceLanguage']))]
        trainTargetFiles = [os.path.expanduser(
            "%s.%s" % (os.path.join(config['dataDir'], config['trainData']),
                       config['targetLanguage']))]
        # Optional POS-tag companion files
        if config['trainDataTag'] != '':
            trainSourceFiles.append(os.path.expanduser("%s.%s" % (
                os.path.join(config['dataDir'], config['trainDataTag']),
                config['sourceLanguage'])))
            trainTargetFiles.append(os.path.expanduser("%s.%s" % (
                os.path.join(config['dataDir'], config['trainDataTag']),
                config['targetLanguage'])))
        if config['trainAlignment'] != '':
            trainAlignment = os.path.expanduser("%s.%s" % (
                os.path.join(config['dataDir'], config['trainData']),
                config['trainAlignment']))
        else:
            trainAlignment = ''
        __logger.info("Loading dataset")
        trainDataset = loadDataset(trainSourceFiles,
                                   trainTargetFiles,
                                   trainAlignment,
                                   linesToLoad=config['trainSize'])
        if config['intersect'] is True:
            # Swapped source/target copy used for intersection training
            __logger.info("Loading reversed dataset")
            trainDataset2 = loadDataset(trainTargetFiles,
                                        trainSourceFiles,
                                        trainAlignment,
                                        reverse=True,
                                        linesToLoad=config['trainSize'])
        else:
            trainDataset2 = None
    else:
        trainDataset = trainDataset2 = None

    if config['testData'] != "":
        testSourceFiles = [os.path.expanduser(
            "%s.%s" % (os.path.join(config['dataDir'], config['testData']),
                       config['sourceLanguage']))]
        testTargetFiles = [os.path.expanduser(
            "%s.%s" % (os.path.join(config['dataDir'], config['testData']),
                       config['targetLanguage']))]
        if config['testDataTag'] != '':
            testSourceFiles.append(os.path.expanduser("%s.%s" % (
                os.path.join(config['dataDir'], config['testDataTag']),
                config['sourceLanguage'])))
            testTargetFiles.append(os.path.expanduser("%s.%s" % (
                os.path.join(config['dataDir'], config['testDataTag']),
                config['targetLanguage'])))
        testDataset = loadDataset(testSourceFiles, testTargetFiles,
                                  linesToLoad=config['testSize'])
        if config['intersect'] is True:
            testDataset2 = loadDataset(testTargetFiles, testSourceFiles,
                                       linesToLoad=config['testSize'])
        else:
            testDataset2 = None
    else:
        testDataset = testDataset2 = None

    def work(arguments):
        # Train/decode one direction; runs in a worker process when
        # intersection training is enabled.
        # NOTE: 'reversed' shadows the builtin of the same name here.
        trainDataset, testDataset, reversed = arguments
        aligner = Model()
        if config['loadModel'] != "":
            loadFile = config['loadModel']
            if reversed:
                # Insert a ".rev" component before the file extension
                loadFile = ".".join(loadFile.split(".")[:-1] + ["rev"] +
                                    [loadFile.split(".")[-1]])
            aligner.loadModel(loadFile, force=config['forceLoad'])
        if trainDataset is not None:
            aligner.train(trainDataset, config['iterations'])
            if config['saveModel'] != "":
                saveFile = config['saveModel']
                if reversed:
                    if saveFile.endswith("pklz") or saveFile.endswith("pkl"):
                        saveFile = ".".join(saveFile.split(".")[:-1] + ["rev"] +
                                            [saveFile.split(".")[-1]])
                    else:
                        saveFile += ".rev"
                aligner.saveModel(saveFile)
        if testDataset is not None:
            alignResult = aligner.decode(testDataset, config['showFigure'])
            return (reversed, alignResult)
        return (None, None)

    # Run one direction inline, or both directions in a 2-process pool.
    arg = [(trainDataset, testDataset, False), ]
    if config['intersect'] is True:
        arg.append((trainDataset2, testDataset2, True))
    if len(arg) == 1:
        result = [work(arg[0])]
    else:
        result = multiprocessing.Pool(2).map(work, arg)

    if config['testData'] != "":
        for (resultReversed, resultAlignment) in result:
            if resultReversed:
                alignResultRev = resultAlignment
            else:
                alignResult = resultAlignment
        if config['intersect'] is True:
            # Intersection is performed here: keep only the links that
            # appear (mirrored) in both decoding directions.
            result = []
            for align, alignRev in zip(alignResult, alignResultRev):
                sentenceAlignment = []
                for item in align:
                    if len(item) == 2:
                        # Without alignment type
                        if (item[1], item[0]) in alignRev:
                            sentenceAlignment.append(item)
                    else:
                        # With alignment type
                        if (item[1], item[0], item[2]) in alignRev:
                            sentenceAlignment.append(item)
                result.append(sentenceAlignment)
            alignResult = result
        if config['output'] != "":
            exportToFile(alignResult, config['output'])
        if config['reference'] != "":
            # Score the produced alignment against the gold reference
            reference = loadAlignment(config['reference'])
            if aligner.evaluate:
                aligner.evaluate(alignResult, reference, config['showFigure'])

    if config['showFigure'] > 0:
        from models.plot import showPlot
        showPlot()
|
|
import os
import time
import socket
import struct
from traceback import format_exc, format_stack
from scapy.utils import wrpcap, rdpcap, PcapReader
from scapy.plist import PacketList
from vpp_interface import VppInterface
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet6 import IPv6, ICMPv6ND_NS, ICMPv6ND_NA,\
ICMPv6NDOptSrcLLAddr, ICMPv6NDOptDstLLAddr, ICMPv6ND_RA, RouterAlert, \
IPv6ExtHdrHopByHop
from util import ppp, ppc
from scapy.utils6 import in6_getnsma, in6_getnsmac, in6_ismaddr
from scapy.utils import inet_pton, inet_ntop
import picklable_packet
class CaptureTimeoutError(Exception):
    """ Exception raised if capture or packet doesn't appear within timeout.

    Raised by the packet-wait helpers below when VPP produces no (matching)
    packet before the deadline.
    """
    pass
def is_ipv6_misc(p):
    """ Is packet one of uninteresting IPv6 broadcasts?

    Filters out periodic router advertisements sent to multicast addresses
    and any packet carrying a hop-by-hop Router Alert option.
    """
    if p.haslayer(ICMPv6ND_RA) and in6_ismaddr(p[IPv6].dst):
        return True
    if p.haslayer(IPv6ExtHdrHopByHop):
        return any(isinstance(opt, RouterAlert)
                   for opt in p[IPv6ExtHdrHopByHop].options)
    return False
class VppPGInterface(VppInterface):
"""
VPP packet-generator interface
"""
    @property
    def pg_index(self):
        """packet-generator interface index assigned by VPP at creation"""
        return self._pg_index
    @property
    def out_path(self):
        """pcap file path - packets captured from VPP on this interface"""
        return self._out_path
    @property
    def in_path(self):
        """ pcap file path - packets injected into VPP via this interface"""
        return self._in_path
    @property
    def capture_cli(self):
        """CLI string used to start capture on this interface"""
        return self._capture_cli
    @property
    def cap_name(self):
        """capture name for this interface (registered with the test case)"""
        return self._cap_name
    @property
    def input_cli(self):
        """CLI string used to load the injected packets from the pcap file"""
        return self._input_cli
    @property
    def in_history_counter(self):
        """Self-incrementing counter used when renaming old pcap files.

        Each read returns the current value and bumps it by one.
        """
        v = self._in_history_counter
        self._in_history_counter += 1
        return v
    @property
    def out_history_counter(self):
        """Self-incrementing counter used when renaming old pcap files.

        Each read returns the current value and bumps it by one.
        """
        v = self._out_history_counter
        self._out_history_counter += 1
        return v
    def __init__(self, test, pg_index):
        """ Create VPP packet-generator interface

        :param test: test case owning this interface (supplies vapi/tempdir)
        :param pg_index: packet-generator index to create in VPP
        """
        # Create the interface in VPP first so sw_if_index is known before
        # the base class initializes - presumably VppInterface.__init__
        # reads self._sw_if_index; TODO confirm against vpp_interface.py.
        r = test.vapi.pg_create_interface(pg_index)
        self._sw_if_index = r.sw_if_index
        super(VppPGInterface, self).__init__(test)
        self._in_history_counter = 0
        self._out_history_counter = 0
        self._out_assert_counter = 0
        self._pg_index = pg_index
        # Per-interface pcap files under the test's temp directory
        self._out_file = "pg%u_out.pcap" % self.pg_index
        self._out_path = self.test.tempdir + "/" + self._out_file
        self._in_file = "pg%u_in.pcap" % self.pg_index
        self._in_path = self.test.tempdir + "/" + self._in_file
        # Pre-built CLI strings for starting capture / loading a stream
        self._capture_cli = "packet-generator capture pg%u pcap %s" % (
            self.pg_index, self.out_path)
        self._cap_name = "pcap%u" % self.sw_if_index
        self._input_cli = \
            "packet-generator new pcap %s source pg%u name %s" % (
                self.in_path, self.pg_index, self.cap_name)
    def enable_capture(self):
        """ Enable capture on this packet-generator interface"""
        try:
            if os.path.isfile(self.out_path):
                # Archive a stale capture file from a previous run so it
                # cannot be mistaken for this run's output.
                name = "%s/history.[timestamp:%f].[%s-counter:%04d].%s" % \
                    (self.test.tempdir,
                     time.time(),
                     self.name,
                     self.out_history_counter,
                     self._out_file)
                self.test.logger.debug("Renaming %s->%s" %
                                       (self.out_path, name))
                os.rename(self.out_path, name)
        except:
            # Best-effort archiving; capture still works if the rename fails.
            pass
        # FIXME this should be an API, but no such exists atm
        self.test.vapi.cli(self.capture_cli)
        self._pcap_reader = None
    def add_stream(self, pkts):
        """
        Add a stream of packets to this packet-generator

        :param pkts: iterable packets

        Writes the packets to the input pcap file, registers the capture
        name with the test case and tells VPP to load the stream.
        """
        try:
            if os.path.isfile(self.in_path):
                # Archive a stale input file from a previous run.
                name = "%s/history.[timestamp:%f].[%s-counter:%04d].%s" %\
                    (self.test.tempdir,
                     time.time(),
                     self.name,
                     self.in_history_counter,
                     self._in_file)
                self.test.logger.debug("Renaming %s->%s" %
                                       (self.in_path, name))
                os.rename(self.in_path, name)
        except:
            # Best-effort archiving; the stream is still written below.
            pass
        wrpcap(self.in_path, pkts)
        self.test.register_capture(self.cap_name)
        # FIXME this should be an API, but no such exists atm
        self.test.vapi.cli(self.input_cli)
def add_stream_vpp2(self, pkts):
"""
Add a stream of packets to this packet-generator
:param pkts: iterable packets
"""
n_p = []
for p in pkts:
n_p.append(p())
pkts = n_p
try:
if os.path.isfile(self.in_path):
name = "%s/history.[timestamp:%f].[%s-counter:%04d].%s" %\
(self.test.tempdir,
time.time(),
self.name,
self.in_history_counter,
self._in_file)
self.test.logger.debug("Renaming %s->%s" %
(self.in_path, name))
os.rename(self.in_path, name)
except:
pass
wrpcap(self.in_path, pkts)
self.test.register_capture(self.cap_name)
# FIXME this should be an API, but no such exists atm
self.test.vapi.cli(self.input_cli)
def generate_debug_aid(self, kind):
""" Create a hardlink to the out file with a counter and a file
containing stack trace to ease debugging in case of multiple capture
files present. """
self.test.logger.debug("Generating debug aid for %s on %s" %
(kind, self._name))
link_path, stack_path = ["%s/debug_%s_%s_%s.%s" %
(self.test.tempdir, self._name,
self._out_assert_counter, kind, suffix)
for suffix in ["pcap", "stack"]
]
os.link(self.out_path, link_path)
with open(stack_path, "w") as f:
f.writelines(format_stack())
self._out_assert_counter += 1
def _get_capture(self, timeout, filter_out_fn=is_ipv6_misc):
""" Helper method to get capture and filter it """
try:
if not self.wait_for_capture_file(timeout):
return None
output = rdpcap(self.out_path)
self.test.logger.debug("Capture has %s packets" % len(output.res))
except:
self.test.logger.debug("Exception in scapy.rdpcap (%s): %s" %
(self.out_path, format_exc()))
return None
before = len(output.res)
if filter_out_fn:
output.res = [p for p in output.res if not filter_out_fn(p)]
removed = before - len(output.res)
if removed:
self.test.logger.debug(
"Filtered out %s packets from capture (returning %s)" %
(removed, len(output.res)))
return output
    def get_capture(self, expected_count=None, remark=None, timeout=1,
                    filter_out_fn=is_ipv6_misc):
        """ Get captured packets

        :param expected_count: expected number of packets to capture, if None,
                               then self.test.packet_count_for_dst_pg_idx is
                               used to lookup the expected count
        :param remark: remark printed into debug logs
        :param timeout: how long to wait for packets
        :param filter_out_fn: filter applied to each packet, packets for which
                              the filter returns True are removed from capture
        :returns: iterable packets
        :raises Exception: on count mismatch or if nothing was captured
        """
        remaining_time = timeout
        capture = None
        name = self.name if remark is None else "%s (%s)" % (self.name, remark)
        based_on = "based on provided argument"
        if expected_count is None:
            expected_count = \
                self.test.get_packet_count_for_if_idx(self.sw_if_index)
            based_on = "based on stored packet_infos"
            if expected_count == 0:
                raise Exception(
                    "Internal error, expected packet count for %s is 0!" %
                    name)
        self.test.logger.debug("Expecting to capture %s (%s) packets on %s" % (
            expected_count, based_on, name))
        # Poll the capture file until the expected count is reached or the
        # time budget is spent; each poll's duration is subtracted from it.
        while remaining_time > 0:
            before = time.time()
            capture = self._get_capture(remaining_time, filter_out_fn)
            elapsed_time = time.time() - before
            if capture:
                if len(capture.res) == expected_count:
                    # bingo, got the packets we expected
                    return capture
                elif len(capture.res) > expected_count:
                    # More than expected can never self-correct - bail out.
                    self.test.logger.error(
                        ppc("Unexpected packets captured:", capture))
                    break
                else:
                    self.test.logger.debug("Partial capture containing %s "
                                           "packets doesn't match expected "
                                           "count %s (yet?)" %
                                           (len(capture.res), expected_count))
            elif expected_count == 0:
                # bingo, got None as we expected - return empty capture
                return PacketList()
            remaining_time -= elapsed_time
        if capture:
            self.generate_debug_aid("count-mismatch")
            raise Exception("Captured packets mismatch, captured %s packets, "
                            "expected %s packets on %s" %
                            (len(capture.res), expected_count, name))
        else:
            raise Exception("No packets captured on %s" % name)
    def assert_nothing_captured(self, remark=None, filter_out_fn=is_ipv6_misc):
        """ Assert that nothing unfiltered was captured on interface

        :param remark: remark printed into debug logs
        :param filter_out_fn: filter applied to each packet, packets for which
                              the filter returns True are removed from capture
        :raises AssertionError: if any unfiltered packet was captured
        """
        if os.path.isfile(self.out_path):
            try:
                # Ask for exactly 0 packets; an empty (post-filter) capture
                # is the success case.
                capture = self.get_capture(
                    0, remark=remark, filter_out_fn=filter_out_fn)
                if not capture or len(capture.res) == 0:
                    # junk filtered out, we're good
                    return
            except:
                # get_capture raising here means packets were present;
                # fall through to generate the debug aid and fail.
                pass
            self.generate_debug_aid("empty-assert")
            if remark:
                raise AssertionError(
                    "Non-empty capture file present for interface %s (%s)" %
                    (self.name, remark))
            else:
                raise AssertionError("Capture file present for interface %s" %
                                     self.name)
def wait_for_capture_file(self, timeout=1):
"""
Wait until pcap capture file appears
:param timeout: How long to wait for the packet (default 1s)
:returns: True/False if the file is present or appears within timeout
"""
deadline = time.time() + timeout
if not os.path.isfile(self.out_path):
self.test.logger.debug("Waiting for capture file %s to appear, "
"timeout is %ss" % (self.out_path, timeout))
else:
self.test.logger.debug("Capture file %s already exists" %
self.out_path)
return True
while time.time() < deadline:
if os.path.isfile(self.out_path):
break
time.sleep(0) # yield
if os.path.isfile(self.out_path):
self.test.logger.debug("Capture file appeared after %fs" %
(time.time() - (deadline - timeout)))
else:
self.test.logger.debug("Timeout - capture file still nowhere")
return False
return True
    def verify_enough_packet_data_in_pcap(self):
        """
        Check if enough data is available in file handled by internal pcap
        reader so that a whole packet can be read.

        Peeks at the next per-packet pcap record header (16 bytes:
        ts_sec, ts_usec, incl_len/caplen, orig_len) and compares the file
        size against the bytes the record claims to contain.  The reader's
        file position is restored before returning.

        :returns: True if enough data present, else False
        """
        orig_pos = self._pcap_reader.f.tell()  # save file position
        enough_data = False
        # read packet header from pcap
        packet_header_size = 16
        caplen = None
        end_pos = None
        hdr = self._pcap_reader.f.read(packet_header_size)
        if len(hdr) == packet_header_size:
            # parse the capture length - caplen (endianness comes from the
            # pcap global header already parsed by the reader)
            sec, usec, caplen, wirelen = struct.unpack(
                self._pcap_reader.endian + "IIII", hdr)
            self._pcap_reader.f.seek(0, 2)  # seek to end of file
            end_pos = self._pcap_reader.f.tell()  # get position at end
            if end_pos >= orig_pos + len(hdr) + caplen:
                enough_data = True  # yay, we have enough data
        self._pcap_reader.f.seek(orig_pos, 0)  # restore original position
        return enough_data
    def wait_for_packet(self, timeout, filter_out_fn=is_ipv6_misc):
        """
        Wait for next packet captured with a timeout

        :param timeout: How long to wait for the packet; 0 (or negative)
            switches to a single polling pass instead of waiting
        :param filter_out_fn: filter applied to each packet; packets for
            which it returns True are discarded and waiting continues
        :returns: the captured packet
        :raises CaptureTimeoutError: if no packet arrives within timeout
        """
        deadline = time.time() + timeout
        if self._pcap_reader is None:
            if not self.wait_for_capture_file(timeout):
                raise CaptureTimeoutError("Capture file %s did not appear "
                                          "within timeout" % self.out_path)
            while time.time() < deadline:
                try:
                    self._pcap_reader = PcapReader(self.out_path)
                    break
                except:
                    # the file may exist but still lack a complete pcap
                    # header - keep retrying until the deadline
                    self.test.logger.debug(
                        "Exception in scapy.PcapReader(%s): %s" %
                        (self.out_path, format_exc()))
        if not self._pcap_reader:
            raise CaptureTimeoutError("Capture file %s did not appear within "
                                      "timeout" % self.out_path)
        poll = False
        if timeout > 0:
            self.test.logger.debug("Waiting for packet")
        else:
            # poll guarantees at least one pass through the loop below even
            # though the deadline has already expired
            poll = True
            self.test.logger.debug("Polling for packet")
        while time.time() < deadline or poll:
            if not self.verify_enough_packet_data_in_pcap():
                # only a partial packet is on disk so far - yield and retry
                time.sleep(0)  # yield
                poll = False
                continue
            p = self._pcap_reader.recv()
            if p is not None:
                if filter_out_fn is not None and filter_out_fn(p):
                    self.test.logger.debug(
                        "Packet received after %ss was filtered out" %
                        (time.time() - (deadline - timeout)))
                else:
                    self.test.logger.debug(
                        "Packet received after %fs" %
                        (time.time() - (deadline - timeout)))
                    return p
            time.sleep(0)  # yield
            poll = False
        self.test.logger.debug("Timeout - no packets received")
        raise CaptureTimeoutError("Packet didn't arrive within timeout")
def create_arp_req(self):
"""Create ARP request applicable for this interface"""
return (Ether(dst="ff:ff:ff:ff:ff:ff", src=self.remote_mac) /
ARP(op=ARP.who_has, pdst=self.local_ip4,
psrc=self.remote_ip4, hwsrc=self.remote_mac))
def create_ndp_req(self):
"""Create NDP - NS applicable for this interface"""
nsma = in6_getnsma(inet_pton(socket.AF_INET6, self.local_ip6))
d = inet_ntop(socket.AF_INET6, nsma)
return (Ether(dst=in6_getnsmac(nsma)) /
IPv6(dst=d, src=self.remote_ip6) /
ICMPv6ND_NS(tgt=self.local_ip6) /
ICMPv6NDOptSrcLLAddr(lladdr=self.remote_mac))
    def resolve_arp(self, pg_interface=None):
        """Resolve ARP using provided packet-generator interface

        Sends an ARP who-has for this interface's local IPv4 address and, if
        an is-at reply arrives, stores the learned MAC in self._local_mac.
        Getting no reply at all is non-fatal; an unparsable reply re-raises.

        :param pg_interface: interface used to resolve, if None then this
            interface is used
        """
        if pg_interface is None:
            pg_interface = self
        self.test.logger.info("Sending ARP request for %s on port %s" %
                              (self.local_ip4, pg_interface.name))
        arp_req = self.create_arp_req()
        pg_interface.add_stream(arp_req)
        pg_interface.enable_capture()
        self.test.pg_start()
        self.test.logger.info(self.test.vapi.cli("show trace"))
        try:
            captured_packet = pg_interface.wait_for_packet(1)
        except:
            # no reply within 1s - leave the local MAC unresolved
            self.test.logger.info("No ARP received on port %s" %
                                  pg_interface.name)
            return
        arp_reply = captured_packet.copy()  # keep original for exception
        # Make Dot1AD packet content recognizable to scapy
        if arp_reply.type == 0x88a8:
            arp_reply.type = 0x8100
            arp_reply = Ether(str(arp_reply))
        try:
            if arp_reply[ARP].op == ARP.is_at:
                self.test.logger.info("VPP %s MAC address is %s " %
                                      (self.name, arp_reply[ARP].hwsrc))
                self._local_mac = arp_reply[ARP].hwsrc
            else:
                self.test.logger.info("No ARP received on port %s" %
                                      pg_interface.name)
        except:
            # reply did not parse as ARP - log the raw packet and re-raise
            self.test.logger.error(
                ppp("Unexpected response to ARP request:", captured_packet))
            raise
    def resolve_ndp(self, pg_interface=None, timeout=1):
        """Resolve NDP using provided packet-generator interface

        Sends a Neighbor Solicitation for this interface's local IPv6
        address and stores the MAC learned from the matching Neighbor
        Advertisement in self._local_mac.

        :param pg_interface: interface used to resolve, if None then this
            interface is used
        :param timeout: how long to wait for response before giving up
        :raises Exception: when no matching NA arrives before the timeout
        """
        if pg_interface is None:
            pg_interface = self
        self.test.logger.info("Sending NDP request for %s on port %s" %
                              (self.local_ip6, pg_interface.name))
        ndp_req = self.create_ndp_req()
        pg_interface.add_stream(ndp_req)
        pg_interface.enable_capture()
        self.test.pg_start()
        now = time.time()
        deadline = now + timeout
        # Enabling IPv6 on an interface can generate more than the
        # ND reply we are looking for (namely MLD). So loop through
        # the replies to look for what we want.
        while now < deadline:
            try:
                captured_packet = pg_interface.wait_for_packet(
                    deadline - now, filter_out_fn=None)
            except:
                self.test.logger.error(
                    "Timeout while waiting for NDP response")
                raise
            ndp_reply = captured_packet.copy()  # keep original for exception
            # Make Dot1AD packet content recognizable to scapy
            if ndp_reply.type == 0x88a8:
                ndp_reply.type = 0x8100
                ndp_reply = Ether(str(ndp_reply))
            try:
                ndp_na = ndp_reply[ICMPv6ND_NA]
                opt = ndp_na[ICMPv6NDOptDstLLAddr]
                self.test.logger.info("VPP %s MAC address is %s " %
                                      (self.name, opt.lladdr))
                self._local_mac = opt.lladdr
                self.test.logger.debug(self.test.vapi.cli("show trace"))
                # we now have the MAC we've been after
                return
            except:
                # not the NA we want (e.g. an MLD report) - log and keep
                # waiting for the remainder of the timeout
                self.test.logger.info(
                    ppp("Unexpected response to NDP request:",
                        captured_packet))
            now = time.time()
        self.test.logger.debug(self.test.vapi.cli("show trace"))
        raise Exception("Timeout while waiting for NDP response")
|
|
#!/usr/bin/python
import argparse
import os
from distutils.version import StrictVersion
import utility
from config import REQDEPS_FILE_PATH, DEPSINSTALL_DIR_PATH, CURRENTDEPS_FILE_PATH, GENERATED_ENVIRONMENT_PATH
from registryclient import RegistryClient
from repositoryclient import RepositoryClient
from dependencymanager import DependencyManager, InstalledDependencies
from settings import Settings
from environmentmanager import EnvironmentManager
def parseArguments():
    """Build the ppm command-line parser and dispatch to the subcommand.

    Each subcommand registers its handler via ``set_defaults(func=...)``;
    after parsing, the chosen handler is invoked with the parsed namespace.
    """
    parser = argparse.ArgumentParser(description="Project Package Manager", formatter_class=lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog, width=150, max_help_position=27))
    subparsers = parser.add_subparsers(title='commands', dest='subparser_name')
    parser_sync = subparsers.add_parser('sync', help='synchronize with project dependencies')
    # help-text typos fixed below ("inexistant", "packages(witout")
    parser_sync.add_argument('--without-install', help="do not install dependencies that are not yet installed", default=False, action='store_true')
    parser_sync.add_argument('--without-update', help="do not update installed packages", default=False, action='store_true')
    parser_sync.add_argument('--without-downgrade', help="do not downgrade packages", default=False, action='store_true')
    parser_sync.add_argument('--without-remove', help="do not remove installed packages which are not present in {d}".format(d=os.path.basename(REQDEPS_FILE_PATH)), default=False, action='store_true')
    parser_sync.set_defaults(func=cmd_sync)
    parser_download = subparsers.add_parser('download', help='download one or more packages (without installing them)')
    parser_download.add_argument('packages', help="dependencies to download in the format dependencyName@(version|latest)", nargs='+')
    parser_download.add_argument('--directory', help="directory where to download files")
    parser_download.set_defaults(func=cmd_download)
    parser_settingsHandler = subparsers.add_parser('set', help='set a setting')
    parser_settingsHandler.add_argument('setting_name', help="(registry-server|repository-server|project)")
    parser_settingsHandler.add_argument('setting_value', help="option value")
    parser_settingsHandler.set_defaults(func=cmd_set_setting)
    parser_settingsUnsetHandler = subparsers.add_parser('unset', help='unset a setting')
    parser_settingsUnsetHandler.add_argument('setting_name', help="(registry-server|repository-server|project)")
    parser_settingsUnsetHandler.set_defaults(func=cmd_unset_setting)
    args = parser.parse_args()
    args.func(args)
def cmd_sync(args):
    """cmd_sync validate and prepare synchronization operation environment

    Loads the installed-dependency state, fetches the required dependencies
    (from the registry when a project is set, otherwise from the local
    requirements file) and hands both to sync_dependencies.
    """
    # translate the --without-* CLI switches into positive operation flags
    flags = Flags(install=not args.without_install,
                  update=not args.without_update,
                  downgrade=not args.without_downgrade,
                  remove=not args.without_remove)
    utility.ensure_directory(DEPSINSTALL_DIR_PATH)
    # load currently installed dependencies
    installedDeps = InstalledDependencies(load_installed_deps_file())
    # make sure all dependencies are installed
    check_integrity(installedDeps, DEPSINSTALL_DIR_PATH)
    dependencyManager = DependencyManager(installedDeps, DEPSINSTALL_DIR_PATH)
    registryClient = get_registry_client()
    if not registryClient:
        print "registry server is not set, please set it before running ppm"
        return
    settings = Settings()
    # a configured project takes precedence over the local requirements file
    if settings.get_current_project():
        project_name = settings.get_current_project()
        try:
            jsonData = registryClient.get_project_details(project_name)
        except Exception as e:
            print "Error occured while retrieving project {p} details from registry server: {e}".format(p=project_name, e=str(e))
            return
    elif os.path.exists(REQDEPS_FILE_PATH):
        try:
            jsonData = utility.load_json_file(REQDEPS_FILE_PATH)
        except Exception as e:
            print "Error occured while reading {f}: {e}".format(f=os.path.basename(REQDEPS_FILE_PATH), e=str(e))
            return
    else:
        print "unable to fetch dependencies, you have to set a project or create a {d} file".format(d=os.path.basename(REQDEPS_FILE_PATH))
        return
    requiredDeps = RequiredDependencies(jsonData.get('devdependencies',{}))
    repositoryClient = get_repository_client()
    # synchronizing dependencies
    sync_dependencies(requiredDeps, installedDeps, registryClient, repositoryClient, dependencyManager, flags)
    # save newly installed packages as current dependencies
    save_installed_deps(installedDeps.get_data())
def cmd_download(args):
    """ downloading one or more packages without monitoring them

    Each positional argument is ``name@version``, ``name@latest`` or a
    bare ``name`` (treated as latest).  Failures are logged per package
    and the remaining packages are still processed.
    """
    # NOTE(review): args.directory is None when --directory is omitted -
    # confirm utility.joinPaths tolerates a None component.
    downloadDirectory = utility.joinPaths(os.getcwd(), args.directory)
    # split "name@version" specs; bare names default to "latest"
    packages = [('@' in p and p.split('@')) or [p,"latest"] for p in args.packages]
    utility.ensure_directory(downloadDirectory)
    registryClient = get_registry_client()
    if not registryClient:
        raise Exception("registry server is not set, please set it using set-registry-server command")
    repositoryClient = get_repository_client()
    for name, version in packages:
        try:
            package_handler = registryClient.get_package_details(name)
        except Exception as e:
            utility.log(str(e))
            continue
        if version == 'latest':
            version = get_latest_version(package_handler.get_package_versions())
            # get_latest_version returns '0.0' when no versions exist
            if version == '0.0':
                utility.log("Package {p} is not in the ppm registry".format(p=name))
                continue
        else:
            # normalize the requested version string
            version = str(StrictVersion(version))
            if not package_handler.check_version_existence(version):
                utility.log("Package {p} is not in the ppm registry".format(p=name))
                continue
        url = package_handler.get_package_url(version)
        # check for repository url
        if repositoryClient:
            repository_url = repositoryClient.get_package_repository_url(url)
            if repository_url:
                url = repository_url
        utility.download_file(url, downloadDirectory)
def cmd_set_setting(args):
    """CLI handler: set a single named setting to the given value."""
    setting_name = args.setting_name
    setting_value = args.setting_value
    try:
        set_setting(setting_name, setting_value)
        print "{n} has been set to {v}".format(n=setting_name, v=setting_value)
    except Exception as e:
        # set_setting raises on unknown setting names
        print str(e)
def cmd_unset_setting(args):
    """CLI handler: clear a named setting by storing None for it."""
    setting_name = args.setting_name
    try:
        set_setting(setting_name, None)
        print "{n} has been unset".format(n=setting_name)
    except Exception as e:
        # set_setting raises on unknown setting names
        print str(e)
def set_setting(setting_name, setting_value):
    """Persist a single named setting; raises Exception for unknown names."""
    settings = Settings()
    setters = {
        "registry-server": settings.set_registry_server,
        "repository-server": settings.set_repository_server,
        "project": settings.set_current_project,
    }
    if setting_name not in setters:
        # unknown name: raise before anything is written to disk
        raise Exception("invalid setting")
    setters[setting_name](setting_value)
    settings.save()
# I prefer writing flags.install instead of flags["install"] or installFlag, this class is merely for that purpose
class Flags:
    """Plain attribute container for the four sync operation switches."""

    def __init__(self, install, update, downgrade, remove):
        self.remove = remove
        self.downgrade = downgrade
        self.update = update
        self.install = install
def sync_dependencies(requiredDeps, installedDependencies, registryClient, repositoryClient, dependencyManager, flags):
    """Synchronize installed dependencies with requiredDeps: install, update,
    downgrade and remove dependencies, in accordance with flags.

    Args:
        requiredDeps: RequiredDependencies instance (name -> required version)
        installedDependencies: currently installed dependencies
        registryClient: client used for requesting a package details from the registry
        repositoryClient: client used for checking for a package repository url
        dependencyManager: responsible for dependency installation (or remove)
        flags: operations to be performed (can be update, install, downgrade, remove or any combination of them)
    """
    utility.log("synchronizing dependencies")
    utility.ensure_directory(DEPSINSTALL_DIR_PATH)
    required_dependencies_names = requiredDeps.get_dependencies_names()
    for depName in required_dependencies_names:
        utility.log("Processing {d}".format(d=depName), 1)
        # get current installed version (or set version to 0.0 for new dependencies)
        if installedDependencies.is_installed(depName):
            installedVersion = installedDependencies.get_installed_version(depName)
        else:
            installedVersion = str(StrictVersion('0.0'))
        # get and normalize required version
        requiredVersion = requiredDeps.get_dependency_version(depName)
        requiredVersion = str(StrictVersion(requiredVersion))
        if StrictVersion(requiredVersion) == StrictVersion(installedVersion):
            utility.log("version {v} already installed".format(v=installedVersion))
        elif StrictVersion(requiredVersion) < StrictVersion(installedVersion):
            # required version is older than installed -> downgrade if allowed
            if flags.downgrade:
                if install_dependency(depName, requiredVersion, dependencyManager, registryClient, repositoryClient):
                    utility.log("{p} version {v} installed successfuly".format(p=depName, v=requiredVersion))
                else:
                    utility.log("{p} installation failed".format(p=depName))
            else:
                utility.log("Required version {v1} < Installed version {v2}, No action taken (downgrade flag is not set)".format(v1=requiredVersion, v2=installedVersion))
        else:
            # required version is newer: update when already installed (and
            # updating is allowed), install when missing (and installing is
            # allowed); version 0.0 marks "not installed"
            if (flags.update and StrictVersion(installedVersion) > StrictVersion('0.0')) or (flags.install and StrictVersion(installedVersion) == StrictVersion('0.0')):
                if install_dependency(depName, requiredVersion, dependencyManager, registryClient, repositoryClient):
                    utility.log("{p} version {v} installed successfuly".format(p=depName, v=requiredVersion))
                else:
                    utility.log("{p} installation failed".format(p=depName))
            else:
                utility.log("Required version {v1} > Installed version {v2}, No action taken (update flag is not set)".format(v1=requiredVersion, v2=installedVersion))
        # unident log messages
        utility.log("", -1)
    # anything installed but no longer required is a removal candidate
    dependenciesToRemove = [item for item, version in installedDependencies.get_dependencies_list().items() if item not in required_dependencies_names]
    if dependenciesToRemove:
        utility.log("Installed dependencies that are not needed anymore : " + ",".join(dependenciesToRemove))
        if not flags.remove:
            utility.log("ommiting uneeded dependencies (remove flag is not set)")
        else:
            for dependencyName in dependenciesToRemove:
                utility.log("removing {d}".format(d=dependencyName))
                dependencyManager.remove_dependency(dependencyName)
    # regenerate the environment script from the final dependency set
    generate_environment(installedDependencies.get_dependencies_list(), registryClient, os.path.basename(DEPSINSTALL_DIR_PATH), GENERATED_ENVIRONMENT_PATH)
    utility.log("synchronization operation finished")
def install_dependency(name, version, dependencyManager, registryClient, repositoryClient):
    """Download and install one package version.

    Returns True on success, False on any failure; failures are logged
    rather than raised so the caller can continue with other packages.
    """
    try:
        packageHandler = registryClient.get_package_details(name)
    except Exception as e:
        utility.log(str(e))
        return False
    if not packageHandler.check_version_existence(version):
        utility.log("package {p} version {v} is not in the ppm registry".format(p=name, v=version))
        return False
    url = packageHandler.get_package_url(version)
    # prefer a mirror from the repository server when one is configured
    if repositoryClient:
        mirror = repositoryClient.get_package_repository_url(url)
        url = mirror or url
    installDirectoryRelPath = utility.joinPaths(
        packageHandler.get_package_parentdir(version),
        packageHandler.get_package_dirname(version))
    try:
        dependencyManager.install_dependency(name, version, url, installDirectoryRelPath)
    except Exception as e:
        utility.log(str(e))
        return False
    return True
def generate_environment(dependencies, registryClient, baseDirPath, savePath):
    """Write an environment script covering every installed dependency."""
    env_mgr = EnvironmentManager(baseDirPath)
    for dep_name, dep_version in dependencies.items():
        details = registryClient.get_package_details(dep_name)
        env_mgr.add_package_env(dep_name, details.get_package_env(dep_version))
    utility.ensure_file_directory(savePath)
    with open(savePath, 'w+') as script_file:
        script_file.write(env_mgr.generate_script())
def get_registry_client():
    """Return a RegistryClient for the configured server, or None if unset."""
    server = Settings().get_registry_server()
    if server:
        return RegistryClient(server)
    return None
def get_repository_client():
    """Return a RepositoryClient for the configured server, or None if unset."""
    server = Settings().get_repository_server()
    if server:
        return RepositoryClient(server)
    return None
def load_installed_deps_file():
    """Load the persisted installed-dependencies JSON, or None when the
    state file does not exist yet (fresh checkout)."""
    if not os.path.exists(CURRENTDEPS_FILE_PATH):
        return None
    return utility.load_json_file(CURRENTDEPS_FILE_PATH)
def save_installed_deps(content):
    """Persist *content* (the installed-dependencies mapping) as JSON,
    creating the parent directory if needed."""
    utility.ensure_file_directory(CURRENTDEPS_FILE_PATH)
    utility.save_json_to_file(content, CURRENTDEPS_FILE_PATH)
def get_latest_version(availableVersions):
    """Return the highest version string from *availableVersions*.

    Unlike the previous implementation this does not sort (and therefore
    does not mutate) the caller's list.  Falls back to '0.0' when the list
    is empty or contains a string StrictVersion cannot parse.
    """
    try:
        return str(max(StrictVersion(v) for v in availableVersions))
    except Exception:
        # empty input or unparsable version string
        return str(StrictVersion('0.0'))
def check_integrity(installed_dependencies, dependencies_directory):
    """Drop state entries whose installation directories were deleted.

    Compares each recorded installation path against the directories
    actually present under *dependencies_directory* and removes any entry
    whose directory is missing, so a later sync reinstalls it.
    """
    # map installation path -> dependency name for reverse lookup
    installed_paths = dict([(installed_dependencies.get_installation_path(name), name) for name in installed_dependencies.get_dependencies_list().keys()])
    not_found_dirs = [d for d in installed_paths.keys() if not os.path.isdir(os.path.join(dependencies_directory, d))]
    if not_found_dirs:
        print "checking dependencies integrity"
        print "some dependencies directories have been deleted manually:" + str(not_found_dirs)
        for d in not_found_dirs:
            installed_dependencies.remove_dependency(installed_paths[d])
            print "uninstalling " + d
class RequiredDependencies:
    """Read-only view over the project's required dependencies.

    *data* maps dependency name -> required version string; a ``None``
    payload is treated as an empty mapping.
    """

    def __init__(self, data):
        if data is None:
            self.data = {}
        elif self.__validate_schema(data):
            self.data = data
        else:
            raise ValueError("invalid Data")

    def get_dependency_version(self, depName):
        """Return the required version for *depName* (which must exist)."""
        assert(self.is_dep_existant(depName))
        return self.data[depName]

    def is_dep_existant(self, depName):
        """True when *depName* is among the required dependencies."""
        assert(depName)
        return depName in self.data

    def get_dependencies_names(self):
        """Return the names of all required dependencies."""
        return self.data.keys()

    def __validate_schema(self, data):
        # TODO: actually validate the {name: version} schema
        return True
# Script entry point: parse CLI arguments and dispatch to the subcommand.
if __name__ == "__main__":
    parseArguments()
|
|
from bottle import get, post, request, run, template, static_file, redirect, static_file
from classes import controller
import bottle
import bottle_session
import os
import time
import cgi
import re
from beaker.middleware import SessionMiddleware
# Beaker session configuration: file-backed sessions stored under ./data,
# cookies expire after 300s, sessions auto-saved on each request.
session_opts = {
    'session.type': 'file',
    'session.cookie_expires': 300,
    'session.data_dir': './data',
    'session.auto': True
}
# Wrap the bottle app in beaker's session middleware; `sesapp` is what
# actually gets served (see the run(...) call at the bottom of the file).
sesapp = SessionMiddleware(bottle.app(), session_opts)
app = bottle.app()
# NOTE(review): bottle_session's plugin is installed *in addition to* the
# beaker middleware, so handlers receive a `session` argument from the
# plugin while most code reads 'beaker.session' from the environ - two
# separate session stores appear to coexist; confirm this is intentional.
plugin = bottle_session.SessionPlugin()
app.install(plugin)
@get('/')
def index():
    """Render the landing page, passing login info when a user is logged in."""
    beaker_session = bottle.request.environ.get('beaker.session')
    logged = beaker_session.get('logged')
    return template('views/index.tpl', session=logged if logged else '')
###################################################################################################
# -- -- -- --Login view for user & POST Login-- -- -- -- -- -- -- -- -- -- -- -- -
@get('/user/login')
def userlogin(session):  # Login page
    """Render the user login form (session is injected by the plugin but unused)."""
    return template('views/userLogin.tpl')
@post('/user/login/post')
def postuserlogin():
    """Authenticate a user; on success store the login info in the session."""
    result = controller.logIn(request, 'user')
    if result['status'] != 'success':
        # bottle's redirect raises, ending the handler here
        redirect('/user/login')
    beaker_session = bottle.request.environ.get('beaker.session')
    beaker_session['logged'] = result
    beaker_session.save()
    redirect('/')
#-- -- -- --Register view for user & POST register save-- -- -- -- -- -- -- -- -- -- --
@get('/user/register')
def userreg(session):
    """Render user registration, consuming any flash message from the session."""
    flash_msg = ''
    flash_class = ''
    if session['msg'] != '' and session['status'] != '':
        flash_msg = session['msg']
        flash_class = session['status']
        # clear the flash so it is shown only once
        session['status'] = ''
        session['msg'] = ''
    return template('views/userRegistration.tpl', classname=flash_class, msg=flash_msg)
@post('/user/post')
def postuserreg(session):
    """Register a new user and flash the result on the registration page."""
    users = controller.insertUser(request, 'user')
    # stash status/message as a flash for the GET handler to display
    session['status'] = users['status']
    session['msg'] = users['msg']
    redirect('/user/register')
# -- -- -- -- -- -Search file-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
@get('/user/findfile')
def find(session):
    """Search files by name; requires a logged-in beaker session."""
    beaker_session = bottle.request.environ.get('beaker.session')
    if not beaker_session.get('logged'):
        # redirect raises, so no explicit return is needed
        redirect('/user/login')
    query = request.GET.get('fname') or ''
    matches = controller.getFilesforsearch(query)
    return template('views/userSearch.tpl', files=matches,
                    session=beaker_session.get('logged'))
# -- -- -- -- -- -- -- -Download searched files-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -
@post('/download')
def downloafile():
    """Stream a previously-found file back to the client as a download.

    NOTE(review): SECURITY - both 'name' and 'path' come straight from the
    client and the directory served is derived from them, so a crafted
    request can read arbitrary files.  The path should be validated
    against a server-side whitelist before serving.
    """
    name = request.forms.get('name')
    path = request.forms.get('path')
    # everything before the file name becomes the serving root
    newpath = path.split(name);
    try:
        return static_file(name, root = newpath[0], download = name)
    except:
        # NOTE(review): bottle's static_file returns an HTTPError for
        # missing files rather than raising, so this branch likely never
        # fires - confirm.
        return {
            "status": "failed",
            "msg": "File not found"
        };
###################################################################################################
#-- -- -- --Login view for owner & POST Login-- -- -- -- -- -- -- -- -- --
@get('/owner/login')
def ownerlogin(session):  # Login page
    """Render the data-owner login form."""
    return template('views/dataLogin.tpl')
@post('/owner/login/post')
def postownerlogin(session):
    """Authenticate a data owner; on success store login info in the session."""
    users = controller.logIn(request, 'dataowner')
    if(users['status'] == 'success'):
        s = bottle.request.environ.get('beaker.session')
        s['logged'] = users
        s.save()
        redirect('/')
    else:
        # BUG FIX: failed owner logins previously bounced to the *user*
        # login page ('/user/login'); send them back to the owner form.
        redirect('/owner/login')
#-- -- -- --Register view for owner & POST register save-- -- -- -- -- -- -- -- -- -- --
@get('/owner/register')
def ownerreg(session):
    """Render owner registration, consuming any flash message from the session."""
    flash_msg = ''
    flash_class = ''
    if session['msg'] != '' and session['status'] != '':
        flash_msg = session['msg']
        flash_class = session['status']
        # clear the flash so it is shown only once
        session['status'] = ''
        session['msg'] = ''
    return template('views/dataownerRegistration.tpl', classname=flash_class, msg=flash_msg)
@ post('/owner/post')
def postownerreg(session):
    """Register a new data owner and flash the result on the registration page."""
    users = controller.insertUser(request, 'dataowner')
    # stash status/message as a flash for the GET handler to display
    session['status'] = users['status']
    session['msg'] = users['msg']
    redirect('/owner/register')
# -- -- -- --Upload view for owner & POST upload-- -- -- -- -- -- -- -- -- -- --
@get('/owner/upload')
def showuploadpage(session):
    """Render the owner upload page with any flash message and the owner's files."""
    login = bottle.request.environ.get('beaker.session')
    if not login.get('logged'):
        redirect('/owner/login')
    msg = ''
    classname = ''
    if session['msg'] != '' and session['clsname'] != '':
        msg = session['msg']
        classname = session['clsname']
        # BUG FIX: clear 'clsname' (previously 'status' was cleared by
        # mistake) so the flash CSS class does not leak into later requests
        session['clsname'] = ''
        session['msg'] = ''
    user = login.get('logged')
    files = controller.getFiles(user['id'])
    # (stray debug print of the file list removed)
    return template('views/uploaddata.tpl', classname=classname, msg=msg,
                    login=user, files=files)
@post('/owner/upload')
def postupload(session):
    """Handle an owner file upload: save it under ./uploads and record it."""
    login = bottle.request.environ.get('beaker.session')
    loggeduser = login.get('logged');
    title = request.forms.get('title')
    keywords = request.forms.get('keywords')
    upload = request.files.get('documents')
    name, ext = os.path.splitext(upload.filename)
    # prefer the user-supplied title (keeping the original extension)
    if title != "":
        fname = "%s%s" % (title, ext)
    else :
        fname = upload.filename
    # prefix with a timestamp and collapse whitespace runs to dashes
    # NOTE(review): the ':' characters in the timestamp are invalid in
    # Windows file names - confirm this only ever runs on POSIX.
    fname = "%s_%s" % (time.strftime('%Y-%m-%d_%H:%M:%S'), re.sub(r"\s+", '-', fname))
    save_path = "%s/uploads" % (os.getcwd())
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    file_path = "{path}/{file}".format(path = save_path, file = fname)
    upload.save(file_path)
    filesv = controller.saveFile(file_path, loggeduser['id'], keywords)# upload.save(file_path)
    # stash the result as a flash message for the upload page to display
    session['clsname'] = filesv['class']
    session['msg'] = filesv['msg']
    redirect('/owner/upload')
@post('/deletefile/<imgid>')
def deletefile(imgid):
    """Delete a file belonging to the logged-in owner; returns controller status."""
    beaker_session = bottle.request.environ.get('beaker.session')
    owner = beaker_session.get('logged')
    return controller.deleteFile(owner['id'], imgid)
#################################################################################################
#-- -- -- -- -Generate secret key-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -
@post('/getkey')
def sendkey(session):
    """AJAX endpoint: return the secret key produced by the controller."""
    secret = controller.sendSecret(request)
    return secret
#################################################################################################
#-- -- -- -- -- -- --Show loader gif only for ajax purpose-- -- -- -- -- -- -- -- -- -- -- -- -
@get('/loader')
def showloader():
    """Serve the loader gif (used as an AJAX progress indicator)."""
    return static_file('loader.gif', root = os.getcwd());
@post('/checkemail')
def checkIfexist():
    """AJAX endpoint: report whether an e-mail address is already registered."""
    return controller.checkIfmail(request.forms.get('email'))
@get('/logout')
def logout():
    """Destroy the beaker session and return to the landing page."""
    session = bottle.request.environ.get('beaker.session')
    session.delete()
    redirect('/')
#################################################################################################
# You can configure host, port and debug as per your requirements
bottle.debug(True)
host = os.getenv("HOST", '0.0.0.0')
# NOTE(review): os.getenv returns a *string* when PORT is set in the
# environment but the int default otherwise; confirm the server adapter
# coerces a string port before relying on this elsewhere.
port = os.getenv("PORT", 5000)
# port = os.getenv("PORT", 8000)
# host = os.getenv("HOST", 'localhost')
# serve the session-middleware-wrapped app, not the bare bottle app
run(app=sesapp ,host = host, port = port, debug = True)
|
|
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import mock
import webob.exc
from neutron import context
from neutron.db.firewall import firewall_db
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.vshield.common import exceptions as vcns_exc
from neutron.plugins.vmware.vshield import vcns_driver
from neutron.tests.unit.db.firewall import test_db_firewall
from neutron.tests.unit import vmware
from neutron.tests.unit.vmware.vshield import fake_vcns
_uuid = uuidutils.generate_uuid
VSE_ID = 'edge-1'
ROUTER_ID = '42f95450-5cc9-44e4-a744-1320e592a9d5'
VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test")
class VcnsDriverTestCase(test_db_firewall.FirewallPluginDbTestCase,
                         firewall_db.Firewall_db_mixin):
    """Base test case wiring the firewall DB test machinery to a fake VCNS.

    All firewall calls made through the mocked VCNS client are routed to a
    FakeVcns instance (self.fc2) so driver behavior can be asserted without
    a real backend.
    """

    def vcns_firewall_patch(self):
        """Start the VCNS mock and forward every firewall call to the fake."""
        instance = self.mock_vcns.start()
        instance.return_value.update_firewall.side_effect = (
            self.fc2.update_firewall)
        instance.return_value.delete_firewall.side_effect = (
            self.fc2.delete_firewall)
        instance.return_value.update_firewall_rule.side_effect = (
            self.fc2.update_firewall_rule)
        instance.return_value.delete_firewall_rule.side_effect = (
            self.fc2.delete_firewall_rule)
        instance.return_value.add_firewall_rule_above.side_effect = (
            self.fc2.add_firewall_rule_above)
        instance.return_value.add_firewall_rule.side_effect = (
            self.fc2.add_firewall_rule)
        instance.return_value.get_firewall.side_effect = (
            self.fc2.get_firewall)
        instance.return_value.get_firewall_rule.side_effect = (
            self.fc2.get_firewall_rule)

    def setUp(self):
        """Parse the test config, install the VCNS mock and build the driver."""
        self.config_parse(args=['--config-file', VCNS_CONFIG_FILE])
        # mock vcns - must be patched before the driver is constructed so
        # the driver picks up the mocked client
        self.fc2 = fake_vcns.FakeVcns(unique_router_name=False)
        self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
        self.vcns_firewall_patch()
        self.driver = vcns_driver.VcnsDriver(mock.Mock())
        super(VcnsDriverTestCase, self).setUp()
        self.addCleanup(self.fc2.reset_all)
        self.addCleanup(self.mock_vcns.stop)
        self.tenant_id = _uuid()
        self.subnet_id = _uuid()
class TestEdgeFwDriver(VcnsDriverTestCase):
    def _make_firewall_dict_with_rules(self, context, firewall_id):
        """Return the firewall dict with its policy's rules inlined.

        Adds a 'firewall_rule_list' key when the firewall references a
        policy; otherwise the plain firewall dict is returned unchanged.
        """
        fw = self.get_firewall(context, firewall_id)
        fw_policy_id = fw['firewall_policy_id']
        if fw_policy_id:
            firewall_policy_db = self._get_firewall_policy(
                context, fw_policy_id)
            fw['firewall_rule_list'] = [
                self._make_firewall_rule_dict(fw_rule_db)
                for fw_rule_db in firewall_policy_db['firewall_rules']
            ]
        return fw
def _compare_firewall_rule_lists(self, firewall_policy_id,
list1, list2):
for r1, r2 in zip(list1, list2):
rule = r1['firewall_rule']
rule['firewall_policy_id'] = firewall_policy_id
for k in rule:
self.assertEqual(rule[k], r2[k])
    def test_create_and_get_firewall(self):
        """Push a firewall with three rules to the edge and read it back."""
        ctx = context.get_admin_context()
        name = 'firewall'
        with contextlib.nested(self.firewall_rule(name='fwr1',
                                                  do_delete=False),
                               self.firewall_rule(name='fwr2',
                                                  do_delete=False),
                               self.firewall_rule(name='fwr3',
                                                  do_delete=False)) as fr:
            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
            with self.firewall_policy(firewall_rules=fw_rule_ids,
                                      do_delete=False) as fwp:
                fwp_id = fwp['firewall_policy']['id']
                with self.firewall(name=name,
                                   firewall_policy_id=fwp_id) as firewall:
                    fw_create = firewall['firewall']
                    fw_expect = self._make_firewall_dict_with_rules(
                        ctx, fw_create['id'])
                    # write to the edge, then verify the readback matches
                    self.driver.update_firewall(ctx, VSE_ID, fw_expect)
                    fw_get = self.driver.get_firewall(ctx, VSE_ID)
                    self._compare_firewall_rule_lists(
                        fwp_id, fw_get['firewall_rule_list'],
                        fw_expect['firewall_rule_list'])
def test_update_firewall_with_rules(self):
ctx = context.get_admin_context()
name = 'new_firewall'
with contextlib.nested(self.firewall_rule(name='fwr1',
do_delete=False),
self.firewall_rule(name='fwr2',
do_delete=False),
self.firewall_rule(name='fwr3',
do_delete=False)) as fr:
fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
with self.firewall_policy(firewall_rules=fw_rule_ids,
do_delete=False) as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(name=name,
firewall_policy_id=fwp_id) as firewall:
fw_create = firewall['firewall']
fw_create = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_create)
data = {'firewall_rule': {'name': name,
'source_port': '10:20',
'destination_port': '30:40'}}
self.new_update_request('firewall_rules', data,
fr[0]['firewall_rule']['id'])
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_expect)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
    def test_delete_firewall(self):
        """After deleting the edge firewall, its rule list must be empty."""
        ctx = context.get_admin_context()
        name = 'firewall'
        with contextlib.nested(self.firewall_rule(name='fwr1',
                                                  do_delete=False),
                               self.firewall_rule(name='fwr2',
                                                  do_delete=False),
                               self.firewall_rule(name='fwr3',
                                                  do_delete=False)) as fr:
            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
            with self.firewall_policy(firewall_rules=fw_rule_ids,
                                      do_delete=False) as fwp:
                fwp_id = fwp['firewall_policy']['id']
                with self.firewall(name=name,
                                   firewall_policy_id=fwp_id) as firewall:
                    fw_create = firewall['firewall']
                    fw_expect = self._make_firewall_dict_with_rules(
                        ctx, fw_create['id'])
                    self.driver.update_firewall(ctx, VSE_ID, fw_expect)
                    self.driver.delete_firewall(ctx, VSE_ID)
                    fw_get = self.driver.get_firewall(
                        ctx, VSE_ID)
                    self.assertFalse(fw_get['firewall_rule_list'])
    def test_update_firewall_rule(self):
        """Update a single rule through the API and the driver, then
        verify the driver returns the updated rule from the edge."""
        ctx = context.get_admin_context()
        name = 'new_firewall'
        with contextlib.nested(self.firewall_rule(name='fwr1',
                                                  do_delete=False)) as fr:
            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
            with self.firewall_policy(firewall_rules=fw_rule_ids,
                                      do_delete=False) as fwp:
                fwp_id = fwp['firewall_policy']['id']
                with self.firewall(name=name,
                                   firewall_policy_id=fwp_id) as firewall:
                    fw_create = firewall['firewall']
                    fw_create = self._make_firewall_dict_with_rules(
                        ctx, fw_create['id'])
                    self.driver.update_firewall(ctx, VSE_ID, fw_create)
                    data = {'firewall_rule': {'name': name,
                                              'source_port': '10:20',
                                              'destination_port': '30:40'}}
                    # Apply the rule change through the extension API and
                    # capture the updated rule from the response.
                    req = self.new_update_request(
                        'firewall_rules', data,
                        fr[0]['firewall_rule']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.ext_api))
                    rule_expect = res['firewall_rule']
                    rule_expect['edge_id'] = VSE_ID
                    # Mirror the update on the edge and read it back.
                    self.driver.update_firewall_rule(
                        ctx, rule_expect['id'], VSE_ID, rule_expect)
                    rule_get = self.driver.get_firewall_rule(
                        ctx, rule_expect['id'], VSE_ID)
                    # Every field stored on the edge must match the API's
                    # view of the rule.
                    for k, v in rule_get['firewall_rule'].items():
                        self.assertEqual(rule_expect[k], v)
    def test_delete_firewall_rule(self):
        """Delete one rule from the edge through the driver and verify a
        subsequent lookup raises VcnsNotFound."""
        ctx = context.get_admin_context()
        name = 'new_firewall'
        with contextlib.nested(self.firewall_rule(name='fwr1',
                                                  do_delete=False),
                               self.firewall_rule(name='fwr2',
                                                  do_delete=False)) as fr:
            fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
            with self.firewall_policy(firewall_rules=fw_rule_ids,
                                      do_delete=False) as fwp:
                fwp_id = fwp['firewall_policy']['id']
                with self.firewall(name=name,
                                   firewall_policy_id=fwp_id) as firewall:
                    fw_create = firewall['firewall']
                    fw_create = self._make_firewall_dict_with_rules(
                        ctx, fw_create['id'])
                    # Materialize both rules on the edge first.
                    self.driver.update_firewall(ctx, VSE_ID, fw_create)
                    fr[0]['firewall_rule']['edge_id'] = VSE_ID
                    self.driver.delete_firewall_rule(
                        ctx, fr[0]['firewall_rule']['id'],
                        VSE_ID)
                    # The deleted rule must no longer be resolvable.
                    self.assertRaises(vcns_exc.VcnsNotFound,
                                      self.driver.get_firewall_rule,
                                      ctx, fr[0]['firewall_rule']['id'],
                                      VSE_ID)
def test_insert_rule(self):
ctx = context.get_admin_context()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(firewall_policy_id=fwp_id) as firewall:
fw_create = firewall['firewall']
fw_create = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_create)
with contextlib.nested(self.firewall_rule(name='fwr0',
do_delete=False),
self.firewall_rule(name='fwr1',
do_delete=False),
self.firewall_rule(name='fwr2',
do_delete=False),
self.firewall_rule(name='fwr3',
do_delete=False),
self.firewall_rule(name='fwr4',
do_delete=False),
self.firewall_rule(name='fwr5',
do_delete=False),
self.firewall_rule(
name='fwr6',
do_delete=False)) as fwr:
# test insert when rule list is empty
fwr0_id = fwr[0]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr0_id,
insert_before=None,
insert_after=None,
expected_code=webob.exc.HTTPOk.code)
fw_update = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
self.driver.update_firewall(ctx, VSE_ID, fw_update)
# test insert at top of list above existing rule
fwr1_id = fwr[1]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr1_id,
insert_before=fwr0_id,
insert_after=None,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr1_id,
'insert_before': fwr0_id,
'insert_after': None}
rule = fwr[1]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
# test insert at bottom of list
fwr2_id = fwr[2]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr2_id,
insert_before=None,
insert_after=fwr0_id,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr2_id,
'insert_before': None,
'insert_after': fwr0_id}
rule = fwr[2]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
# test insert in the middle of the list using
# insert_before
fwr3_id = fwr[3]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr3_id,
insert_before=fwr2_id,
insert_after=None,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr3_id,
'insert_before': fwr2_id,
'insert_after': None}
rule = fwr[3]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
# test insert in the middle of the list using
# insert_after
fwr4_id = fwr[4]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr4_id,
insert_before=None,
insert_after=fwr3_id,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr4_id,
'insert_before': None,
'insert_after': fwr3_id}
rule = fwr[4]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
# test insert when both insert_before and
# insert_after are set
fwr5_id = fwr[5]['firewall_rule']['id']
self._rule_action('insert', fwp_id, fwr5_id,
insert_before=fwr4_id,
insert_after=fwr4_id,
expected_code=webob.exc.HTTPOk.code)
fw_expect = self._make_firewall_dict_with_rules(
ctx, fw_create['id'])
rule_info = {'firewall_rule_id': fwr5_id,
'insert_before': fwr4_id,
'insert_after': fwr4_id}
rule = fwr[5]['firewall_rule']
self.driver.insert_rule(ctx, rule_info, VSE_ID, rule)
fw_get = self.driver.get_firewall(
ctx, VSE_ID)
self._compare_firewall_rule_lists(
fwp_id, fw_get['firewall_rule_list'],
fw_expect['firewall_rule_list'])
|
|
# Natural Language Toolkit: Chunk parsing API
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Named entity chunker
"""
from __future__ import print_function
import os, re, pickle
from xml.etree import ElementTree as ET
from nltk.tag import ClassifierBasedTagger, pos_tag
try:
from nltk.classify import MaxentClassifier
except ImportError:
pass
from nltk.tree import Tree
from nltk.tokenize import word_tokenize
from nltk.data import find
from nltk.chunk.api import ChunkParserI
from nltk.chunk.util import ChunkScore
class NEChunkParserTagger(ClassifierBasedTagger):
    """
    The IOB tagger used by the chunk parser: a maxent classifier over
    orthographic, POS, and tag-history features.
    """
    def __init__(self, train):
        ClassifierBasedTagger.__init__(
            self, train=train,
            classifier_builder=self._classifier_builder)

    def _classifier_builder(self, train):
        # Train a maxent model using the external 'megam' optimizer.
        return MaxentClassifier.train(train, algorithm='megam',
                                      gaussian_prior_sigma=1,
                                      trace=2)

    def _english_wordlist(self):
        # Lazily load and cache the basic English word list; the corpus
        # import is deferred so it only happens on first use.
        try:
            wl = self._en_wordlist
        except AttributeError:
            from nltk.corpus import words
            self._en_wordlist = set(words.words('en-basic'))
            wl = self._en_wordlist
        return wl

    def _feature_detector(self, tokens, index, history):
        """Build the feature dict for ``tokens[index]``.

        *tokens* is a list of (word, pos) pairs; *history* holds the IOB
        tags already assigned to ``tokens[:index]``.
        """
        word = tokens[index][0]
        pos = simplify_pos(tokens[index][1])
        if index == 0:
            prevword = prevprevword = None
            prevpos = prevprevpos = None
            prevshape = prevtag = prevprevtag = None
        elif index == 1:
            prevword = tokens[index-1][0].lower()
            prevprevword = None
            prevpos = simplify_pos(tokens[index-1][1])
            prevprevpos = None
            # BUG FIX: this previously read history[index-1][0], which
            # took only the first character of the previous tag
            # ('B'/'I'/'O') at index 1, while the general branch below
            # uses the full tag string.  Use the full tag consistently.
            prevtag = history[index-1]
            prevshape = prevprevtag = None
        else:
            prevword = tokens[index-1][0].lower()
            prevprevword = tokens[index-2][0].lower()
            prevpos = simplify_pos(tokens[index-1][1])
            prevprevpos = simplify_pos(tokens[index-2][1])
            prevtag = history[index-1]
            prevprevtag = history[index-2]
            prevshape = shape(prevword)
        if index == len(tokens)-1:
            nextword = nextnextword = None
            nextpos = nextnextpos = None
        elif index == len(tokens)-2:
            # NOTE(review): nextpos is lowercased rather than passed
            # through simplify_pos like prevpos -- kept as-is to avoid
            # changing the trained feature space; confirm before unifying.
            nextword = tokens[index+1][0].lower()
            nextpos = tokens[index+1][1].lower()
            nextnextword = None
            nextnextpos = None
        else:
            nextword = tokens[index+1][0].lower()
            nextpos = tokens[index+1][1].lower()
            nextnextword = tokens[index+2][0].lower()
            nextnextpos = tokens[index+2][1].lower()

        # 89.6
        features = {
            'bias': True,
            'shape': shape(word),
            'wordlen': len(word),
            'prefix3': word[:3].lower(),
            'suffix3': word[-3:].lower(),
            'pos': pos,
            'word': word,
            'en-wordlist': (word in self._english_wordlist()),
            'prevtag': prevtag,
            'prevpos': prevpos,
            'nextpos': nextpos,
            'prevword': prevword,
            'nextword': nextword,
            'word+nextpos': '%s+%s' % (word.lower(), nextpos),
            'pos+prevtag': '%s+%s' % (pos, prevtag),
            'shape+prevtag': '%s+%s' % (prevshape, prevtag),
            }
        return features
class NEChunkParser(ChunkParserI):
    """
    Expected input: list of pos-tagged words
    """
    def __init__(self, train):
        self._train(train)

    def parse(self, tokens):
        """
        Each token should be a pos-tagged word
        """
        return self._tagged_to_parse(self._tagger.tag(tokens))

    def _train(self, corpus):
        # Flatten each training tree into an IOB-tagged sequence, then
        # fit the classifier-based tagger on those sequences.
        tagged_corpus = [self._parse_to_tagged(sent) for sent in corpus]
        self._tagger = NEChunkParserTagger(train=tagged_corpus)

    def _tagged_to_parse(self, tagged_tokens):
        """
        Convert a list of tagged tokens to a chunk-parse tree.
        """
        sent = Tree('S', [])
        for tok, tag in tagged_tokens:
            if tag == 'O':
                sent.append(tok)
                continue
            label = tag[2:]
            if tag.startswith('B-'):
                sent.append(Tree(label, [tok]))
            elif tag.startswith('I-'):
                # Extend the current chunk if it carries the same label;
                # otherwise start a fresh chunk (robust to stray I- tags).
                last = sent[-1] if len(sent) else None
                if isinstance(last, Tree) and last.label() == label:
                    last.append(tok)
                else:
                    sent.append(Tree(label, [tok]))
        return sent

    @staticmethod
    def _parse_to_tagged(sent):
        """
        Convert a chunk-parse tree to a list of tagged tokens.
        """
        toks = []
        for child in sent:
            if not isinstance(child, Tree):
                toks.append((child, 'O'))
            elif len(child) == 0:
                print("Warning -- empty chunk in sentence")
            else:
                label = child.label()
                toks.append((child[0], 'B-%s' % label))
                for tok in child[1:]:
                    toks.append((tok, 'I-%s' % label))
        return toks
def shape(word):
    """Classify the orthographic shape of *word*.

    Returns one of: 'number', 'punct', 'upcase', 'downcase',
    'mixedcase', or 'other'.
    """
    # BUG FIX: the original number pattern,
    # '[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$', anchored only the SECOND
    # alternative, so re.match classified e.g. '123abc' as 'number'
    # via an unanchored prefix match.  Grouping the alternation before
    # the '$' makes the whole word participate in the match.
    if re.match(r'([0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+)$', word):
        return 'number'
    elif re.match(r'\W+$', word):
        return 'punct'
    elif re.match(r'[A-Z][a-z]+$', word):
        return 'upcase'
    elif re.match(r'[a-z]+$', word):
        return 'downcase'
    elif re.match(r'\w+$', word):
        return 'mixedcase'
    else:
        return 'other'
def simplify_pos(s):
    """Collapse a POS tag: any verb tag becomes 'V'; otherwise strip
    everything after the first hyphen."""
    return 'V' if s.startswith('V') else s.split('-')[0]
def postag_tree(tree):
    """Return a copy of *tree* whose leaves are (word, pos) pairs,
    produced by running ``pos_tag`` over the tree's leaves."""
    # Tag the whole sentence once, then walk the tree structure and
    # re-attach tags in leaf order.
    tags = iter(pos_tag(tree.leaves()))
    newtree = Tree('S', [])
    for child in tree:
        if isinstance(child, Tree):
            chunk = Tree(child.label(), [])
            for leaf in child:
                chunk.append((leaf, next(tags)[1]))
            newtree.append(chunk)
        else:
            newtree.append((child, next(tags)[1]))
    return newtree
def load_ace_data(roots, fmt='binary', skip_bnews=True):
    """Yield chunked sentences from every ``.sgm`` file found under the
    directories in *roots* (optionally skipping 'bnews' transcripts)."""
    for base in roots:
        for dirpath, dirs, files in os.walk(base):
            # Broadcast-news directories are skipped by default.
            if skip_bnews and dirpath.endswith('bnews'):
                continue
            for fname in files:
                if not fname.endswith('.sgm'):
                    continue
                for sent in load_ace_file(os.path.join(dirpath, fname),
                                          fmt):
                    yield sent
def load_ace_file(textfile, fmt):
    """
    Parse one ACE ``.sgm`` file together with its entity-annotation XML
    sidecar and yield a chunk tree.  With ``fmt='binary'`` every entity
    chunk is labeled 'NE'; with ``fmt='multiclass'`` chunks carry the
    ACE entity type.  Character offsets from the XML are applied to the
    tag-stripped text, so the stripping below must not change indices
    inside <TEXT>.
    """
    print(' - %s' % os.path.split(textfile)[1])
    annfile = textfile+'.tmx.rdc.xml'

    # Read the xml file, and get a list of entities
    entities = []
    with open(annfile, 'r') as infile:
        xml = ET.parse(infile).getroot()
    for entity in xml.findall('document/entity'):
        typ = entity.find('entity_type').text
        for mention in entity.findall('entity_mention'):
            if mention.get('TYPE') != 'NAME': continue # only NEs
            s = int(mention.find('head/charseq/start').text)
            # end offset is inclusive in the annotation; make it exclusive
            e = int(mention.find('head/charseq/end').text)+1
            entities.append( (s, e, typ) )

    # Read the text file, and mark the entities.
    with open(textfile, 'r') as infile:
        text = infile.read()

    # Strip XML tags, since they don't count towards the indices
    text = re.sub('<(?!/?TEXT)[^>]+>', '', text)

    # Blank out anything before/after <TEXT>
    # (replace with spaces of the same length so offsets stay valid;
    # the -6 accounts for the '<TEXT>' tag itself)
    def subfunc(m): return ' '*(m.end()-m.start()-6)
    text = re.sub('[\s\S]*<TEXT>', subfunc, text)
    text = re.sub('</TEXT>[\s\S]*', '', text)

    # Simplify quotes
    text = re.sub("``", ' "', text)
    text = re.sub("''", '" ', text)

    entity_types = set(typ for (s,e,typ) in entities)

    # Binary distinction (NE or not NE)
    if fmt == 'binary':
        i = 0
        toks = Tree('S', [])
        for (s,e,typ) in sorted(entities):
            if s < i: s = i # Overlapping!  Deal with this better?
            if e <= s: continue
            # tokens between the previous entity and this one are plain
            toks.extend(word_tokenize(text[i:s]))
            toks.append(Tree('NE', text[s:e].split()))
            i = e
        toks.extend(word_tokenize(text[i:]))
        yield toks

    # Multiclass distinction (NE type)
    elif fmt == 'multiclass':
        i = 0
        toks = Tree('S', [])
        for (s,e,typ) in sorted(entities):
            if s < i: s = i # Overlapping!  Deal with this better?
            if e <= s: continue
            toks.extend(word_tokenize(text[i:s]))
            toks.append(Tree(typ, text[s:e].split()))
            i = e
        toks.extend(word_tokenize(text[i:]))
        yield toks

    else:
        raise ValueError('bad fmt value')
# This probably belongs in a more general-purpose location (as does
# the parse_to_tagged function).
def cmp_chunks(correct, guessed):
    """Print a side-by-side comparison of gold vs. guessed IOB tags,
    collapsing runs where both tags are 'O' into a single ellipsis."""
    correct = NEChunkParser._parse_to_tagged(correct)
    guessed = NEChunkParser._parse_to_tagged(guessed)
    ellipsis = False
    for (cw, ct), (gw, gt) in zip(correct, guessed):
        # The original unpacked both words into the same name; the
        # guessed token's word is the one that was printed.
        w = gw
        if ct == gt == 'O':
            # Print the first all-'O' line of a run, then suppress the
            # rest of the run behind an ellipsis marker.
            if not ellipsis:
                print("  %-15s %-15s %s" % (ct, gt, w))
                print('  %-15s %-15s %s' % ('...', '...', '...'))
                ellipsis = True
        else:
            ellipsis = False
            print("  %-15s %-15s %s" % (ct, gt, w))
def build_model(fmt='binary'):
    """Train an NE chunker on the ACE corpora, evaluate it on the held-out
    set, pickle it under /tmp, and return the trained parser."""
    print('Loading training data...')
    train_paths = [find('corpora/ace_data/ace.dev'),
                   find('corpora/ace_data/ace.heldout'),
                   find('corpora/ace_data/bbn.dev'),
                   find('corpora/ace_data/muc.dev')]
    train_data = [postag_tree(t)
                  for t in load_ace_data(train_paths, fmt)]

    print('Training...')
    cp = NEChunkParser(train_data)
    # Free the training set before loading evaluation data.
    del train_data

    print('Loading eval data...')
    eval_paths = [find('corpora/ace_data/ace.eval')]
    eval_data = [postag_tree(t)
                 for t in load_ace_data(eval_paths, fmt)]

    print('Evaluating...')
    chunkscore = ChunkScore()
    for i, correct in enumerate(eval_data):
        guess = cp.parse(correct.leaves())
        chunkscore.score(correct, guess)
        # Show a detailed diff for the first few sentences only.
        if i < 3:
            cmp_chunks(correct, guess)
    print(chunkscore)

    outfilename = '/tmp/ne_chunker_%s.pickle' % fmt
    print('Saving chunker to %s...' % outfilename)
    with open(outfilename, 'wb') as outfile:
        pickle.dump(cp, outfile, -1)

    return cp
if __name__ == '__main__':
    # Make sure that the pickled object has the right class name:
    # importing build_model from its canonical module path means the
    # pickled chunker references nltk.chunk.named_entity, not __main__.
    from nltk.chunk.named_entity import build_model
    build_model('binary')
    build_model('multiclass')
|
|
#from scamp import entryExit
import utilities
global itr
itr = 0
def load_spectra():
    """Load and return the pickled spectra from 'picklespectra' in the
    current working directory.

    Fixes in this version: the file is opened in binary mode (required
    for pickle data on Python 3 and safe on Python 2) and is closed
    deterministically via a context manager; the original leaked the
    handle and used text mode.
    """
    import pickle
    with open('picklespectra', 'rb') as f:
        return pickle.load(f)
''' get SDSS zeropoint if exists '''
def get_sdss_zp(run,night,snpath):
    # Fetch per-filter SDSS zeropoints for a supernova field from the
    # local 'calib' MySQL database.  Returns a dict keyed by filter name
    # (plus a dummy 'JCAT': 0 entry), or None if any filter lacks a ZP.
    # NOTE(review): 'night' is unused here -- presumably kept for call
    # signature symmetry; confirm against callers before removing.
    # NOTE(review): the query is built by string concatenation; inputs
    # are assumed to be trusted internal identifiers.
    import MySQLdb
    db2 = MySQLdb.connect(db='calib')
    c = db2.cursor()
    zps={'JCAT':0}
    OK = True
    for filt in ['u','g','r','i','z']:
        command = "SELECT SDSSZP from CALIB where SN='" + snpath + "' and FILT='" + filt + "' and NAME='reg' and RUN='" + run + "'"
        print command
        c.execute(command)
        zp = c.fetchall()[0][0]
        if str(zp) != 'None':
            print zp
            zps[filt] = float(zp)
        else: OK = False
    # Only return a result when every filter had a zeropoint.
    if OK:
        return zps #['u'], zps['g'], zps['r'], zps['i'], zps['z']
    else:
        return None
def assign_zp(filt, pars, zps):
    """Return the fit parameter for *filt* (looked up via its slot index
    in *zps*), or 0 when the filter has no zeropoint slot."""
    if filt not in zps:
        # raise Exception
        return 0
    return pars[zps[filt]]
def get_kit():
    """Load and return the pickled stellar-locus 'kit' from
    ``$kpno/process_kpno/locuskit``.

    Fixes in this version: binary mode (required for pickle data) and a
    context manager so the file handle is not leaked.
    """
    import pickle, os
    with open(os.environ['kpno'] + '/process_kpno/locuskit', 'rb') as f:
        return pickle.load(f)
#def get_locus():
# import pickle
# f = open('/Volumes/mosquitocoast/patrick/kpno/process_kpno/kpnolocus','r')
# m = pickle.Unpickler(f)
# locus = m.load()
# return locus
def get_locus():
    """Load and return the pickled synthetic stellar locus from
    'synthlocus' in the current working directory.

    Fixes in this version: binary mode (required for pickle data) and a
    context manager so the file handle is not leaked.
    """
    import pickle
    with open('synthlocus', 'rb') as f:
        return pickle.load(f)
def locus():
    # Parse 'locus.txt' into a dict mapping a color name (taken from a
    # header line with no leading space) to the list of float values on
    # the following indented line(s).
    import os, re
    f = open('locus.txt','r').readlines()
    id = -1
    rows = {}
    bands = {}
    for i in range(len(f)):
        l = f[i]
        if l[0] != ' ':
            # Header line: remember it under its line index.
            rows[i] = l[:-1]
        else:
            id += 1
            # NOTE(review): rows is keyed by line index i but read back
            # with the running data-line counter id -- this only lines
            # up for the specific layout of locus.txt (all headers
            # first); confirm the file format before reusing this.
            bands[rows[id]] = [float(x) for x in re.split('\s+',l[:-1])[1:]]
    print bands.keys()
    #pylab.scatter(bands['GSDSS_ZSDSS'],bands['RSDSS_ISDSS'])
    #pylab.show()
    return bands
#@entryExit
#def all(catalog_dir,cluster,magtype='APER1',location=None):
def all(subarudir,cluster,DETECT_FILTER,aptype,magtype,location=None):
    # Top-level driver for stellar-locus-regression (SLR) photometric
    # calibration of one cluster: calibrates the star catalog against
    # standard stars, fits per-filter zeropoint offsets to the stellar
    # locus, writes the offset lists, and re-runs the downstream
    # calibration scripts.
    # NOTE(review): this function shadows the builtin all(); it is also
    # Python 2 only (print statements, string.find, reload, filter
    # returning a list).
    save_slr_flag = photocalibrate_cat_flag = '--spec mode=' + magtype.replace('1','').replace('APER','aper').replace('2','')
    catalog_dir = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + aptype + '/'
    catalog_dir_iso = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + '_iso/'
    import astropy.io.fits as pyfits, os, string, random
    min_err = 0.02
    #catalog_dir = '/'.join(catalog.split('/')[:-1])
    catalog_notcal = catalog_dir + '/' + cluster + '.stars.cat'
    catalog = catalog_dir + '/' + cluster + '.stars.calibrated.cat'
    # First, calibrate the raw star catalog with standard-star zeropoints.
    command = './photocalibrate_cat.py -i %(catalog_notcal)s -c %(cluster)s -t standard -o %(catalog)s %(photocalibrate_cat_flag)s' % {'cluster':cluster, 'catalog_notcal':catalog_notcal, 'catalog':catalog, 'photocalibrate_cat_flag':photocalibrate_cat_flag}
    print command
    os.system(command)
    offset_list = catalog_dir + '/multiSAVEZP.offsets.list'
    complete_offset_list = catalog_dir + '/multiCOMPLETE.offsets.list'
    slr_high = catalog_dir + '/slr.offsets.list'
    from glob import glob
    startingzps = {}
    # Seed starting zeropoints from a previous SLR run, if one exists.
    if glob(slr_high):
        f = open(slr_high,'r').readlines()
        for l in f:
            res = l.split(' ')
            filt = res[1]
            zp = float(res[2])
            startingzps[filt.replace('10_2','').replace('10_1','').replace('10_3','')] = zp
    offset_list_file = open(offset_list,'w')
    complete_offset_list_file = open(complete_offset_list,'w')
    print catalog_dir, offset_list
    #zps_dict = {'full':{'SUBARU-10_2-1-W-J-B': 0.16128103741856098, 'SUBARU-10_2-1-W-C-RC': 0.0, 'SUBARU-10_2-1-W-S-Z+': 0.011793588122789772, 'MEGAPRIME-10_2-1-u': 0.060291451932493148, 'SUBARU-10_2-1-W-C-IC': 0.0012269407091880637, 'SUBARU-10_2-1-W-J-V': 0.013435398732369786}}
    ''' get catalog filters '''
    import do_multiple_photoz
    filterlist = do_multiple_photoz.get_filters(catalog,'OBJECTS')
    print filterlist
    filterlist.sort()
    print filterlist
    reload(do_multiple_photoz)
    import pylab
    print catalog
    table = pyfits.open(catalog)[1].data[:]
    print catalog, 'catalog'
    # Use the first star's position to derive galactic coordinates and a
    # per-filter dust-extinction correction.
    alpha = [table.field('ALPHA_J2000')[0]]
    delta = [table.field('DELTA_J2000')[0]]
    import utilities
    gallong, gallat = utilities.convert_to_galactic(alpha, delta)
    ebv = utilities.getDust(alpha,delta)
    extinct = {}
    for filt in filterlist:
        extinct[filt] = utilities.getExtinction(filt) * ebv[0]
    print extinct
    print ebv, 'ebv', alpha, delta, gallong, gallat
    #location = os.environ['sne'] + '/photoz/' + cluster + '/SLRplots/'
    if location is None:
        location = os.environ['sne'] + '/photoz/' + cluster + '/SLRplots/'
    print 'deleting old plots'
    os.system('rm ' + location + '/*')
    os.system('mkdir -p ' + location)
    print 'finished deleting old plots'
    # Load the synthetic locus magnitudes and color pairs.
    import pickle
    f = open('maglocus_SYNTH','r')
    m = pickle.Unpickler(f)
    locus_mags = m.load()
    #import pickle
    #f = open('maglocus_SYNTH','r')
    #m = pickle.Unpickler(f)
    locus_pairs = get_locus() #m.load()
    if True:
        ''' assign locus color to each instrument band '''
        instrument_to_locus = {}
        for filt in filterlist:
            a_short = filt.replace('+','').replace('C','')[-1]
            print filt, a_short
            ok = True
            if string.find(filt,'WHT') != -1:
                a_short = 'WHT' + a_short.upper()
            elif string.find(filt,'MEGAPRIME') != -1:
                a_short = 'MP' + a_short.upper() + 'SUBARU'
            elif string.find(filt,'SUBARU') != -1:
                if string.find(filt,"W-S-") != -1:
                    a_short = 'WS' + a_short.upper() + 'SUBARU'
                else:
                    a_short = a_short.upper() + 'JOHN'
            # Skip chip-2 catalogs and the problematic u/U bands.
            if string.find(filt,'-2-')==-1 and not (string.find(filt,'MEGAPRIME') != -1 and filt[-1] == 'u') and not (string.find(filt,'WHT') != -1 and filt[-1] == 'U'):
                instrument_to_locus[filt] = a_short
        print instrument_to_locus
        #instrument_to_locus = {'u':'U'+DET,'g':'G'+DET,'r':'R'+DET,'i':'I'+DET,'z':'Z'+DET,'JCAT':'JTMASS'}
        ''' figure out the filter to hold '''
        # NOTE(review): 'list' shadows the builtin; first match wins.
        list = ['SUBARU-10_1-1-W-C-RC','SUBARU-10_2-1-W-C-RC','MEGAPRIME-0-1-r','SUBARU-10_2-1-W-S-R+','SUBARU-9-4-W-C-RC','SUBARU-10_2-1-W-S-I+',]
        for filt in list:
            if filt in filterlist:
                hold_all = filt
                break
        ''' THROWING OUT ALL 10_*_2 chips '''
        def f(x): return x!=hold_all and not (string.find(x,'-2-') != -1 and string.find(x,'10')!=-1) and not (string.find(x,'MEGAPRIME') != -1 and x[-1] == 'u') and not (string.find(x,'WHT') != -1 and string.find(x,'U') != -1) #and string.find(x,'10_3') == -1
        vary_list = filter(f, filterlist)
        print vary_list, filterlist
        #vary_list = ['SUBARU-10_3-1-W-J-V','SUBARU-10_2-1-W-S-Z+','SUBARU-10_2-1-W-C-IC','SUBARU-10_3-1-W-C-IC','MEGAPRIME-10_2-1-i','MEGAPRIME-10_2-1-z']
        print vary_list
        moststarfilts, good_star_nums = do_multiple_photoz.figure_out_slr_chip(vary_list+[hold_all],catalog,'OBJECTS')
        print moststarfilts
        #vary_list = ['SUBARU-10_3-1-W-J-V','SUBARU-10_2-1-W-S-Z+','SUBARU-10_2-1-W-C-IC','MEGAPRIME-10_2-1-i']
        print vary_list
        # NOTE(review): bare expression, has no effect.
        hold_all
        #while
        ''' designate which filter zeropoint to be held constant when matching bands '''
        combos = [{'hold':hold_all,'vary':vary_list}]
        zps_dict_all = {}
        def update_zps(zps_dict_all,results):
            # Accumulate fitted offsets relative to the held filter.
            # NOTE(review): reads 'combo' from the enclosing loop scope.
            if not combo['hold'] in zps_dict_all:
                zps_dict_all[combo['hold']] = 0.
            for key in combo['vary']:
                zps_dict_all[key] = zps_dict_all[combo['hold']] + results['full'][key]
            return zps_dict_all
        for combo in combos:
            results = fit(table, combo, instrument_to_locus, magtype, locus_mags, locus_pairs, min_err, bootstrap=False, startingzps=None, plotdir=location, pre_zps=None, gallat=gallat, extinct=extinct)
            print results
            zps_dict_all = update_zps(zps_dict_all,results)
        ''' finally fit all bands at once '''
        #combo = {'hold':'JCAT','vary':['u','g','r','i','z']}
        #results = fit(table, combo, instrument_to_locus, magtype, locus_mags, min_err, startingzps=zps_dict_all, bootstrap=True, plotdir=location, pre_zps=None,gallat=gallat)
        #zps_dict_all = update_zps(zps_dict_all,results)
        #print zps_dict_all
    # Disabled alternative path for SDSS-calibrated inputs.
    if False:
        ''' assign locus color to each instrument band '''
        DET = 'SDSS'
        magtype='APER1'
        instrument_to_locus = {'SDSS_u':'U'+DET,'SDSS_g':'G'+DET,'SDSS_r':'R'+DET,'SDSS_i':'I'+DET,'SDSS_z':'Z'+DET,'JCAT':'JTMASS'}
        ''' designate which filter zeropoint to be held constant when matching bands '''
        combos = [{'hold':'SDSS_z','vary':['SDSS_r','SDSS_i']},{'hold':'SDSS_r','vary':['SDSS_u','SDSS_g']}]
        zps_dict_all = {}
        def update_zps(zps_dict_all,results):
            if not combo['hold'] in zps_dict_all:
                zps_dict_all[combo['hold']] = 0.
            for key in combo['vary']:
                zps_dict_all[key] = zps_dict_all[combo['hold']] + results['full'][key]
            return zps_dict_all
        if True:
            ''' first fit combinations of three bands'''
            for combo in combos:
                results = fit(table, combo, instrument_to_locus, magtype, locus_mags, locus_pairs, min_err, bootstrap=False,plotdir=location, pre_zps=False)
                print results
                zps_dict_all = update_zps(zps_dict_all,results)
            ''' finally fit all bands at once '''
            combo = {'hold':'SDSS_z','vary':['SDSS_u','SDSS_g','SDSS_r','SDSS_i']}
            results = fit(table, combo, instrument_to_locus, magtype, locus_mags, locus_pairs, min_err, startingzps=zps_dict_all, bootstrap=True, plotdir=location, pre_zps=False, extinct=extinct)
            zps_dict_all = update_zps(zps_dict_all,results)
            print zps_dict_all
    #zps_dict_all = {'SUBARU-10_2-1-W-J-B': 0.16128103741856098, 'SUBARU-10_2-1-W-C-RC': 0.0, 'SUBARU-10_2-1-W-S-Z+': 0.011793588122789772, 'MEGAPRIME-10_2-1-u': 0.060291451932493148, 'SUBARU-10_2-1-W-C-IC': 0.0012269407091880637, 'SUBARU-10_2-1-W-J-V': 0.013435398732369786}
    #zps_dict_all['SUBARU-10_2-1-W-C-RC'] = -99
    print zps_dict_all
    #for key in zps_dict_all.keys():
    print zps_dict_all.keys(),
    ''' select chips w/ most stars '''
    for key in moststarfilts:
        print key
        offset_list_file.write('DUMMY ' + moststarfilts[key] + ' ' + str(zps_dict_all[moststarfilts[key]]) + ' 0\n')
        #offset_list_file.write('DUMMY ' + key + ' ' + str(-99) + ' 0\n')
    offset_list_file.close()
    ''' record all ZPs and numbers of stars '''
    for key in zps_dict_all.keys():
        complete_offset_list_file.write('DUMMY ' + key + ' ' + str(zps_dict_all[key]) + ' ' + str(good_star_nums[key]) + '\n')
    complete_offset_list_file.close()
    if magtype == 'APER1': aptype='aper'
    elif magtype == 'ISO': aptype='iso'
    save_slr_flag = photocalibrate_cat_flag = '--spec mode=' + magtype
    # Persist the fitted SLR zeropoints.
    print 'running save_slr'
    command = './save_slr.py -c %(cluster)s -i %(catalog)s -o %(offset_list)s %(save_slr_flag)s' % {'cluster':cluster, 'catalog':catalog, 'offset_list':offset_list, 'save_slr_flag':save_slr_flag}
    print command
    os.system(command)
    # Disabled: re-calibration of the unstacked catalogs with SLR offsets.
    if False:
        slr_catalog_dir = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + aptype + '/'
        slr_catalog_dir_iso = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + '_iso/'
        photocalibrate_cat_flag = '--spec mode=' + magtype
        all_phot_cat = slr_catalog_dir + '/' + cluster + '.unstacked.cat'
        all_phot_cat_iso = slr_catalog_dir_iso + '/' + cluster + '.unstacked.cat'
        slr_out = slr_catalog_dir + '/' + cluster + '.slr.cat'
        slr_out_iso = slr_catalog_dir_iso + '/' + cluster + '.slr.cat'
        print 'running photocalibrate_cat'
        command = './photocalibrate_cat.py -i %(all_phot_cat_iso)s -c %(cluster)s -o %(slr_out_iso)s -t slr %(photocalibrate_cat_flag)s' % {'cluster':cluster, 'all_phot_cat_iso':all_phot_cat, 'slr_out_iso':slr_out_iso, 'photocalibrate_cat_flag':photocalibrate_cat_flag}
        os.system(command)
        command = './photocalibrate_cat.py -i %(all_phot_cat)s -c %(cluster)s -o %(slr_out)s -t slr %(photocalibrate_cat_flag)s' % {'cluster':cluster, 'all_phot_cat':all_phot_cat, 'slr_out':slr_out, 'photocalibrate_cat_flag':photocalibrate_cat_flag}
        os.system(command)
    print 'finished'
    import calc_test_save
    calc_test_save.photocalibrate(cluster)
    #for band in [['r','i','u','g'],['g','r','i','z'],['g','r','u','g'],['r','i','i','z'],['i','JCAT','i','z']]:
    #    plot(table,zps_dict_all,instrument_to_locus,magtype,locus_c, min_err,band,location)
    #return results
def plot(table,zplist,instrument_to_locus,magtype,locus_c, min_err,bands,location, alt_locus_c=None):
    # Plot the measured color-color diagram (b1-b2 vs b3-b4) for the
    # calibrated stars on top of the model stellar locus, and save it as
    # a PNG under *location*.  Returns early (plotting nothing) if the
    # requested color pair is not available in locus_c.
    b1,b2,b3,b4 = bands
    import pylab
    pylab.clf()
    # Optionally overplot an alternative locus in green.
    if alt_locus_c:
        if instrument_to_locus[b1]+'_'+instrument_to_locus[b2] in alt_locus_c and instrument_to_locus[b3]+'_'+instrument_to_locus[b4] in alt_locus_c:
            print [instrument_to_locus[a] for a in [b1,b2,b3,b4]]
            pylab.scatter(alt_locus_c[instrument_to_locus[b1]+'_'+instrument_to_locus[b2]],alt_locus_c[instrument_to_locus[b3]+'_'+instrument_to_locus[b4]],color='green')
    if instrument_to_locus[b1]+'_'+instrument_to_locus[b2] in locus_c and instrument_to_locus[b3]+'_'+instrument_to_locus[b4] in locus_c:
        print [instrument_to_locus[a] for a in [b1,b2,b3,b4]]
        pylab.scatter(locus_c[instrument_to_locus[b1]+'_'+instrument_to_locus[b2]],locus_c[instrument_to_locus[b3]+'_'+instrument_to_locus[b4]],color='red')
    else:
        print '\n\n\n********************'
        print b1 +'-'+b2 + ' and ' + b3 + '-' + b4 + ' not both locus color'
        print 'possible locus bands:'
        print locus_c.keys()
        return
    # Zeropoint-corrected colors for the x and y axes.
    x1 = table.field('MAG_' + magtype + '_reg_' + b1)
    x2 = table.field('MAG_' + magtype + '_reg_' + b2)
    x1_err = table.field('MAGERR_' + magtype + '_reg_' + b1)
    x2_err = table.field('MAGERR_' + magtype + '_reg_' + b2)
    x = x1 -zplist[b1] - (x2 - zplist[b2])
    # Impose the minimum photometric error floor.
    x1_err[x1_err<min_err] = min_err
    x2_err[x2_err<min_err] = min_err
    x_err = (x1_err**2.+x2_err**2.)**0.5
    y1 = table.field('MAG_' + magtype + '_reg_' + b3)
    y2 = table.field('MAG_' + magtype + '_reg_' + b4)
    y1_err = table.field('MAGERR_' + magtype + '_reg_' + b3)
    y2_err = table.field('MAGERR_' + magtype + '_reg_' + b4)
    y1_err[y1_err<min_err] = min_err
    y2_err[y2_err<min_err] = min_err
    y = y1 -zplist[b3] - (y2 - zplist[b4])
    y_err = (y1_err**2.+y2_err**2.)**0.5
    import scipy
    # Keep only stars with valid (non-sentinel, |mag| < 90) photometry
    # in all four bands.
    good = scipy.array(abs(x1)<90) * scipy.array(abs(x2)<90) * scipy.array(abs(y1)<90) * scipy.array(abs(y2)<90)
    pylab.scatter(x[good],y[good])
    pylab.errorbar(x[good],y[good],xerr=x_err[good],yerr=y_err[good],fmt=None)
    pylab.xlabel(b1 + '-' + b2,fontsize='x-large')
    pylab.ylabel(b3 + '-' + b4,fontsize='x-large')
    # NOTE(review): 'os' is not imported in this function's scope (other
    # functions in this module import it locally) -- likely NameError
    # here; confirm against the module's top-level imports.
    os.system('mkdir -p ' + location)
    file = location + '/SLR'+b1+b2+b3+b4 +'.png'
    print file
    pylab.savefig(file)
    #pylab.show()
    #pylab.savefig('/Users/pkelly/Dropbox/plot.pdf')
def fit(table, combo_dict, instrument_to_locus, magtype, locus_mags, locus_pairs,
min_err=0.02,
min_bands_per_star=3,
startingzps=None,
plot_iteration_increment=50,
max_err=0.1,
bootstrap=False,
bootstrap_num=0,
plotdir='.',
save_bootstrap_plots=False,
live_plot=True,
pre_zps=None,
gallat=None,
extinct=None):
import string, re, pyfits, random, scipy, pylab
from copy import copy
if live_plot:
pylab.ion()
zps ={}
for i in range(len(combo_dict['vary'])):
zps[combo_dict['vary'][i]] = i
number_locus_points = len(locus_mags)
number_all_stars = len(table.field('MAG_' + magtype + '-' + instrument_to_locus.keys()[0]))
''' for each point in locus, make a list of the locus in each color (locus has same number of points in each color) '''
''' just a rearrangement '''
locus_list = []
for j in range(number_locus_points):
o = []
for c in instrument_to_locus.values():
o.append(locus_mags[j][c])
locus_list.append(o)
results = {}
if bootstrap:
cycles = ['full'] + ['bootstrap' + str(i) for i in range(bootstrap_num)]
else:
cycles = ['full']
for iteration in cycles:
''' make matrix with a full set of locus points for each star '''
locus_matrix = scipy.array(number_all_stars*[locus_list])
print locus_matrix.shape
''' assemble matricies to make instrumental measured bands '''
print instrument_to_locus.keys()
A_band = scipy.swapaxes(scipy.swapaxes(scipy.array(number_locus_points*[[table.field('MAG_' + magtype + '-' + a)[:] for a in instrument_to_locus.keys()]]),0,2),1,2)
n = len(table.field('MAG_' + magtype + '-' + instrument_to_locus.keys()[0]))
def isitJ(name):
import string
if string.find(name,'JCAT') != -1:
return scipy.ones(n)
else:
return scipy.zeros(n)
A_err = scipy.swapaxes(scipy.swapaxes(scipy.array(number_locus_points*[[table.field('MAGERR_' + magtype + '-' + a)[:] for a in instrument_to_locus.keys()]]),0,2),1,2)
print A_err.shape
''' only use stars with errors less than max_err '''
if True:
mask = A_err > max_err
#mask[A_band_J == 1] = 0
mask[A_err > 1.5] = 1
A_band[mask] = 99
''' make matrix specifying good values '''
good = scipy.ones(A_band.shape)
#A_band[abs(A_FLAG) != 0] = 99
#A_band[abs(A_IMAFLAG) != 0] = 99
good[abs(A_band) == 99] = 0
good[abs(A_band) == 0] = 0
good = good[:,0,:]
good_bands_per_star = good.sum(axis=1) # sum all of the good bands for any given star
print good_bands_per_star , A_band.shape
''' figure out the cut-off '''
A_band = A_band[good_bands_per_star>=min_bands_per_star]
A_err = A_err[good_bands_per_star>=min_bands_per_star]
A_err[A_err<min_err] = min_err
locus_matrix = locus_matrix[good_bands_per_star>=min_bands_per_star]
''' if a bootstrap iteration, bootstrap with replacement '''
if string.find(iteration,'bootstrap') != -1:
length = len(A_band)
random_indices = []
unique_indices = {}
for e in range(length):
index = int(random.random()*length - 1)
unique_indices[index] = 'yes'
random_indices.append(index)
print random_indices, len(unique_indices.keys())
A_band = scipy.array([A_band[i] for i in random_indices])
A_err = scipy.array([A_err[i] for i in random_indices])
locus_matrix = scipy.array([locus_matrix[i] for i in random_indices])
bands = A_band
bands_err = A_err
''' set errors on bad measurements (value=+-99) equal to 100000. and bands equal to 0 '''
bands_err[abs(A_band) == 99] = 1000.
bands[abs(A_band) == 99] = 0.
print bands.shape, locus_matrix.shape
number_good_stars = len(locus_matrix)
''' update good matrix after masking '''
good = scipy.ones(A_band.shape)
good[abs(A_band) == 99] = 0
good[abs(A_band) == 0] = 0
global itr
itr = 0
keep_fitting = True
outliers = 'no outlier rejection'
while keep_fitting:
def errfunc(pars,residuals=False,savefig=None):
    """Objective function for the zeropoint fit.

    pars -- array of zeropoint offsets for the 'vary' filters (the
            'hold' filter is pinned to 0 by the caller).
    residuals -- when True, also return per-star diagnostics.
    savefig -- optional filename suffix forwarded to plot_progress().

    Returns the scalar chi^2-like statistic by default, or
    (select_diff, dist, redchi, end_of_locus, nstars) when
    residuals=True.  Reads bands, bands_err, locus_matrix, good, zps,
    combo_dict, iteration, etc. from the enclosing scope and increments
    the global iteration counter itr.
    """
    global itr
    stat_tot = 0
    # Per-(star, locus point, filter) zeropoint offset array.
    zp_bands = scipy.zeros((number_good_stars,number_locus_points,len(instrument_to_locus.keys())))
    for i in range(len(instrument_to_locus.keys())):
        a = instrument_to_locus.keys()[i]
        zp_bands[:,:,i] = assign_zp(a,pars,zps)
    print zp_bands.shape, bands.shape, locus_matrix.shape, good.shape, number_good_stars, number_locus_points
    # Inverse-variance weighted mean offset between each star and each
    # locus point, using only measurements flagged good.
    num_prelim = (bands - locus_matrix + zp_bands) / bands_err**2.
    num_prelim[good == 0] = 0.
    num = (num_prelim.sum(axis=2))
    denom_prelim = 1. / bands_err**2.
    denom_prelim[good == 0] = 0.
    denom = (denom_prelim.sum(axis=2))
    mean = num / denom
    mean_array = scipy.dstack(len(instrument_to_locus.keys())*[mean])
    # Unweighted distance of each star from each locus point.
    ds_prelim = (bands - locus_matrix + zp_bands - mean_array)**2. #/ ds_err**2.
    ds_prelim[good == 0] = 0
    ds = ds_prelim.sum(axis=2)**0.5
    # Error-weighted residual of each star against each locus point.
    resid_prelim = (bands - locus_matrix + zp_bands - mean_array )**2. / bands_err**2.
    plot = (bands -locus_matrix + zp_bands - mean_array )
    resid_prelim[good == 0] = 0
    ''' calculate reduced chi squared '''
    resid = resid_prelim.sum(axis=2) / good.sum(axis=2)
    if False: #live_plot and iteration is 'full' and (itr % plot_iteration_increment == 0 or savefig is not None):
        # Disabled per-star debugging plots for a handful of star indices.
        id = 100
        for id in [10,20,30,40,100,150]:
            best = 99999999999999999999999999999999999999999999999999999999
            for i in range(len(resid[id])):
                if resid[id][i] < best:
                    star = i
                    best = resid[id][i]
            pylab.clf()
            pylab.scatter(range(len(plot[id][star])),plot[id][star])
            #pylab.scatter(range(len(plot[id][0])),locus_matrix[id][0])
            pylab.errorbar(range(len(plot[id][star])),plot[id][star],yerr=bands_err[id][star])
            pylab.ylim([-0.4,0.4])
            pylab.draw()
            pylab.draw()
            print combo_dict['vary'], resid_prelim[id][star], (bands - locus_matrix + zp_bands - mean_array)[id][star], bands_err[id][star], good[id][star], resid[id], resid[id][star]
    ''' these two are not necessarily the same star '''
    dist = ds.min(axis=1)            # distance to closest locus point (unweighted)
    select_diff = resid.min(axis=1)  # residual at the best-matching locus point
    stat_tot = select_diff.sum()
    print 'ZPs', dict(zip([combo_dict['hold']]+combo_dict['vary'],([0.] + ['%.6f' % a for a in pars.tolist()])))
    print len(bands), 'stars'
    redchi = stat_tot / float(len(bands) - 1)
    print 'chi^2', '%.5f' % stat_tot,
    print 'red chi^2', '%.5f' % redchi
    print 'iteration', itr
    # NOTE(review): "iteration is 'full'" relies on string interning;
    # == would be the correct comparison.
    if live_plot and iteration is 'full' and (itr % plot_iteration_increment == 0 or savefig is not None):
        #savefig = None
        plot_progress(pars,stat_tot,savefig)
    itr += 1
    if residuals:
        # Flag stars whose best match is NOT one of the first five
        # (blue-end) locus points; True means keep the star.
        end_of_locus = scipy.array([reduce(lambda x,y: x*y, [resid.min(axis=1)[i] != resid[i][x] for x in range(5)]) for i in range(len(ds.min(axis=1)))])
        print end_of_locus
        return select_diff, dist, redchi, end_of_locus, len(bands)
    else: return stat_tot
def plot_progress(pars,stat_tot=None,savefig=None):
    """Plot color-color diagrams of the stars against the stellar locus.

    pars -- current zeropoint offsets for the 'vary' filters.
    stat_tot -- chi^2 value shown in the plot title, if given.
    savefig -- filename suffix; when given, all filter-pair combinations
               are generated and written under plotdir, otherwise a
               single live plot of the first three filters is drawn.
    Reads bands, bands_err, locus_matrix, zps, pre_zps, extinct,
    iteration, outliers, etc. from the enclosing scope.
    """
    import pylab, scipy
    zp_bands = scipy.zeros((number_good_stars,number_locus_points,len(instrument_to_locus.keys())))
    for i in range(len(instrument_to_locus.keys())):
        a = instrument_to_locus.keys()[i]
        zp_bands[:,:,i] = assign_zp(a,pars,zps)
    if pre_zps:
        pre_zp_bands = scipy.swapaxes(scipy.swapaxes(scipy.array(number_locus_points*[number_good_stars*[[assign_zp(a[0],pars,pre_zps) for a in instrument_to_locus.keys()]]]),0,1),0,0)
        # NOTE(review): 'pre_zpz' looks like a typo for 'pre_zps'; this
        # line raises NameError if the pre_zps branch is ever taken.
        pre_zp_bands = scipy.zeros((number_good_stars,number_locus_points,len(pre_zpz)))
        for i in range(len(pre_zps)):
            a = pre_zps[i]
            # NOTE(review): writes zp_bands, not pre_zp_bands -- verify.
            zp_bands[:,:,i] = assign_zp(a[0][0],pars,zps)-assign_zp(a[1][0],pars,zps)
    oa = instrument_to_locus.keys()
    if savefig is not None:
        # Saving to disk: build every distinct color-color combination.
        index_list = []
        print instrument_to_locus.keys()
        rng = range(len(instrument_to_locus.keys()))
        for a in rng:
            for b in rng:
                if a < b:
                    for c in rng:
                        if b <= c:
                            for d in rng:
                                if c < d:
                                    index_list.append([[oa[a],oa[b]],[oa[c],oa[d]]])
        print index_list
        index_list += [[['SUBARU-10_3-1-W-J-V','SUBARU-10_2-1-W-S-Z+'],['SUBARU-10_2-1-W-C-IC','MEGAPRIME-10_2-1-i']]]
        index_list = index_list[:]
    else:
        # Live plot: only the LAST assignment survives -- a single
        # color-color diagram from the first three filters.
        index_list = [[['MEGAPRIME-10_2-1-g','SUBARU-10_2-1-W-C-RC'],['SUBARU-10_2-1-W-C-RC','MEGAPRIME-10_2-1-z']]]
        index_list = [[['SUBARU-10_2-1-W-C-RC','SUBARU-10_2-1-W-S-Z+'],['SUBARU-10_2-1-W-S-Z+','SUBARU-10_3-1-W-J-V']]]
        index_list = [[['SUBARU-10_3-1-W-J-V','SUBARU-10_2-1-W-S-Z+'],['SUBARU-10_2-1-W-C-IC','MEGAPRIME-10_2-1-i']]]
        index_list = [[[oa[0],oa[1]],[oa[1],oa[2]]]]
    if savefig:
        print index_list
    print index_list
    print instrument_to_locus
    def ind(filt):
        # Index of 'filt' in the filter list; implicitly None if absent.
        for j in range(len(instrument_to_locus.keys())):
            if instrument_to_locus.keys()[j] == filt:
                return j
    for [c1_band1, c1_band2], [c2_band1,c2_band2] in index_list:
        print instrument_to_locus.values()
        print ind(c1_band1), ind(c1_band2)
        print ind(c2_band1), ind(c2_band2)
        print c2_band1, c2_band2
        if ind(c1_band1) is not None and ind(c1_band2) is not None and ind(c2_band1) is not None and ind(c2_band2) is not None:
            # Zeropoint-corrected colors of the observed stars.
            x_color = scipy.array(bands + zp_bands)[:,0,ind(c1_band1)] - scipy.array(bands + zp_bands)[:,0,ind(c1_band2)]
            print ind(c2_band1), ind(c2_band2)
            y_color = (bands + zp_bands)[:,0,ind(c2_band1)] - (bands + zp_bands)[:,0,ind(c2_band2)]
            if pre_zps:
                # NOTE(review): color1_index/color2_index are undefined in
                # this scope -- NameError if pre_zps is set.
                pre_x_color = scipy.array((bands + pre_zp_bands)[:,0,color1_index].tolist())
                pre_y_color = (bands + pre_zp_bands)[:,0,color2_index]
            x_err_1 = (bands_err)[:,0,ind(c1_band1)]
            x_err_2 = (bands_err)[:,0,ind(c1_band2)]
            y_err_1 = (bands_err)[:,0,ind(c2_band1)]
            y_err_2 = (bands_err)[:,0,ind(c2_band2)]
            # Keep only stars measured (err < 100) in all four bands.
            mask = (x_err_1<100)*(x_err_2<100)*(y_err_1<100)*(y_err_2<100)
            x_color = x_color[mask]
            y_color = y_color[mask]
            x_err = (x_err_1**2. + x_err_2**2.)**0.5
            y_err = (y_err_1**2. + y_err_2**2.)**0.5
            y_err = y_err[mask]
            x_err = x_err[mask]
            if pre_zps:
                pre_x_color = pre_x_color[mask]
                pre_y_color = pre_y_color[mask]
            print len(x_color), len(x_color)
            pylab.clf()
            x_a = c1_band1
            x_b = c1_band2
            y_a = c2_band1
            y_b = c2_band2
            print extinct, x_a
            # Galactic extinction (reddening) vector for the two colors.
            x_extinct = extinct[x_a] - extinct[x_b]
            y_extinct = extinct[y_a] - extinct[y_b]
            x_color_name = x_a + '-' + x_b
            y_color_name = y_a + '-' + y_b
            pylab.xlabel(x_color_name,fontsize='x-large')
            pylab.ylabel(y_color_name,fontsize='x-large')
            print x_color_name, y_color_name, x_color, y_color, x_err, y_err
            if len(x_color) and len(y_color):
                pylab.errorbar(x_color,y_color,xerr=x_err,yerr=y_err,fmt=None,ecolor='gray')
                # NOTE(review): mfc='re d' looks like a typo for 'red'.
                pylab.errorbar(x_color,y_color,xerr=0,yerr=0,fmt=None,marker='s',
                               mfc='re d', mec='green', ms=1, mew=1)
            # Overlay the model stellar locus in the same two colors.
            c1_locus = locus_matrix[0,:,ind(c1_band1)] - locus_matrix[0,:,ind(c1_band2)]
            c2_locus = locus_matrix[0,:,ind(c2_band1)] - locus_matrix[0,:,ind(c2_band2)]
            pylab.errorbar(c1_locus,c2_locus,xerr=0,yerr=0,color='red')
            if pre_zps:
                pylab.errorbar(pre_x_color,pre_y_color,xerr=x_err,yerr=y_err,fmt=None,c='green')
                pylab.scatter(pre_x_color,pre_y_color,c='green')
            # Draw the reddening vector at the end of the locus.
            pylab.arrow(c1_locus[0],c2_locus[-1],x_extinct,y_extinct,width=0.01,color='black')
            if stat_tot is not None:
                pylab.title('N=' + str(len(x_color)) + ' chi$^{2}$=' + ('%.1f' % stat_tot) + ' ' + iteration + ' ' + outliers + ' LAT=' + ('%.1f' % gallat))
            if live_plot:
                pylab.draw()
            # Compact tag built from the fitted filters, used in filenames.
            fit_band_zps = reduce(lambda x,y: x + y, [z[-2:].replace('C','').replace('-','') for z in [combo_dict['hold']] + combo_dict['vary']])
            ''' only save figure if savefig is not None '''
            if savefig is not None:
                if (string.find(iteration,'bootstrap')==-1 or save_bootstrap_plots):
                    file = plotdir + '/' + fit_band_zps + '_' + x_color_name + '_' + y_color_name + '_' + savefig.replace(' ','_')
                    command = 'mkdir -p ' + plotdir
                    print command
                    os.system(command)
                    pylab.savefig(file)
#def median_starting_zp():
#for key in combo_dict['vary']:
# median = instrument_to_locus[key]
if outliers == 'no outlier rejection':
''' starting guess for zeropoint : median hold instrumental magnitude - median hold locus magnitude '''
print A_band.shape
pinit = []
for i in range(1,len(instrument_to_locus.keys())):
key = instrument_to_locus.keys()[i]
key_hold = instrument_to_locus.keys()[0]
print i, A_band.shape, len(instrument_to_locus)
diff = A_band[:,0,i] - A_band[:,0,0]
good_diff = good[:,0,i] + good[:,0,0]
diff = diff[good_diff == 2]
median_instrumental = scipy.median(diff)
locus_here = [locus_mags[x][instrument_to_locus[key]] - locus_mags[x][instrument_to_locus[key_hold]] for x in range(len(locus_mags))]
median_locus = scipy.median(locus_here)
pinit.append(median_locus - median_instrumental)
print pinit
if True:
if iteration == 'full':
#print startingzps.keys()
if startingzps is None:
pinit = scipy.zeros(len(combo_dict['vary']))
else:
pinit = []
for key in combo_dict['vary']:
try1 = key.replace('10_2','').replace('10_1','').replace('10_3','').replace('9-4','')
try2 = key.replace('10_2','').replace('10_1','').replace('10_3','').replace('9-4','') + '-1'
if startingzps.has_key(key):
val = startingzps[key]
elif startingzps.has_key(try1):
val = startingzps[try1]
elif startingzps.has_key(try2):
val = startingzps[try2]
pinit.append(val)
else:
import random
''' add random offset of 1.0 mag '''
pinit = [results['full'][key] + random.random()*1.0 for key in combo_dict['vary']]
from scipy import optimize
out = scipy.optimize.fmin(errfunc,pinit,maxiter=100000,args=(),ftol=0.00001,xtol=0.00001) #,gtol=0.001)
if iteration is 'full':
errfunc(out,savefig=iteration+'_'+outliers+'.png')
print out
import scipy
print 'starting'
residuals,dist,redchi,end_of_locus, num = errfunc(pars=[0.] + out,residuals=True)
print dist
print 'finished'
print 'bands' , len(bands)
''' first filter on distance '''
bands = bands[dist < 1.5]
bands_err = bands_err[dist < 1.5]
locus_matrix = locus_matrix[dist < 1.5]
good = good[dist < 1.5]
residuals = residuals[dist < 1.5]
end_of_locus = end_of_locus[dist < 1.5]
print end_of_locus
print bands.shape
print dist.shape, residuals.shape
if True:
''' filter on residuals '''
print bands.shape, residuals.shape
bands = bands[residuals < 6]
bands_err = bands_err[residuals < 6]
locus_matrix = locus_matrix[residuals < 6]
good = good[residuals < 6]
end_of_locus = end_of_locus[residuals < 6]
if True:
''' filter on end of locus '''
bands = bands[end_of_locus]
bands_err = bands_err[end_of_locus]
locus_matrix = locus_matrix[end_of_locus]
good = good[end_of_locus]
print number_good_stars, len(locus_matrix)
print len(filter(lambda x: x is False,end_of_locus.tolist()))
if number_good_stars > len(locus_matrix) or len(filter(lambda x: x is False,end_of_locus.tolist())) > 0:
print 'REFITTING AFTER REMOVING ' + str(number_good_stars - len(locus_matrix) ) + ' OUTLIERS AND STARS MATCHING BLUE END OF LOCUS'
number_good_stars = len(locus_matrix)
print 'bands' , len(bands)
print bands.shape, locus_matrix.shape
pinit = out #scipy.zeros(len(zps_list))
outliers = 'outliers removed'
if False:
pinit = scipy.array(out) + scipy.array([random.random()*1.0 for p in pinit])
pinit = out #scipy.zeros(len(zps_list))
from scipy import optimize
out = scipy.optimize.fmin(errfunc,pinit,args=())
residuals,dist,redchi,end_of_locus, num = errfunc(out,savefig=iteration+'_'+outliers+'.png',residuals=True)
print out
else:
print 'NO OUTLYING STARS OR STARS MATCHING BLUE END OF LOCUS, PROCEEDING'
keep_fitting = False
results[iteration] = dict(zip([combo_dict['hold']]+combo_dict['vary'],([0.] + out.tolist())))
mask = bands_err < 100
results['redchi'] = redchi
results['num'] = num
print results
errors = {}
bootstraps = {}
import scipy
print 'BOOTSTRAPPING ERRORS:'
for key in [combo_dict['hold']] + combo_dict['vary']:
l = []
for r in results.keys():
if r != 'full' and r != 'redchi' and r != 'num':
l.append(results[r][key])
print key+':', scipy.std(l), 'mag'
errors[key] = scipy.std(l)
if bootstrap_num > 0 and len(l) > 0:
bootstraps[key] = reduce(lambda x,y: x + ',' + y, [str(z) for z in l])
else: bootstraps[key] = 'None'
results['bootstraps'] = bootstraps
results['errors'] = errors
results['bootstrapnum'] = bootstrap_num
if False:
def save_results(save_file,results,errors):
    """Write fitted zeropoints to a text summary and a pickle.

    save_file -- output path; a human-readable 'FILT zp +- err' line per
                 filter is written to save_file itself, and a pickle of
                 {'results': ..., 'errors': ...} to save_file + '.pickle'.
    results -- dict with a 'full' key mapping filter name -> zeropoint.
    errors -- dict mapping filter name -> bootstrap error.
    """
    f = open(save_file,'w')
    try:
        for key in results['full'].keys():
            f.write(key + ' ' + str(results['full'][key]) + ' +- ' + str(errors[key]) + '\n')
    finally:
        # Close even if a key is missing from 'errors'.
        f.close()
    import pickle
    # BUG FIX: the original called pickle.dump(obj, m) with m being a
    # Pickler -- pickle.dump expects a file-like object with write(),
    # which Pickler does not have, so this always raised.  Use the
    # Pickler's own dump() method, and open the file in binary mode.
    f = open(save_file + '.pickle','wb')
    try:
        m = pickle.Pickler(f)
        m.dump({'results':results,'errors':errors})
    finally:
        f.close()
if results.has_key('full') and save_results is not None: save_results(save_file,results, errors)
return results
#@entryExit
def sdss(run,night,snpath,name=None):
    """Calibrate one KPNO field against overlapping SDSS photometry.

    For each aperture type and each filter u..z: transform the SDSS psf
    magnitudes onto the instrumental system, compute a zeropoint as a
    sigma-clipped weighted mean of (SDSS - instrumental), bootstrap its
    error over 100 resamplings, save diagnostic plots, register the
    image via scamp, and store the zeropoint in the 'calib' MySQL
    database.

    run, night, snpath -- path components of the field being calibrated.
    name -- unused on input; reassigned to 'reg' inside the loop.
    """
    import pylab, pyfits, commands
    input_cat = '/Volumes/mosquitocoast/patrick/kpno/' + run +'/' + night + '/' + snpath + '/stars.fits'
    p = pyfits.open(input_cat)[1].data
    import transform_filts, scipy
    kit = get_kit()
    det = 'T2KB'
    print kit.keys()
    aptype = 'psfMag_' #'MAG_APERCORR-SDSS_'
    aptype_err = 'psfMagErr_' #'MAGERR_APERCORR-SDSS_'
    for mag in ['APERCORR','APERDUST']:
        cat_aptype = 'MAG_' + mag + '-' #'psfMag_'
        cat_aptype_err = 'MAGERR_' + mag + '-' #'psfMagErr_'
        for filt in ['u','g','r','i','z']:
            # g-i color used to interpolate the filter transformation.
            running = p.field(aptype + 'g') - p.field(aptype + 'i')
            # NOTE(review): selecting on an exact float value; x/y appear
            # to be leftover debugging and are unused below.
            x = p.field('ra')[running==0.47440300000000235]
            y = p.field('dec')[running==0.47440300000000235]
            variation=transform_filts.apply_kit(running,kit[filt.upper() + det])
            print variation
            calibrated = p.field(aptype + filt) + variation
            uncalibrated = p.field(cat_aptype + filt)
            error = (p.field(aptype_err + filt)**2. + p.field(cat_aptype_err + filt)**2.)**0.5
            # Keep clean detections only: small error, no SExtractor or
            # image flags in this filter nor in g and i.
            mask= (error < 0.1) * (p.field('FLAGS-' + filt) == 0) * (p.field('IMAFLAGS_ISO-' + filt) == 0.)
            mask *= (p.field('FLAGS-g') == 0) * (p.field('IMAFLAGS_ISO-g') == 0.)
            mask *= (p.field('FLAGS-i') == 0) * (p.field('IMAFLAGS_ISO-i') == 0.)
            print mask
            running = running[mask]
            calibrated = calibrated[mask]
            uncalibrated = uncalibrated[mask]
            error = error[mask]
            # Impose a 0.02 mag error floor.
            error[error < 0.02] = 0.02
            print calibrated
            def compute(cal_sample, uncal_sample, error_sample):
                # Weighted-mean zeropoint with one round of 6-sigma
                # clipping about the median; returns (weighted mean,
                # median) of the clipped sample.
                zp = scipy.average(cal_sample - uncal_sample,weights=1./error_sample**2.)
                zp = scipy.median(cal_sample - uncal_sample)
                mask = abs(cal_sample- uncal_sample-zp)/error_sample < 6.
                cal_sample= cal_sample[mask]
                uncal_sample= uncal_sample[mask]
                error_sample = error_sample[mask]
                zp = scipy.average(cal_sample - uncal_sample,weights=1./error_sample**2.)
                zp_med = scipy.median(cal_sample - uncal_sample)
                return zp, zp_med
            # Bootstrap with replacement (100 resamplings) for the error.
            zps = []
            for i in range(100):
                import random
                random_indices = []
                unique_indices = {}
                length = len(calibrated)
                for e in range(length):
                    # NOTE(review): int(random.random()*length - 1) can
                    # produce -1 (wraps to the last element); the usual
                    # form is random.randint(0, length - 1).
                    index = int(random.random()*length - 1)
                    unique_indices[index] = 'yes'
                    random_indices.append(index)
                # NOTE(review): these comprehensions rebind the loop
                # variable 'i' (Python 2 leaks comprehension variables).
                cal = scipy.array([calibrated[i] for i in random_indices])
                uncal = scipy.array([uncalibrated[i] for i in random_indices])
                err = scipy.array([error[i] for i in random_indices])
                zp, zp_med = compute(cal,uncal,err)
                zps.append(zp)
            zp = scipy.mean(zps)
            zp_err = scipy.std(zps)
            # Diagnostic plot: zeropoint residual vs. g-i color.
            pylab.clf()
            pylab.title(str(zp) + ' +- ' + str(zp_err))
            pylab.axhline(zp,c='red')
            pylab.axhline(zp+zp_err,c='red')
            pylab.axhline(zp-zp_err,c='red')
            pylab.scatter(running,calibrated-uncalibrated)
            pylab.errorbar(running,calibrated-uncalibrated,yerr=error,fmt=None)
            tab = '/Volumes/mosquitocoast/patrick/kpno/' + run +'/' + night + '/' + snpath + '/sdss_stars' + filt + '.png'
            print tab
            pylab.savefig(tab)
            pylab.savefig('/Users/pkelly/Dropbox/sdss' + filt + '.png')
            # Diagnostic plot: bias vs. calibrated magnitude.
            pylab.clf()
            pylab.title(str(zp) + ' +- ' + str(zp_err))
            pylab.scatter(calibrated,uncalibrated-calibrated)
            pylab.errorbar(calibrated,uncalibrated-calibrated,yerr=error,fmt=None)
            tab = '/Volumes/mosquitocoast/patrick/kpno/' + run +'/' + night + '/' + snpath + '/bias_stars' + filt + '.png'
            print tab
            pylab.savefig(tab)
            pylab.savefig('/Users/pkelly/Dropbox/bias_sdss' + filt + '.png')
            # Register the image with the scamp database layer.
            image = '/Volumes/mosquitocoast/patrick/kpno/' + run +'/' + night + '/' + snpath + '/' + filt + '/reg.fits'
            import scamp
            name = 'reg'
            print image, snpath, filt, name, run
            reload(scamp).add_image(image,snpath,filt,name,run)
            # Store zeropoint, error and star count in the calib DB.
            import MySQLdb
            db2 = MySQLdb.connect(db='calib')
            c = db2.cursor()
            if mag=='APERCORR':
                command = "UPDATE CALIB set SDSSZP=" + str(zp) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
                c.execute(command)
                command = "UPDATE CALIB set SDSSZPERR=" + str(zp_err) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
                c.execute(command)
                command = "UPDATE CALIB set SDSSNUM=" + str(len(calibrated)) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
                c.execute(command)
            elif mag=='APERDUST':
                command = "UPDATE CALIB set SDSSDUSTZP=" + str(zp) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
                print command
                c.execute(command)
                command = "UPDATE CALIB set SDSSDUSTZPERR=" + str(zp_err) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
                c.execute(command)
                command = "UPDATE CALIB set SDSSDUSTNUM=" + str(len(calibrated)) + " WHERE SN='" + snpath + "' and FILT='" + filt + "' and NAME='" + name + "' and RUN='" + run + "'"
                c.execute(command)
            print filt, zp, zp_med
def plot_zp():
import MySQLdb, scipy
db2 = MySQLdb.connect(db='calib')
c = db2.cursor()
for filt in ['u','g','r','i','z']:
command = 'select JD, SLRZP, sn from calib where gallat is not null and slrzp is not null and filt="' + filt + '"' # and run="kpno_May2010"' #JD > 2455470'
#command = 'select JD, SLRZP, sn from calib where gallat is not null and slrzp is not null and filt="' + filt + '" and exptime=120'
c.execute(command)
results = c.fetchall()
print results
x = [float(a[0]) for a in results]
y = [float(a[1]) for a in results]
s = [(a[2][4:]) for a in results]
import pylab
pylab.clf()
for i in range(len(x)):
pylab.text(x[i],y[i],s[i],fontsize=8)
pylab.scatter(x,y)
pylab.title(filt)
pylab.savefig('/Users/pkelly/Dropbox/test' + filt + '.pdf')
#pylab.show()
def plot_detail(calibrate=False):
    """Plot zeropoint vs. airmass per filter; optionally calibrate fields.

    For each filter, fits the SDSS dust-corrected zeropoint against
    airmass with both an unweighted polyfit and an inverse-variance
    weighted least-squares line.  With calibrate=True, the fitted
    relation supplies zeropoints for fields lacking a direct SDSS
    solution; results are written into the image headers (sethead),
    the 'calib' database, and a per-SN anydbm store.
    """
    import MySQLdb, scipy
    db2 = MySQLdb.connect(db='calib')
    c = db2.cursor()
    for filt in ['u','g','r','i','z']:
        import pylab
        pylab.clf()
        def p(command,color):
            # Run 'command', scatter-plot zeropoint vs. airmass with
            # field-name labels, and return (slope, intercept) of the
            # weighted linear fit.
            import MySQLdb, scipy
            db2 = MySQLdb.connect(db='calib')
            c = db2.cursor()
            c.execute(command)
            results = c.fetchall()
            print results
            x = scipy.array([float(a[0]) for a in results])
            y = scipy.array([float(a[1]) for a in results])
            y_err = scipy.array([float(a[2]) for a in results])
            s = [(a[3][4:]) for a in results]
            for i in range(len(x)):
                pylab.text(x[i]+0.01,y[i]+0.00,s[i],fontsize=8)
            print x
            if 1:
                pylab.errorbar(x,y,y_err,fmt='ro',color=color)
                pylab.scatter(x,y,c=color)
                x_new = scipy.arange(1,3)
                print len(x), len(y)
                # Unweighted fit (black line).  NOTE(review): this rebinds
                # the name 'p' locally, shadowing the enclosing function.
                p = scipy.polyfit(x,y,1)
                y_new = scipy.polyval(p, x_new)
                pylab.plot(x_new,y_new, color='black')
                # Inverse-variance weighted least-squares fit (blue line).
                A = scipy.vstack([x/y_err, scipy.ones(len(x))/y_err]).T
                print A
                from scipy import linalg
                m,c = scipy.linalg.lstsq(A,y/y_err)[0]
                print m,c
                pylab.plot(x_new,m*x_new + c, color='blue')
                print x_new, m*x_new
            return m,c
        run = 'kpno_Oct2010'
        variable = 'airmass'
        # First query (SLR zeropoints) is currently disabled below.
        command = 'select b.' + variable + ', c.slrdustzp+b.RELZP, c.slrdustzperr, b.sn from calib as c join calib b on c.sn=b.sn and c.run=b.run and c.filt=b.filt where c.slrzp is not null and c.slrzperr is not null and c.slrnum > 10 and b.relzp is not null and c.filt="' + filt + '" and c.run="' + run + '" and c.slrzperr<8 and c.JD>2455475'
        #p(command,'blue')
        command = 'select b.' + variable + ', c.sdssdustzp+b.RELZP, c.sdssdustzperr, b.sn from calib as c join calib b on c.sn=b.sn and c.run=b.run and c.filt=b.filt where c.sdssdustzp is not null and c.sdsszperr is not null and c.sdssnum > 1 and b.relzp is not null and c.filt="' + filt + '" and c.run="' + run + '" and b.night=4297' # and c.JD>2455475'
        print command
        m_fit,c_fit = p(command,'red')
        if calibrate:
            # Hard-coded to one night/SN; presumably adjusted per run.
            command = 'select sn, airmass, sdssdustzp, run from calib where night=4353 and sn="sn1997ef" and filt="' + filt + '" group by sn,filt'
            print command
            c.execute(command)
            results = c.fetchall()
            print results
            import string , os
            for sn, airmass, sdssdustzp, run in results:
                # Prefer the measured SDSS zeropoint; otherwise use the
                # fitted airmass relation.
                if not sdssdustzp:
                    sdssphotozp = m_fit*float(airmass) + c_fit
                else:
                    sdssphotozp = float(sdssdustzp)
                print sdssphotozp, sdssdustzp, sn
                command = 'sethead ' + os.environ['kpno'] + '/' + run+ '/work_night/' + sn + '/' + filt + '/reg.fits SDSSPHOTOZP=' + str(sdssphotozp)
                print command
                os.system(command)
                command = 'sethead ' + os.environ['kpno'] + '/' + run+ '/work_night/' + sn + '/' + filt + '/reg.sdss.fits SDSSPHOTOZP=' + str(sdssphotozp)
                print command
                os.system(command)
                command = 'update calib set sdssphotozp=' + str(sdssphotozp) + ' where sn="' + sn + '" and run="' + run + '" and filt="' + filt + '"'
                c.execute(command)
                # Record the zeropoint and detector gain in the per-SN
                # anydbm store.
                import anydbm
                gh = anydbm.open(sn)
                gh['sdssphotozp_' + filt ] = str(sdssphotozp)
                import commands
                gain = commands.getoutput('gethead ' + os.environ['kpno'] + '/' + run+ '/work_night/' + sn + '/' + filt + '/reg.fits GAIN')
                detector = commands.getoutput('gethead ' + os.environ['kpno'] + '/' + run+ '/work_night/' + sn + '/' + filt + '/reg.fits DETECTOR')
                gh['gain_' + filt + '_' + detector ] = gain
        pylab.title(filt)
        pylab.savefig('/Users/pkelly/Dropbox/test' + filt + '.pdf')
if __name__ == '__main__':
    # Command line:
    #   python <script> CLUSTER [spec|rand|train|ISO|APER1|APER]
    #                   [detect=FILTER] [spectra=NAME] [aptype=TYPE]
    import os , sys, string
    subarudir = os.environ['subdir']
    cluster = sys.argv[1] #'MACS1423+24'
    spec = False
    train_first = False
    magtype = 'APER1'
    AP_TYPE = ''
    type = 'all'  # NOTE(review): shadows the builtin 'type'
    if len(sys.argv) > 2:
        for s in sys.argv:
            if s == 'spec':
                type = 'spec'
                spec = True
            if s == 'rand':
                type = 'rand'
            if s == 'train':
                train_first = True
            if s == 'ISO':
                magtype = 'ISO'
            if s == 'APER1':
                magtype = 'APER1'
            if s == 'APER':
                magtype = 'APER'
            if string.find(s,'detect') != -1:
                import re
                rs = re.split('=',s)
                DETECT_FILTER=rs[1]
            if string.find(s,'spectra') != -1:
                import re
                rs = re.split('=',s)
                SPECTRA=rs[1]
            if string.find(s,'aptype') != -1:
                import re
                rs = re.split('=',s)
                AP_TYPE = '_' + rs[1]
    #photdir = subarudir + '/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + AP_TYPE + '/'
    # NOTE(review): DETECT_FILTER is only bound when a 'detect=...'
    # argument is supplied; without it the call below raises NameError.
    # 'all' here is presumably the pipeline driver defined earlier in
    # this file (it shadows the builtin all()).
    all(subarudir,cluster,DETECT_FILTER,AP_TYPE,magtype)
|
|
from panda3d.core import *
from direct.showbase.DirectObject import DirectObject
import math
import copy
class TexMemWatcher(DirectObject):
"""
This class creates a separate graphics window that displays an
approximation of the current texture memory, showing the textures
that are resident and/or active, and an approximation of the
amount of texture memory consumed by each one. It's intended as a
useful tool to help determine where texture memory is being spent.
Although it represents the textures visually in a 2-d space, it
doesn't actually have any idea how textures are physically laid
out in memory--but it has to lay them out somehow, so it makes
something up. It occasionally rearranges the texture display when
it feels it needs to, without regard to what the graphics card is
actually doing. This tool can't be used to research texture
memory fragmentation issues.
"""
NextIndex = 1
StatusHeight = 20 # in pixels
def __init__(self, gsg = None, limit = None):
    """Open the texture-memory display window and start monitoring.

    gsg -- the GSG to monitor, or a GraphicsOutput whose GSG is used;
           defaults to the main window's GSG.
    limit -- intended texture-memory budget in bytes; forwarded to
           setLimit() at the end of construction.
    """
    DirectObject.__init__(self)
    # First, we'll need a name to uniquify the object.
    self.name = 'tex-mem%s' % (TexMemWatcher.NextIndex)
    TexMemWatcher.NextIndex += 1
    self.cleanedUp = False
    self.top = 1.0
    # Textures are packed arbitrarily into the canvas; the layout has
    # no relation to the actual arrangement in texture memory (which we
    # cannot determine).  Each texture's visual size is proportional to
    # the bytes it consumes (including mipmaps and compression).
    # self.limit is the assumed texture-memory budget: we aggressively
    # pack textures within that block -- distorting or splitting them
    # if needed -- so they are guaranteed to fit as long as the total
    # utilization does not exceed the limit.
    # Internally, textures are packed into an integer grid of Q-units,
    # where each Q-unit covers self.quantize * self.quantize texture
    # bytes; increasing self.quantize reduces the packing resolution.
    # This number defines the size of a Q-unit square, in texture
    # bytes.  It is automatically adjusted in repack() based on the
    # window size and the texture memory size.
    self.quantize = 1
    # Maximum number of bitmask rows (within self.limit) to allocate
    # for packing; controls the value assigned to self.quantize.
    self.maxHeight = base.config.GetInt('tex-mem-max-height', 300)
    # The total number of texture bytes tracked, including overflow.
    self.totalSize = 0
    # The total number of texture bytes placed, not including
    # overflow (that is, within self.limit).
    self.placedSize = 0
    # The total number of Q-units placed, not including overflow.
    self.placedQSize = 0
    # If no GSG is specified, use the main GSG.
    if gsg is None:
        gsg = base.win.getGsg()
    elif isinstance(gsg, GraphicsOutput):
        # If we were passed a window, use that window's GSG.
        gsg = gsg.getGsg()
    self.gsg = gsg
    # Now open a new window just to render the output.
    size = ConfigVariableInt('tex-mem-win-size', '300 300')
    origin = ConfigVariableInt('tex-mem-win-origin', '100 100')
    self.winSize = (size[0], size[1])
    name = 'Texture Memory'
    props = WindowProperties()
    props.setOrigin(origin[0], origin[1])
    props.setSize(*self.winSize)
    props.setTitle(name)
    props.setFullscreen(False)
    props.setUndecorated(False)
    fbprops = FrameBufferProperties.getDefault()
    flags = GraphicsPipe.BFFbPropsOptional | GraphicsPipe.BFRequireWindow
    self.pipe = None
    # Set tex-mem-pipe to e.g. tinydisplay on a machine with limited
    # texture memory, so this window doesn't compete with the scene.
    moduleName = base.config.GetString('tex-mem-pipe', '')
    if moduleName:
        self.pipe = base.makeModulePipe(moduleName)
    # If the requested pipe fails for some reason, use the regular pipe.
    if not self.pipe:
        self.pipe = base.pipe
    self.win = base.graphicsEngine.makeOutput(self.pipe, name, 0, fbprops,
                                              props, flags)
    assert self.win
    # We should render at the end of the frame.
    self.win.setSort(10000)
    # No clears needed: the color buffer is fully covered by the canvas
    # texture, and the depth buffer is unused.
    self.win.setClearColorActive(False)
    self.win.setClearDepthActive(False)
    eventName = '%s-window' % (self.name)
    self.win.setWindowEvent(eventName)
    self.accept(eventName, self.windowEvent)
    # Update appropriately if anyone changes the window's graphics
    # memory limit.
    self.accept('graphics_memory_limit_changed',
                self.graphicsMemoryLimitChanged)
    # We'll need a mouse object to get mouse events.
    self.mouse = base.dataRoot.attachNewNode(MouseAndKeyboard(self.win, 0, '%s-mouse' % (self.name)))
    bt = ButtonThrower('%s-thrower' % (self.name))
    self.mouse.attachNewNode(bt)
    bt.setPrefix('button-%s-' % (self.name))
    self.accept('button-%s-mouse1' % (self.name), self.mouseClick)
    self.setupGui()
    self.setupCanvas()
    # Now start handling up the actual stuff in the scene.
    self.background = None
    self.nextTexRecordKey = 0
    self.rollover = None
    self.isolate = None
    self.isolated = None
    self.needsRepack = False
    # Poll for texture state changes at this interval (seconds).
    updateInterval = base.config.GetDouble("tex-mem-update-interval", 0.5)
    self.task = taskMgr.doMethodLater(updateInterval, self.updateTextures, 'TexMemWatcher')
    self.setLimit(limit)
def setupGui(self):
    """ Build the status-bar scene graph: a private render2d tree with
    an orthographic camera, a gray background card, and two TextNodes
    (statusText at the left, sizeText right-aligned). """
    root = NodePath('render2d')
    root.setDepthTest(False)
    root.setDepthWrite(False)
    root.setTwoSided(True)
    root.setBin('unsorted', 0)
    self.render2d = root
    # A dedicated DisplayRegion with a 2-d orthographic camera.
    region = self.win.makeDisplayRegion()
    camera = Camera('cam2d')
    self.lens = OrthographicLens()
    self.lens.setNearFar(-1000, 1000)
    self.lens.setFilmSize(2, 2)
    camera.setLens(self.lens)
    cameraNP = root.attachNewNode(camera)
    region.setCamera(cameraNP)
    self.aspect2d = root.attachNewNode('aspect2d')
    # Light-gray card drawn behind the status text.
    card = CardMaker('statusBackground')
    card.setColor(0.85, 0.85, 0.85, 1)
    card.setFrame(0, 2, 0, 2)
    self.statusBackground = root.attachNewNode(card.generate(), -1)
    self.statusBackground.setPos(-1, 0, -1)
    # Status line (left) and size readout (right-aligned, with its own
    # card so it remains legible over the status text).
    self.status = self.aspect2d.attachNewNode('status')
    self.statusText = TextNode('statusText')
    self.statusText.setTextColor(0, 0, 0, 1)
    self.statusTextNP = self.status.attachNewNode(self.statusText)
    self.statusTextNP.setScale(1.5)
    self.sizeText = TextNode('sizeText')
    self.sizeText.setTextColor(0, 0, 0, 1)
    self.sizeText.setAlign(TextNode.ARight)
    self.sizeText.setCardAsMargin(0.25, 0, 0, -0.25)
    self.sizeText.setCardColor(0.85, 0.85, 0.85, 1)
    self.sizeTextNP = self.status.attachNewNode(self.sizeText)
    self.sizeTextNP.setScale(1.5)
def setupCanvas(self):
    """ Creates the "canvas", which is the checkerboard area where
    texture memory is laid out.  The canvas has its own
    DisplayRegion. """

    self.canvasRoot = NodePath('canvasRoot')
    self.canvasRoot.setDepthTest(False)
    self.canvasRoot.setDepthWrite(False)
    self.canvasRoot.setTwoSided(True)
    self.canvasRoot.setBin('unsorted', 0)

    self.canvas = self.canvasRoot.attachNewNode('canvas')

    # Create a DisplayRegion and an associated camera.  The negative
    # sort order draws the canvas beneath the gui display region.
    self.canvasDR = self.win.makeDisplayRegion()
    self.canvasDR.setSort(-10)
    cam = Camera('cam2d')
    self.canvasLens = OrthographicLens()
    self.canvasLens.setNearFar(-1000, 1000)
    cam.setLens(self.canvasLens)

    np = self.canvasRoot.attachNewNode(cam)
    self.canvasDR.setCamera(np)

    # Create a MouseWatcher so we can interact with the various
    # textures.
    self.mw = MouseWatcher('%s-watcher' % (self.name))
    self.mw.setDisplayRegion(self.canvasDR)
    mwnp = self.mouse.attachNewNode(self.mw)

    # Fire named events (handled by enterRegion/leaveRegion) when
    # the mouse enters or leaves a texture's clickable region.
    eventName = '%s-enter' % (self.name)
    self.mw.setEnterPattern(eventName)
    self.accept(eventName, self.enterRegion)

    eventName = '%s-leave' % (self.name)
    self.mw.setLeavePattern(eventName)
    self.accept(eventName, self.leaveRegion)

    # Create a checkerboard background card for the canvas: a 2x2
    # grayscale texture, magnified with nearest filtering so the
    # checks stay crisp.
    p = PNMImage(2, 2, 1)
    p.setGray(0, 0, 0.40)
    p.setGray(1, 1, 0.40)
    p.setGray(0, 1, 0.75)
    p.setGray(1, 0, 0.75)

    self.checkTex = Texture('checkTex')
    self.checkTex.load(p)
    self.checkTex.setMagfilter(Texture.FTNearest)

    self.canvasBackground = None
    self.makeCanvasBackground()
def makeCanvasBackground(self):
    """ (Re)builds the checkerboard backdrop behind the canvas,
    including the red-tinted "overflow" strip from y = 1 up to
    self.top when the canvas has one. """

    if self.canvasBackground:
        self.canvasBackground.removeNode()

    self.canvasBackground = self.canvasRoot.attachNewNode('canvasBackground', -100)

    # The main region, 0..1 in both axes, holds the in-limit layout.
    cm = CardMaker('background')
    cm.setFrame(0, 1, 0, 1)
    cm.setUvRange((0, 0), (1, 1))
    self.canvasBackground.attachNewNode(cm.generate())

    # Anything placed above y = 1 is overflow; tint that band red.
    cm.setFrame(0, 1, 1, self.top)
    cm.setUvRange((0, 1), (1, self.top))
    bad = self.canvasBackground.attachNewNode(cm.generate())
    bad.setColor((0.8, 0.2, 0.2, 1))

    self.canvasBackground.setTexture(self.checkTex)
def setLimit(self, limit = None):
    """ Sets the texture memory limit, in bytes.  Pass None (or omit
    the argument) to derive the limit from the GSG's graphics memory
    limit when one is available, or to track the limit dynamically
    otherwise.  The window is reconfigured for the new limit. """
    self.__doSetLimit(limit)
    self.reconfigureWindow()
def __doSetLimit(self, limit):
    """ Internal implementation of setLimit(). """
    self.limit = limit
    # At most one of these flags ends up set: lruLimit means we
    # mirror the GSG's own memory limit; dynamicLimit means we
    # invent a limit on the fly from the current total usage.
    self.lruLimit = False
    self.dynamicLimit = False

    if not limit:
        # If no limit was specified, use the specified graphics
        # memory limit, if any.
        lruSize = self.gsg.getPreparedObjects().getGraphicsMemoryLimit()
        if lruSize and lruSize < 2**32 - 1:
            # Got a real lruSize.  Use it.
            self.limit = lruSize
            self.lruLimit = True
        else:
            # No LRU limit either, so there won't be a practical
            # limit to the TexMemWatcher.  We'll determine our
            # limit on-the-fly instead.
            self.dynamicLimit = True

    if self.dynamicLimit:
        # Choose a suitable limit by rounding to the next power of two.
        self.limit = Texture.upToPower2(self.totalSize)

    # Set our GSG to limit itself to no more textures than we
    # expect to display onscreen, so we don't go crazy with
    # texture memory.
    self.win.getGsg().getPreparedObjects().setGraphicsMemoryLimit(self.limit)

    # The actual height of the canvas, including the overflow
    # area.  The texture memory itself is restricted to (0..1)
    # vertically; anything higher than 1 is overflow.
    top = 1.25
    if self.dynamicLimit:
        # Actually, we'll never exceed texture memory, so never mind.
        top = 1

    if top != self.top:
        self.top = top
        self.makeCanvasBackground()

    self.canvasLens.setFilmSize(1, self.top)
    self.canvasLens.setFilmOffset(0.5, self.top / 2.0)  # lens covers 0..1 in x and y
def cleanup(self):
    """ Closes the window and releases everything this watcher was
    holding.  Safe to call more than once; only the first call does
    any work. """
    if not self.cleanedUp:
        self.cleanedUp = True

        # Remove the window.
        base.graphicsEngine.removeWindow(self.win)
        self.win = None
        self.gsg = None
        self.pipe = None

        # Remove the mouse.
        self.mouse.detachNode()

        # Stop the periodic updateTextures task and all event hooks.
        taskMgr.remove(self.task)
        self.ignoreAll()

        # Drop all the per-texture geometry and bookkeeping.
        self.canvas.getChildren().detach()
        self.texRecordsByTex = {}
        self.texRecordsByKey = {}
        self.texPlacements = {}
def graphicsMemoryLimitChanged(self):
    """ Callback for when the GSG's graphics memory limit changes;
    re-derives our own limit whenever it is tied to the GSG (either
    mirrored from the LRU or tracked dynamically). """
    if not (self.dynamicLimit or self.lruLimit):
        return
    self.__doSetLimit(None)
    self.reconfigureWindow()
def windowEvent(self, win):
    """ Handles a window event on our window: cleans up when the
    window is closed, reconfigures when it is resized.  Events for
    other windows are ignored. """
    if win != self.win:
        return

    props = win.getProperties()
    if not props.getOpen():
        # The user closed the window; tear everything down.
        self.cleanup()
        return

    newSize = (props.getXSize(), props.getYSize())
    if newSize != self.winSize:
        self.winSize = newSize
        self.reconfigureWindow()
def enterRegion(self, region, buttonName):
    """ The mouse has rolled over a texture's clickable region.
    The region name encodes "key:placementIndex". """
    key, pi = [int(token) for token in region.getName().split(':')]
    tr = self.texRecordsByKey.get(key)
    if tr:
        self.setRollover(tr, pi)
def leaveRegion(self, region, buttonName):
    """ The mouse has left a texture's clickable region; clears the
    rollover highlight, but only if this region's texture is still
    the one highlighted. """
    key, pi = [int(token) for token in region.getName().split(':')]
    tr = self.texRecordsByKey.get(key)
    if tr == self.rollover:
        self.setRollover(None, None)
def mouseClick(self):
    """ Received a mouse-click within the window.  Toggles isolation:
    a click while a texture is isolated restores the normal view; a
    click over a highlighted texture isolates it full-window. """
    if self.isolate:
        # Already isolating a texture; the click undoes this.
        self.isolateTexture(None)
    elif self.rollover:
        self.isolateTexture(self.rollover)
def setRollover(self, tr, pi):
    """ Sets the highlighted texture (due to mouse rollover) to
    the indicated texture, or None to clear it.  The status line
    shows the highlighted texture's name. """
    self.rollover = tr
    self.statusText.setText(tr.tex.getName() if tr else '')
def isolateTexture(self, tr):
    """ Isolates the indicated texture onscreen, or None to
    restore normal mode. """

    if self.isolate:
        self.isolate.removeNode()
        self.isolate = None

    self.isolated = tr

    # Undo the previous call to isolate.
    self.canvas.show()
    self.canvasBackground.clearColor()
    self.win.getGsg().setTextureQualityOverride(Texture.QLDefault)
    if hasattr(self.gsg, 'clearFlashTexture'):
        self.gsg.clearFlashTexture()

    if not tr:
        return

    # Now isolate.

    self.canvas.hide()
    # Disable the red bar at the top (the trailing 1 is presumably
    # the color-override priority -- confirm against NodePath API).
    self.canvasBackground.setColor(1, 1, 1, 1, 1)

    # Show the texture in all its filtered glory.
    self.win.getGsg().setTextureQualityOverride(Texture.QLBest)

    if hasattr(self.gsg, 'setFlashTexture'):
        # Start the texture flashing in the main window.
        self.gsg.setFlashTexture(tr.tex)

    self.isolate = self.render2d.attachNewNode('isolate')

    wx, wy = self.winSize

    # Put a label on the bottom of the screen: name, dimensions,
    # and formatted size of the texture.
    tn = TextNode('tn')
    tn.setText('%s\n%s x %s\n%s' % (
        tr.tex.getName(), tr.tex.getXSize(), tr.tex.getYSize(),
        self.formatSize(tr.size)))
    tn.setAlign(tn.ACenter)
    tn.setCardAsMargin(100.0, 100.0, 0.1, 0.1)
    tn.setCardColor(0.1, 0.2, 0.4, 1)
    tnp = self.isolate.attachNewNode(tn)
    scale = 30.0 / wy
    tnp.setScale(scale * wy / wx, scale, scale)
    # NOTE(review): 'render2d' here is the bare global (the main
    # window's render2d), not self.render2d -- confirm this is
    # intentional and not a missing 'self.'.
    tnp.setPos(render2d, 0, 0, -1 - tn.getBottom() * scale)

    labelTop = tn.getHeight() * scale

    # Make a card that shows the texture in actual pixel size, but
    # don't let it exceed the screen size.
    tw = tr.tex.getXSize()
    th = tr.tex.getYSize()

    # Available space in pixels, after reserving room for the label.
    wx = float(wx)
    wy = float(wy) * (2.0 - labelTop) * 0.5

    w = min(tw, wx)
    h = min(th, wy)

    # Uniform scale so the card fits in both dimensions.
    sx = w / tw
    sy = h / th
    s = min(sx, sy)

    # Half-extents of the card in render2d units (the window spans
    # 2 units in each axis).
    w = tw * s / float(self.winSize[0])
    h = th * s / float(self.winSize[1])

    cx = 0.0
    cy = 1.0 - (2.0 - labelTop) * 0.5

    l = cx - w
    r = cx + w
    b = cy - h
    t = cy + h

    cm = CardMaker('card')
    cm.setFrame(l, r, b, t)
    c = self.isolate.attachNewNode(cm.generate())
    c.setTexture(tr.tex)
    c.setTransparency(TransparencyAttrib.MAlpha)

    # A black wireframe ringing the card.
    ls = LineSegs('frame')
    ls.setColor(0, 0, 0, 1)
    ls.moveTo(l, 0, b)
    ls.drawTo(r, 0, b)
    ls.drawTo(r, 0, t)
    ls.drawTo(l, 0, t)
    ls.drawTo(l, 0, b)
    self.isolate.attachNewNode(ls.create())
def reconfigureWindow(self):
    """ Resets everything for a new window size. """

    wx, wy = self.winSize
    if wx <= 0 or wy <= 0:
        return

    # Keep aspect2d units square regardless of the window's shape.
    self.aspect2d.setScale(float(wy) / float(wx), 1, 1)

    # Reserve self.StatusHeight pixels for the status bar;
    # everything else is for the canvas.
    statusScale = float(self.StatusHeight) / float(wy)
    self.statusBackground.setScale(1, 1, statusScale)
    self.status.setScale(statusScale)
    self.statusTextNP.setPos(self.statusBackground, 0, 0, 0.5)
    self.sizeTextNP.setPos(self.statusBackground, 2, 0, 0.5)

    self.canvasDR.setDimensions(0, 1, statusScale, 1)

    # Rescale the checkerboard so each check stays roughly 20 pixels.
    w = self.canvasDR.getPixelWidth()
    h = self.canvasDR.getPixelHeight()
    self.canvasBackground.setTexScale(TextureStage.getDefault(),
                                      w / 20.0, h / (20.0 * self.top))

    if self.isolate:
        # If we're currently showing an isolated texture, refresh
        # that display so we get its size right.  And when we come
        # back to the main window (but not now), repack it.
        self.needsRepack = True
        self.isolateTexture(self.isolated)

    else:
        # If we're showing the main window, just repack it
        # immediately.
        self.repack()
def updateTextures(self, task):
    """ Gets the current list of resident textures and adds new
    textures or removes old ones from the onscreen display, as
    necessary.  Runs periodically as a doMethodLater task; always
    returns task.again to keep the task alive. """

    if self.isolate:
        # never mind for now.
        return task.again

    if self.needsRepack:
        self.needsRepack = False
        self.repack()
        return task.again

    pgo = self.gsg.getPreparedObjects()
    totalSize = 0

    # Records that are new or whose size changed and therefore need
    # to be (re)placed on the canvas this pass.
    texRecords = []

    # Start from everything we knew about; whatever is left in this
    # dict at the end was not seen among the prepared textures and
    # must be removed.
    neverVisited = copy.copy(self.texRecordsByTex)

    for tex in self.gsg.getPreparedTextures():
        # We have visited this texture; remove it from the
        # neverVisited list.
        if tex in neverVisited:
            del neverVisited[tex]

        size = 0
        if tex.getResident(pgo):
            size = tex.getDataSizeBytes(pgo)

        tr = self.texRecordsByTex.get(tex, None)

        if size:
            totalSize += size
            active = tex.getActive(pgo)
            if not tr:
                # This is a new texture; need to record it.
                key = self.nextTexRecordKey
                self.nextTexRecordKey += 1
                tr = TexRecord(key, tex, size, active)
                texRecords.append(tr)
            else:
                tr.setActive(active)
                if tr.size != size or not tr.placements:
                    # The size has changed; reapply it.
                    tr.setSize(size)
                    self.unplaceTexture(tr)
                    texRecords.append(tr)
        else:
            if tr:
                # This texture is no longer resident; need to remove it.
                self.unplaceTexture(tr)

    # Now go through and make sure we unplace (and remove!) any
    # textures that we didn't visit at all this pass.
    for tex, tr in neverVisited.items():
        self.unplaceTexture(tr)
        del self.texRecordsByTex[tex]
        del self.texRecordsByKey[tr.key]

    self.totalSize = totalSize
    self.sizeText.setText(self.formatSize(self.totalSize))
    if totalSize > self.limit and self.dynamicLimit:
        # Actually, never mind on the update: we have exceeded the
        # dynamic limit computed before, and therefore we need to
        # repack.
        self.repack()
    else:
        overflowCount = sum([tp.overflowed for tp in self.texPlacements.keys()])
        if totalSize <= self.limit and overflowCount:
            # Shouldn't be overflowing any more.  Better repack.
            self.repack()
        else:
            # Pack in just the newly-loaded textures.

            # Sort the regions from largest to smallest to maximize
            # packing effectiveness.
            texRecords.sort(key = lambda tr: (tr.tw, tr.th), reverse = True)

            for tr in texRecords:
                self.placeTexture(tr)
                self.texRecordsByTex[tr.tex] = tr
                self.texRecordsByKey[tr.key] = tr

    return task.again
def repack(self):
    """ Repacks all of the current textures: throws away the whole
    layout, re-derives the canvas dimensions from the current limit
    and window shape, and places every resident texture again from
    scratch. """

    # Discard all existing placements, geometry, and mouse regions.
    self.canvas.getChildren().detach()
    self.texRecordsByTex = {}
    self.texRecordsByKey = {}
    self.texPlacements = {}
    self.bitmasks = []
    self.mw.clearRegions()
    self.setRollover(None, None)
    self.w = 1
    self.h = 1
    self.placedSize = 0
    self.placedQSize = 0

    # Rebuild a record for every currently-resident texture.
    pgo = self.gsg.getPreparedObjects()
    totalSize = 0
    for tex in self.gsg.getPreparedTextures():
        if tex.getResident(pgo):
            size = tex.getDataSizeBytes(pgo)
            if size:
                active = tex.getActive(pgo)
                key = self.nextTexRecordKey
                self.nextTexRecordKey += 1
                tr = TexRecord(key, tex, size, active)
                self.texRecordsByTex[tr.tex] = tr
                self.texRecordsByKey[tr.key] = tr
                totalSize += size

    self.totalSize = totalSize
    self.sizeText.setText(self.formatSize(self.totalSize))
    if not self.totalSize:
        return

    if self.dynamicLimit or self.lruLimit:
        # Adjust the limit to ensure we keep tracking the lru size.
        self.__doSetLimit(None)

    # Now make that into a 2-D rectangle of the appropriate shape,
    # such that w * h == limit.

    # Window size
    x, y = self.winSize

    # There should be a little buffer on the top so we can see if
    # we overflow.
    y /= self.top

    r = float(y) / float(x)

    # Region size: w * h == limit, with h/w matching the (adjusted)
    # window aspect ratio.
    w = math.sqrt(self.limit) / math.sqrt(r)
    h = w * r

    # Now choose self.quantize so that we don't exceed
    # self.maxHeight.
    if h > self.maxHeight:
        self.quantize = int(math.ceil(h / self.maxHeight))
    else:
        self.quantize = 1

    # Quantized canvas size, in whole Q-units (at least 1x1).
    w = max(int(w / self.quantize + 0.5), 1)
    h = max(int(h / self.quantize + 0.5), 1)
    self.w = w
    self.h = h
    self.area = self.w * self.h

    # We store a bitarray for each row, for fast lookup for
    # unallocated space on the canvas.  Each Q-unit on the row
    # corresponds to a bit in the bitarray, where bit 0 is Q-unit
    # 0, bit 1 is Q-unit 1, and so on.  If the bit is set, the
    # space is occupied.
    self.bitmasks = []
    for i in range(self.h):
        self.bitmasks.append(BitArray())

    self.canvas.setScale(1.0 / w, 1.0, 1.0 / h)
    self.mw.setFrame(0, w, 0, h * self.top)

    # Sort the regions from largest to smallest to maximize packing
    # effectiveness.  Use sorted() rather than calling .sort() on
    # dict.values(): under Python 3, values() returns a view with no
    # sort() method; sorted() behaves identically on Python 2.
    texRecords = sorted(self.texRecordsByTex.values(),
                        key = lambda tr: (tr.tw, tr.th), reverse = True)

    for tr in texRecords:
        self.placeTexture(tr)
def formatSize(self, size):
    """ Formats a byte count as a human-readable string, choosing
    bytes, kb, MB, or GB as appropriate. """
    if size < 1000:
        return '%s bytes' % (size)
    value = size / 1024.0
    for unit in ('kb', 'MB'):
        if value < 1000:
            return '%0.1f %s' % (value, unit)
        value /= 1024.0
    return '%0.1f GB' % (value)
def unplaceTexture(self, tr):
    """ Removes the texture from its place on the canvas: frees its
    bitmask rows, removes its placements from the global map, and
    destroys its geometry.  Resets the record's overflowed flag. """
    if tr.placements:
        for tp in tr.placements:
            tp.clearBitmasks(self.bitmasks)
            if not tp.overflowed:
                # Only non-overflow placements counted against the
                # packed quantized area.
                self.placedQSize -= tp.area
                assert self.placedQSize >= 0
            del self.texPlacements[tp]
        tr.placements = []
        tr.clearCard(self)
        if not tr.overflowed:
            self.placedSize -= tr.size
            assert self.placedSize >= 0
    tr.overflowed = 0
def placeTexture(self, tr):
    """ Places the texture somewhere on the canvas where it will
    fit.  Tries a single rectangular hole first, then a set of
    smaller pieces, and finally falls back to the overflow area
    above the canvas. """

    tr.computePlacementSize(self)
    tr.overflowed = 0
    shouldFit = False

    availableSize = self.limit - self.placedSize
    if availableSize >= tr.size:
        shouldFit = True
        availableQSize = self.area - self.placedQSize
        if availableQSize < tr.area:
            # The texture should fit, but won't, due to roundoff
            # error.  Make it correspondingly smaller, so we can
            # place it anyway.
            tr.area = availableQSize

    if shouldFit:
        # Look for a single rectangular hole to hold this piece.
        tp = self.findHole(tr.area, tr.w, tr.h)
        if tp:
            # If the hole's orientation (wide vs. tall) disagrees
            # with the texture's, draw the texture rotated to fit.
            texCmp = cmp(tr.w, tr.h)
            holeCmp = cmp(tp.p[1] - tp.p[0], tp.p[3] - tp.p[2])
            if texCmp != 0 and holeCmp != 0 and texCmp != holeCmp:
                tp.rotated = True
            tr.placements = [tp]
            tr.makeCard(self)
            tp.setBitmasks(self.bitmasks)
            self.placedQSize += tp.area
            self.texPlacements[tp] = tr
            self.placedSize += tr.size
            return

        # Couldn't find a single rectangular hole.  We'll have to
        # divide the texture up into several smaller pieces to cram it
        # in.
        tpList = self.findHolePieces(tr.area)
        if tpList:
            texCmp = cmp(tr.w, tr.h)
            tr.placements = tpList
            for tp in tpList:
                holeCmp = cmp(tp.p[1] - tp.p[0], tp.p[3] - tp.p[2])
                if texCmp != 0 and holeCmp != 0 and texCmp != holeCmp:
                    tp.rotated = True
                tp.setBitmasks(self.bitmasks)
                self.placedQSize += tp.area
                self.texPlacements[tp] = tr
            self.placedSize += tr.size
            tr.makeCard(self)
            return

    # Just let it overflow.
    tr.overflowed = 1
    tp = self.findOverflowHole(tr.area, tr.w, tr.h)
    tp.overflowed = 1
    # Grow the bitmask list if the overflow hole extends above the
    # rows we currently track.
    while len(self.bitmasks) <= tp.p[3]:
        self.bitmasks.append(BitArray())
    tr.placements = [tp]
    tr.makeCard(self)
    tp.setBitmasks(self.bitmasks)
    self.texPlacements[tp] = tr
def findHole(self, area, w, h):
    """ Searches for a rectangular hole that is at least area
    square units big, regardless of its shape, but attempt to find
    one that comes close to the right shape, at least.  If one is
    found, returns an appropriate TexPlacement; otherwise, returns
    None. """
    if area == 0:
        # Degenerate request: an empty placement always "fits".
        tp = TexPlacement(0, 0, 0, 0)
        return tp

    # Rotate the hole to horizontal first.
    w, h = max(w, h), min(w, h)

    aspect = float(w) / float(h)
    holes = self.findAvailableHoles(area, w, h)

    # Walk through the list and find the one with the best aspect
    # match.
    matches = []
    for tarea, tp in holes:
        l, r, b, t = tp.p
        tw = r - l
        th = t - b

        # To constrain our area within this rectangle, how would
        # we have to squish it?
        if tw < w:
            # We'd have to make it taller.
            nh = min(area / tw, th)
            th = nh
        elif th < h:
            # We'd have to make it narrower.
            nw = min(area / th, tw)
            tw = nw
        else:
            # Hey, we don't have to squish it after all!  Just
            # return this hole.
            tw = w
            th = h

        # Make a new tp that has the right area.
        tp = TexPlacement(l, l + tw, b, b + th)

        ta = float(max(tw, th)) / float(min(tw, th))
        if ta == aspect:
            # Perfect aspect match; no need to look further.
            return tp

        # Score in (0..1]; 1 means a perfect aspect match.
        match = min(ta, aspect) / max(ta, aspect)
        matches.append((match, tp))

    if matches:
        return max(matches)[1]
    return None
def findHolePieces(self, area):
    """ Returns a list of holes whose net area sums to the given
    area, or None if there are not enough holes.  Temporarily
    marks holes as occupied while searching, then restores the
    saved state before returning. """

    # First, save the original value of self.texPlacements, since
    # we will be modifying that during this search.
    savedTexPlacements = copy.copy(self.texPlacements)
    savedBitmasks = []
    for ba in self.bitmasks:
        savedBitmasks.append(BitArray(ba))

    result = []

    while area > 0:
        # We have to call findLargestHole() each time through this
        # loop, instead of just walking through
        # findAvailableHoles() in order, because
        # findAvailableHoles() might return a list of overlapping
        # holes.
        tp = self.findLargestHole()
        if not tp:
            break

        l, r, b, t = tp.p
        tpArea = (r - l) * (t - b)
        if tpArea >= area:
            # we're done.  Shorten the final hole so the total is
            # exactly the requested area.
            shorten = (tpArea - area) / (r - l)
            t -= shorten
            tp.p = (l, r, b, t)
            tp.area = (r - l) * (t - b)
            result.append(tp)
            self.texPlacements = savedTexPlacements
            self.bitmasks = savedBitmasks
            return result

        # Keep going.  Mark this hole occupied so the next
        # findLargestHole() doesn't return it again.
        area -= tpArea
        result.append(tp)
        tp.setBitmasks(self.bitmasks)
        self.texPlacements[tp] = None

    # Huh, not enough room, or no more holes.
    self.texPlacements = savedTexPlacements
    self.bitmasks = savedBitmasks
    return None
def findLargestHole(self):
    """ Returns the TexPlacement for the largest available
    rectangular hole on the canvas, or None if there are no holes
    at all. """
    holes = self.findAvailableHoles(0)
    if not holes:
        return None
    # Each entry is (area, tp); max() picks the largest area.
    return max(holes)[1]
def findAvailableHoles(self, area, w = None, h = None):
    """ Finds a list of available holes, of at least the indicated
    area.  Returns a list of tuples, where each tuple is of the
    form (area, tp).

    If w and h are non-None, this will short-circuit on the first
    hole it finds that fits w x h, and return just that hole in a
    singleton list.
    """

    holes = []
    lastTuples = set()
    lastBitmask = None
    b = 0
    while b < self.h:
        # Separate this row into (l, r) tuples.
        bm = self.bitmasks[b]

        if bm == lastBitmask:
            # This row is exactly the same as the row below; no
            # need to reexamine.
            b += 1
            continue

        lastBitmask = bm
        tuples = self.findEmptyRuns(bm)

        # Only runs that did not also start on the previous row can
        # begin a new hole at this row.
        newTuples = tuples.difference(lastTuples)
        for l, r in newTuples:
            # Find out how high we can go with this bitmask.
            mask = BitArray.range(l, r - l)
            t = b + 1
            while t < self.h and (self.bitmasks[t] & mask).isZero():
                t += 1

            tpw = (r - l)
            tph = (t - b)
            tarea = tpw * tph
            assert tarea > 0
            if tarea >= area:
                tp = TexPlacement(l, r, b, t)
                # Accept the hole in either orientation, since the
                # texture may be drawn rotated.
                if w and h and \
                   ((tpw >= w and tph >= h) or \
                    (tph >= w and tpw >= h)):
                    # This hole is big enough; short circuit.
                    return [(tarea, tp)]
                holes.append((tarea, tp))

        lastTuples = tuples
        b += 1

    return holes
def findOverflowHole(self, area, w, h):
    """ Searches for a hole large enough for (w, h), in the
    overflow space.  Since the overflow space is infinite, this
    will always succeed. """

    if w > self.w:
        # It won't fit within the margins at all; just stack it on
        # the top.

        # Scan down past all of the empty bitmasks that may be
        # stacked on top.
        b = len(self.bitmasks)
        while b > self.h and self.bitmasks[b - 1].isZero():
            b -= 1
        tp = TexPlacement(0, w, b, b + h)
        return tp

    # It fits within the margins; find the first row with enough
    # space for it.

    lastTuples = set()
    lastBitmask = None
    b = self.h
    while True:
        if b >= len(self.bitmasks):
            # Off the top.  Just leave it here.
            tp = TexPlacement(0, w, b, b + h)
            return tp

        # Separate this row into (l, r) tuples.
        bm = self.bitmasks[b]

        if bm == lastBitmask:
            # This row is exactly the same as the row below; no
            # need to reexamine.
            b += 1
            continue

        lastBitmask = bm
        tuples = self.findEmptyRuns(bm)

        newTuples = tuples.difference(lastTuples)
        for l, r in newTuples:
            # Is this region wide enough?
            if r - l < w:
                continue

            # It is; only claim the leftmost w columns of the run.
            r = l + w

            # Is it tall enough?  Rows beyond the current bitmask
            # list count as empty.
            mask = BitArray.range(l, r - l)
            t = b + 1
            while t < b + h and \
                  (t >= len(self.bitmasks) or (self.bitmasks[t] & mask).isZero()):
                t += 1

            if t < b + h:
                # Not tall enough.
                continue

            tp = TexPlacement(l, r, b, t)
            return tp

        lastTuples = tuples
        b += 1
def findEmptyRuns(self, bm):
    """ Separates a bitmask into a list of (l, r) tuples,
    corresponding to the empty regions in the row between 0 and
    self.w. """

    tuples = set()

    # l walks from the start of one off-bit run to the next; r is
    # the exclusive end of the current run, clamped to self.w.
    l = bm.getLowestOffBit()
    assert l != -1
    if l < self.w:
        r = bm.getNextHigherDifferentBit(l)
        if r == l or r >= self.w:
            # The run extends to (or past) the right edge.
            r = self.w
        tuples.add((l, r))
        l = bm.getNextHigherDifferentBit(r)
        while l != r and l < self.w:
            r = bm.getNextHigherDifferentBit(l)
            if r == l or r >= self.w:
                r = self.w
            tuples.add((l, r))
            l = bm.getNextHigherDifferentBit(r)

    return tuples
class TexRecord:
    """ Bookkeeping for one texture shown in the TexMemWatcher: its
    size, its placement(s) on the canvas, and the scene-graph
    geometry that draws it. """

    def __init__(self, key, tex, size, active):
        # key: unique integer id, used to name MouseWatcherRegions.
        self.key = key
        self.tex = tex
        self.active = active
        self.root = None
        self.regions = []
        self.placements = []
        self.overflowed = 0

        self.setSize(size)

    def setSize(self, size):
        """ Records the texture's size in bytes and derives the
        card's unquantized dimensions so that tw * th == size, with
        th/tw matching the texture's own aspect ratio. """
        self.size = size
        x = self.tex.getXSize()
        y = self.tex.getYSize()
        r = float(y) / float(x)

        # Card size, in unscaled texel units.
        self.tw = math.sqrt(self.size) / math.sqrt(r)
        self.th = self.tw * r

    def computePlacementSize(self, tmw):
        """ Quantizes (tw, th) into whole canvas Q-units, at least
        1 x 1, using the watcher's current quantize factor. """
        self.w = max(int(self.tw / tmw.quantize + 0.5), 1)
        self.h = max(int(self.th / tmw.quantize + 0.5), 1)
        self.area = self.w * self.h

    def setActive(self, flag):
        """ Flags whether the texture is active; inactive textures
        are drawn dimmed via color overrides. """
        self.active = flag
        if self.active:
            self.backing.clearColor()
            self.matte.clearColor()
            self.card.clearColor()
        else:
            self.backing.setColor((0.2, 0.2, 0.2, 1), 2)
            self.matte.setColor((0.2, 0.2, 0.2, 1), 2)
            self.card.setColor((0.4, 0.4, 0.4, 1), 2)

    def clearCard(self, tmw):
        """ Removes this texture's geometry and its mouse regions
        from the watcher. """
        if self.root:
            self.root.detachNode()
            self.root = None
        for r in self.regions:
            tmw.mw.removeRegion(r)
        self.regions = []

    def makeCard(self, tmw):
        """ (Re)builds the geometry for this texture: for each
        placement, a textured card with matte, backing, and wire
        frame, plus a clickable MouseWatcherRegion. """
        self.clearCard(tmw)

        root = NodePath('root')

        # A matte to frame the texture and indicate its status.
        matte = root.attachNewNode('matte', 0)

        # A backing to put behind the card.
        backing = root.attachNewNode('backing', 10)

        # A card to display the texture.
        card = root.attachNewNode('card', 20)

        # A wire frame to ring the matte and separate the card from
        # its neighbors.
        frame = root.attachNewNode('frame', 30)

        for p in self.placements:
            l, r, b, t = p.p

            # Shrink each piece by 10% about its center so adjacent
            # pieces don't touch.
            cx = (l + r) * 0.5
            cy = (b + t) * 0.5
            shrinkMat = Mat4.translateMat(-cx, 0, -cy) * Mat4.scaleMat(0.9) * Mat4.translateMat(cx, 0, cy)

            cm = CardMaker('backing')
            cm.setFrame(l, r, b, t)
            cm.setColor(0.1, 0.3, 0.5, 1)
            c = backing.attachNewNode(cm.generate())
            c.setMat(shrinkMat)

            cm = CardMaker('card')
            cm.setFrame(l, r, b, t)
            if p.rotated:
                # Rotate the UVs 90 degrees for rotated placements.
                cm.setUvRange((0, 1), (0, 0), (1, 0), (1, 1))
            c = card.attachNewNode(cm.generate())
            c.setMat(shrinkMat)

            cm = CardMaker('matte')
            cm.setFrame(l, r, b, t)
            matte.attachNewNode(cm.generate())

            # An outer and an inner (shrunken) black outline.
            ls = LineSegs('frame')
            ls.setColor(0, 0, 0, 1)
            ls.moveTo(l, 0, b)
            ls.drawTo(r, 0, b)
            ls.drawTo(r, 0, t)
            ls.drawTo(l, 0, t)
            ls.drawTo(l, 0, b)
            f1 = frame.attachNewNode(ls.create())
            f2 = f1.copyTo(frame)
            f2.setMat(shrinkMat)

        #matte.flattenStrong()
        self.matte = matte

        #backing.flattenStrong()
        self.backing = backing

        card.setTransparency(TransparencyAttrib.MAlpha)
        card.setTexture(self.tex)
        #card.flattenStrong()
        self.card = card

        #frame.flattenStrong()
        self.frame = frame

        root.reparentTo(tmw.canvas)
        self.root = root

        # Also, make one or more clickable MouseWatcherRegions.
        assert self.regions == []
        for pi in range(len(self.placements)):
            p = self.placements[pi]
            # Region name "key:placementIndex" is parsed by the
            # watcher's enterRegion()/leaveRegion().
            r = MouseWatcherRegion('%s:%s' % (self.key, pi), *p.p)
            tmw.mw.addRegion(r)
            self.regions.append(r)
class TexPlacement:
    """ One rectangular region of the canvas assigned to (part of) a
    texture.  Coordinates are quantized canvas units, stored as the
    tuple (l, r, b, t). """

    def __init__(self, l, r, b, t):
        self.p = (l, r, b, t)
        self.area = (r - l) * (t - b)
        self.rotated = False
        self.overflowed = 0

    def intersects(self, other):
        """ Returns True if the placements intersect, False
        otherwise. """
        sl, sr, sb, st = self.p
        ol, orr, ob, ot = other.p
        # Disjoint horizontally or vertically means no intersection.
        if ol >= sr or orr <= sl:
            return False
        if ob >= st or ot <= sb:
            return False
        return True

    def setBitmasks(self, bitmasks):
        """ Sets all of the appropriate bits to indicate this region
        is taken. """
        l, r, b, t = self.p
        occupied = BitArray.range(l, r - l)
        for row in range(b, t):
            assert (bitmasks[row] & occupied).isZero()
            bitmasks[row] |= occupied

    def clearBitmasks(self, bitmasks):
        """ Clears all of the appropriate bits to indicate this
        region is available. """
        l, r, b, t = self.p
        keep = ~BitArray.range(l, r - l)
        for row in range(b, t):
            assert (bitmasks[row] | keep).isAllOn()
            bitmasks[row] &= keep

    def hasOverlap(self, bitmasks):
        """ Returns true if there is an overlap with this region and
        any other region, false otherwise. """
        l, r, b, t = self.p
        occupied = BitArray.range(l, r - l)
        for row in range(b, t):
            if not (bitmasks[row] & occupied).isZero():
                return True
        return False
|
|
# -*- coding: utf-8 -*-
"""
Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import unicode_literals
import tempfile
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpResponse
from django.test import (
Client, RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.urls import reverse_lazy
from .views import get_view, post_view, trace_view
@override_settings(ROOT_URLCONF='test_client.urls')
class ClientTest(TestCase):
@classmethod
def setUpTestData(cls):
    """Create one active and one inactive user for the tests."""
    make_user = User.objects.create_user
    cls.u1 = make_user(username='testclient', password='password')
    cls.u2 = make_user(username='inactive', password='password', is_active=False)
def test_get_view(self):
    """GET a view."""
    # The data is ignored, but let's check it doesn't crash the system
    # anyway.
    query = {'var': '\xf2'}
    resp = self.client.get('/get_view/', query)

    # Check some response details
    self.assertContains(resp, 'This is a test')
    self.assertEqual(resp.context['var'], '\xf2')
    self.assertEqual(resp.templates[0].name, 'GET Template')
def test_get_post_view(self):
    """GET a view that normally expects POSTs."""
    resp = self.client.get('/post_view/', {})

    # The GET branch of the view should have rendered.
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.templates[0].name, 'Empty GET Template')
    self.assertTemplateUsed(resp, 'Empty GET Template')
    self.assertTemplateNotUsed(resp, 'Empty POST Template')
def test_empty_post(self):
    """POST an empty dictionary to a view."""
    resp = self.client.post('/post_view/', {})

    # The POST branch of the view should have rendered.
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.templates[0].name, 'Empty POST Template')
    self.assertTemplateUsed(resp, 'Empty POST Template')
    self.assertTemplateNotUsed(resp, 'Empty GET Template')
def test_post(self):
    """POST some data to a view."""
    resp = self.client.post('/post_view/', {'value': 37})

    # The posted value should be echoed back through the context.
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.context['data'], '37')
    self.assertEqual(resp.templates[0].name, 'POST Template')
    self.assertContains(resp, 'Data received')
def test_trace(self):
    """TRACE a view."""
    resp = self.client.trace('/trace_view/')
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.context['method'], 'TRACE')
    self.assertEqual(resp.templates[0].name, 'TRACE Template')
def test_response_headers(self):
    """Check the value of HTTP headers returned in a response."""
    resp = self.client.get('/header_view/')
    self.assertEqual(resp['X-DJANGO-TEST'], 'Slartibartfast')
def test_response_attached_request(self):
    """
    The returned response has a ``request`` attribute with the
    originating environ dict and a ``wsgi_request`` attribute with
    the originating ``WSGIRequest`` instance.
    """
    resp = self.client.get('/header_view/')

    self.assertTrue(hasattr(resp, 'request'))
    self.assertTrue(hasattr(resp, 'wsgi_request'))
    environ = resp.wsgi_request.environ
    for key, value in resp.request.items():
        self.assertIn(key, environ)
        self.assertEqual(environ[key], value)
def test_response_resolver_match(self):
    """The response contains a ResolverMatch instance."""
    resp = self.client.get('/header_view/')
    self.assertTrue(hasattr(resp, 'resolver_match'))
def test_response_resolver_match_redirect_follow(self):
    """
    The response's ResolverMatch reflects the final view reached
    when following redirects.
    """
    resp = self.client.get('/redirect_view/', follow=True)
    self.assertEqual(resp.resolver_match.url_name, 'get_view')
def test_response_resolver_match_regular_view(self):
    """
    The response's ResolverMatch carries the correct information
    when accessing a regular (non-redirecting) view.
    """
    resp = self.client.get('/get_view/')
    self.assertEqual(resp.resolver_match.url_name, 'get_view')
def test_raw_post(self):
    "POST raw data (with a content type) to a view"
    # The exact bytes of this XML document matter; the view parses
    # it and echoes the title and author back in the response body.
    test_doc = """<?xml version="1.0" encoding="utf-8"?>
<library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>
"""
    response = self.client.post("/raw_post_view/", test_doc,
                                content_type="text/xml")
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.templates[0].name, "Book template")
    self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
def test_insecure(self):
    """GET a URL over plain http."""
    resp = self.client.get('/secure_view/', secure=False)
    self.assertFalse(resp.test_was_secure_request)
    self.assertEqual(resp.test_server_port, '80')
def test_secure(self):
    """GET a URL over https."""
    resp = self.client.get('/secure_view/', secure=True)
    self.assertTrue(resp.test_was_secure_request)
    self.assertEqual(resp.test_server_port, '443')
def test_redirect(self):
    """GET a URL that redirects elsewhere."""
    resp = self.client.get('/redirect_view/')
    # A 302 pointing at /get_view/ is expected.
    self.assertRedirects(resp, '/get_view/')
def test_redirect_with_query(self):
    """GET a URL that redirects with given GET parameters."""
    resp = self.client.get('/redirect_view/', {'var': 'value'})
    # The query string must survive the redirect intact.
    self.assertRedirects(resp, '/get_view/?var=value')
def test_permanent_redirect(self):
    """GET a URL that redirects permanently elsewhere."""
    resp = self.client.get('/permanent_redirect_view/')
    # A 301 (permanent redirect) is expected.
    self.assertRedirects(resp, '/get_view/', status_code=301)
def test_temporary_redirect(self):
    """GET a URL that does a non-permanent redirect."""
    resp = self.client.get('/temporary_redirect_view/')
    # A 302 (non-permanent redirect) is expected.
    self.assertRedirects(resp, '/get_view/', status_code=302)
def test_redirect_to_strange_location(self):
    """GET a URL that redirects to a non-200 page."""
    resp = self.client.get('/double_redirect_view/')
    # The response is a 302, and fetching the redirect target
    # itself returns a 301.
    self.assertRedirects(resp, '/permanent_redirect_view/', target_status_code=301)
def test_follow_redirect(self):
    """A URL that redirects can be followed to termination."""
    resp = self.client.get('/double_redirect_view/', follow=True)
    self.assertRedirects(resp, '/get_view/', status_code=302, target_status_code=200)
    # Two hops were taken to reach the final view.
    self.assertEqual(len(resp.redirect_chain), 2)
def test_follow_relative_redirect(self):
    """A URL with a relative redirect can be followed."""
    resp = self.client.get('/accounts/', follow=True)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.request['PATH_INFO'], '/accounts/login/')
def test_follow_relative_redirect_no_trailing_slash(self):
"A URL with a relative redirect with no trailing slash can be followed."
response = self.client.get('/accounts/no_trailing_slash', follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request['PATH_INFO'], '/accounts/login/')
def test_redirect_http(self):
"GET a URL that redirects to an http URI"
response = self.client.get('/http_redirect_view/', follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
"GET a URL that redirects to an https URI"
response = self.client.get('/https_redirect_view/', follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
"GET a URL that responds as '404:Not Found'"
response = self.client.get('/bad_view/')
# Check that the response was a 404, and that the content contains MAGIC
self.assertContains(response, 'MAGIC', status_code=404)
# --- Form POST/GET handling ----------------------------------------------

def test_valid_form(self):
    "POST valid data to a form"
    post_data = {
        'text': 'Hello World',
        'email': 'foo@example.com',
        'value': 37,
        'single': 'b',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.post('/form_view/', post_data)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "Valid POST Template")

def test_valid_form_with_hints(self):
    "GET a form, providing hints in the GET data"
    hints = {
        'text': 'Hello World',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.get('/form_view/', data=hints)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "Form GET Template")
    # Check that the multi-value data has been rolled out ok
    self.assertContains(response, 'Select a valid choice.', 0)

def test_incomplete_data_form(self):
    "POST incomplete data to a form"
    post_data = {
        'text': 'Hello World',
        'value': 37
    }
    response = self.client.post('/form_view/', post_data)
    # Three required fields are missing: email, single, multi.
    self.assertContains(response, 'This field is required.', 3)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "Invalid POST Template")
    self.assertFormError(response, 'form', 'email', 'This field is required.')
    self.assertFormError(response, 'form', 'single', 'This field is required.')
    self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error(self):
    "POST erroneous data to a form"
    post_data = {
        'text': 'Hello World',
        'email': 'not an email address',
        'value': 37,
        'single': 'b',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.post('/form_view/', post_data)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "Invalid POST Template")
    self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')

def test_valid_form_with_template(self):
    "POST valid data to a form using multiple templates"
    post_data = {
        'text': 'Hello World',
        'email': 'foo@example.com',
        'value': 37,
        'single': 'b',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.post('/form_view_with_template/', post_data)
    self.assertContains(response, 'POST data OK')
    # Both the view template and its base were rendered.
    self.assertTemplateUsed(response, "form_view.html")
    self.assertTemplateUsed(response, 'base.html')
    self.assertTemplateNotUsed(response, "Valid POST Template")

def test_incomplete_data_form_with_template(self):
    "POST incomplete data to a form using multiple templates"
    post_data = {
        'text': 'Hello World',
        'value': 37
    }
    response = self.client.post('/form_view_with_template/', post_data)
    self.assertContains(response, 'POST data has errors')
    self.assertTemplateUsed(response, 'form_view.html')
    self.assertTemplateUsed(response, 'base.html')
    self.assertTemplateNotUsed(response, "Invalid POST Template")
    self.assertFormError(response, 'form', 'email', 'This field is required.')
    self.assertFormError(response, 'form', 'single', 'This field is required.')
    self.assertFormError(response, 'form', 'multi', 'This field is required.')

def test_form_error_with_template(self):
    "POST erroneous data to a form using multiple templates"
    post_data = {
        'text': 'Hello World',
        'email': 'not an email address',
        'value': 37,
        'single': 'b',
        'multi': ('b', 'c', 'e')
    }
    response = self.client.post('/form_view_with_template/', post_data)
    self.assertContains(response, 'POST data has errors')
    self.assertTemplateUsed(response, "form_view.html")
    self.assertTemplateUsed(response, 'base.html')
    self.assertTemplateNotUsed(response, "Invalid POST Template")
    self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_unknown_page(self):
    "GET an invalid URL"
    response = self.client.get('/unknown_view/')
    # Check that the response was a 404
    self.assertEqual(response.status_code, 404)

def test_url_parameters(self):
    "Make sure that URL ;-parameters are not stripped."
    response = self.client.get('/unknown_view/;some-parameter')
    # Check that the path in the response includes it (ignore that it's a 404)
    self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')

def test_view_with_login(self):
    "Request a page that is protected with @login_required"
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
    # Log in
    login = self.client.login(username='testclient', password='password')
    self.assertTrue(login, 'Could not log in')
    # Request a page that requires a login
    response = self.client.get('/login_protected_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

# login() must still work when contrib.sessions is absent, as long as a
# session engine is configured.
@override_settings(
    INSTALLED_APPS=['django.contrib.auth'],
    SESSION_ENGINE='django.contrib.sessions.backends.file',
)
def test_view_with_login_when_sessions_app_is_not_installed(self):
    self.test_view_with_login()
def test_view_with_force_login(self):
    "Request a page that is protected with @login_required"
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
    # Log in (bypassing authentication via force_login)
    self.client.force_login(self.u1)
    # Request a page that requires a login
    response = self.client.get('/login_protected_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

def test_view_with_method_login(self):
    "Request a page that is protected with a @login_required method"
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_method_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
    # Log in
    login = self.client.login(username='testclient', password='password')
    self.assertTrue(login, 'Could not log in')
    # Request a page that requires a login
    response = self.client.get('/login_protected_method_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

def test_view_with_method_force_login(self):
    "Request a page that is protected with a @login_required method"
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_method_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
    # Log in
    self.client.force_login(self.u1)
    # Request a page that requires a login
    response = self.client.get('/login_protected_method_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

def test_view_with_login_and_custom_redirect(self):
    "Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view_custom_redirect/')
    self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
    # Log in
    login = self.client.login(username='testclient', password='password')
    self.assertTrue(login, 'Could not log in')
    # Request a page that requires a login
    response = self.client.get('/login_protected_view_custom_redirect/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

def test_view_with_force_login_and_custom_redirect(self):
    """
    Request a page that is protected with
    @login_required(redirect_field_name='redirect_to')
    """
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view_custom_redirect/')
    self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
    # Log in
    self.client.force_login(self.u1)
    # Request a page that requires a login
    response = self.client.get('/login_protected_view_custom_redirect/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
    "Request a page that is protected with @login, but use bad credentials"
    login = self.client.login(username='otheruser', password='nopassword')
    # login() reports failure by returning False rather than raising.
    self.assertFalse(login)

def test_view_with_inactive_login(self):
    """
    An inactive user may login if the authenticate backend allows it.
    """
    credentials = {'username': 'inactive', 'password': 'password'}
    # Default ModelBackend rejects inactive users ...
    self.assertFalse(self.client.login(**credentials))
    # ... but AllowAllUsersModelBackend accepts them.
    with self.settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend']):
        self.assertTrue(self.client.login(**credentials))

@override_settings(
    AUTHENTICATION_BACKENDS=[
        'django.contrib.auth.backends.ModelBackend',
        'django.contrib.auth.backends.AllowAllUsersModelBackend',
    ]
)
def test_view_with_inactive_force_login(self):
    "Request a page that is protected with @login, but use an inactive login"
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
    # Log in, explicitly selecting the permissive backend.
    self.client.force_login(self.u2, backend='django.contrib.auth.backends.AllowAllUsersModelBackend')
    # Request a page that requires a login
    response = self.client.get('/login_protected_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'inactive')
def test_logout(self):
    "Request a logout after logging in"
    # Log in
    self.client.login(username='testclient', password='password')
    # Request a page that requires a login
    response = self.client.get('/login_protected_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')
    # Log out
    self.client.logout()
    # Request a page that requires a login; we're anonymous again.
    response = self.client.get('/login_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')

def test_logout_with_force_login(self):
    "Request a logout after logging in"
    # Log in
    self.client.force_login(self.u1)
    # Request a page that requires a login
    response = self.client.get('/login_protected_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')
    # Log out
    self.client.logout()
    # Request a page that requires a login
    response = self.client.get('/login_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')

@override_settings(
    AUTHENTICATION_BACKENDS=[
        'django.contrib.auth.backends.ModelBackend',
        'test_client.auth_backends.TestClientBackend',
    ],
)
def test_force_login_with_backend(self):
    """
    Request a page that is protected with @login_required when using
    force_login() and passing a backend.
    """
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/login_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
    # Log in
    self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend')
    # force_login() records the chosen backend on the user instance.
    self.assertEqual(self.u1.backend, 'test_client.auth_backends.TestClientBackend')
    # Request a page that requires a login
    response = self.client.get('/login_protected_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')

@override_settings(
    AUTHENTICATION_BACKENDS=[
        'django.contrib.auth.backends.ModelBackend',
        'test_client.auth_backends.TestClientBackend',
    ],
)
def test_force_login_without_backend(self):
    """
    force_login() without passing a backend and with multiple backends
    configured should automatically use the first backend.
    """
    self.client.force_login(self.u1)
    response = self.client.get('/login_protected_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context['user'].username, 'testclient')
    self.assertEqual(self.u1.backend, 'django.contrib.auth.backends.ModelBackend')

# Same logout flow must work with the cookie-based session backend.
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
def test_logout_cookie_sessions(self):
    self.test_logout()
def test_view_with_permissions(self):
    "Request a page that is protected with @permission_required"
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/permission_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
    # Log in
    login = self.client.login(username='testclient', password='password')
    self.assertTrue(login, 'Could not log in')
    # Log in with wrong permissions. Should result in 302.
    response = self.client.get('/permission_protected_view/')
    self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
    # TODO: Log in with right permissions and request the page again

def test_view_with_permissions_exception(self):
    "Request a page that is protected with @permission_required but raises an exception"
    # Get the page without logging in. Should result in 403.
    response = self.client.get('/permission_protected_view_exception/')
    self.assertEqual(response.status_code, 403)
    # Log in
    login = self.client.login(username='testclient', password='password')
    self.assertTrue(login, 'Could not log in')
    # Log in with wrong permissions. Should result in 403.
    response = self.client.get('/permission_protected_view_exception/')
    self.assertEqual(response.status_code, 403)

def test_view_with_method_permissions(self):
    "Request a page that is protected with a @permission_required method"
    # Get the page without logging in. Should result in 302.
    response = self.client.get('/permission_protected_method_view/')
    self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
    # Log in
    login = self.client.login(username='testclient', password='password')
    self.assertTrue(login, 'Could not log in')
    # Log in with wrong permissions. Should result in 302.
    response = self.client.get('/permission_protected_method_view/')
    self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
    # TODO: Log in with right permissions and request the page again
def test_external_redirect(self):
    # External targets can't be fetched, so skip the follow-up request.
    response = self.client.get('/django_project_redirect/')
    self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)

def test_external_redirect_with_fetch_error_msg(self):
    """
    Check that assertRedirects without fetch_redirect_response=False raises
    a relevant ValueError rather than a non-descript AssertionError.
    """
    response = self.client.get('/django_project_redirect/')
    msg = (
        "The test client is unable to fetch remote URLs (got "
        "https://www.djangoproject.com/). If the host is served by Django, "
        "add 'www.djangoproject.com' to ALLOWED_HOSTS. "
        "Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
    )
    with self.assertRaisesMessage(ValueError, msg):
        self.assertRedirects(response, 'https://www.djangoproject.com/')

def test_session_modifying_view(self):
    "Request a page that modifies the session"
    # Session value isn't set initially
    with self.assertRaises(KeyError):
        self.client.session['tobacconist']
    self.client.post('/session_view/')
    # Check that the session was modified
    self.assertEqual(self.client.session['tobacconist'], 'hovercraft')

# Sessions must still work with no contrib apps installed, given a
# file-based session engine.
@override_settings(
    INSTALLED_APPS=[],
    SESSION_ENGINE='django.contrib.sessions.backends.file',
)
def test_sessions_app_is_not_installed(self):
    self.test_session_modifying_view()

# A bogus session engine should surface as an ImportError naming it.
@override_settings(
    INSTALLED_APPS=[],
    SESSION_ENGINE='django.contrib.sessions.backends.nonexistent',
)
def test_session_engine_is_invalid(self):
    with self.assertRaisesMessage(ImportError, 'nonexistent'):
        self.test_session_modifying_view()
def test_view_with_exception(self):
    "Request a page that is known to throw an error"
    with self.assertRaises(KeyError):
        self.client.get("/broken_view/")

def test_mail_sending(self):
    "Test that mail is redirected to a dummy outbox during test setup"
    response = self.client.get('/mail_sending_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].subject, 'Test message')
    self.assertEqual(mail.outbox[0].body, 'This is a test email')
    self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
    self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
    self.assertEqual(mail.outbox[0].to[1], 'second@example.com')

def test_reverse_lazy_decodes(self):
    "Ensure reverse_lazy works in the test client"
    data = {'var': 'data'}
    response = self.client.get(reverse_lazy('get_view'), data)
    # Check some response details
    self.assertContains(response, 'This is a test')

def test_relative_redirect(self):
    response = self.client.get('/accounts/')
    self.assertRedirects(response, '/accounts/login/')

def test_relative_redirect_no_trailing_slash(self):
    response = self.client.get('/accounts/no_trailing_slash')
    self.assertRedirects(response, '/accounts/login/')

def test_mass_mail_sending(self):
    "Test that mass mail is redirected to a dummy outbox during test setup"
    response = self.client.get('/mass_mail_sending_view/')
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(mail.outbox), 2)
    self.assertEqual(mail.outbox[0].subject, 'First Test message')
    self.assertEqual(mail.outbox[0].body, 'This is the first test email')
    self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
    self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
    self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
    self.assertEqual(mail.outbox[1].subject, 'Second Test message')
    self.assertEqual(mail.outbox[1].body, 'This is the second test email')
    self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
    self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
    self.assertEqual(mail.outbox[1].to[1], 'third@example.com')

def test_exception_following_nested_client_request(self):
    """
    A nested test client request shouldn't clobber exception signals from
    the outer client request.
    """
    with self.assertRaisesMessage(Exception, 'exception message'):
        self.client.get('/nesting_exception_view/')

def test_uploading_temp_file(self):
    test_file = tempfile.TemporaryFile()
    response = self.client.post('/upload_view/', data={'temp_file': test_file})
    self.assertEqual(response.content, b'temp_file')

def test_uploading_named_temp_file(self):
    test_file = tempfile.NamedTemporaryFile()
    response = self.client.post('/upload_view/', data={'named_temp_file': test_file})
    self.assertEqual(response.content, b'named_temp_file')
@override_settings(
    MIDDLEWARE=['django.middleware.csrf.CsrfViewMiddleware'],
    ROOT_URLCONF='test_client.urls',
)
class CSRFEnabledClientTests(SimpleTestCase):
    """Tests for the enforce_csrf_checks flag of the test client."""

    def test_csrf_enabled_client(self):
        "A client can be instantiated with CSRF checks enabled"
        csrf_client = Client(enforce_csrf_checks=True)
        # The normal client allows the post
        response = self.client.post('/post_view/', {})
        self.assertEqual(response.status_code, 200)
        # The CSRF-enabled client rejects it
        response = csrf_client.post('/post_view/', {})
        self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
    """Client subclass used to verify client_class is honoured."""
    # Marker attribute checked by CustomTestClientTest.
    i_am_customized = "Yes"
class CustomTestClientTest(SimpleTestCase):
    # Opt in to the custom client class for self.client.
    client_class = CustomTestClient

    def test_custom_test_client(self):
        """A test case can specify a custom class for self.client."""
        self.assertIs(hasattr(self.client, "i_am_customized"), True)
def _generic_view(request):
    """Minimal view returning an empty 200 response; used as a stand-in
    target for HTTP methods that don't need a dedicated test view."""
    return HttpResponse(status=200)
@override_settings(ROOT_URLCONF='test_client.urls')
class RequestFactoryTest(SimpleTestCase):
    """Tests for the request factory."""

    # A mapping between names of HTTP/1.1 methods and their test views.
    http_methods_and_views = (
        ('get', get_view),
        ('post', post_view),
        ('put', _generic_view),
        ('patch', _generic_view),
        ('delete', _generic_view),
        ('head', _generic_view),
        ('options', _generic_view),
        ('trace', trace_view),
    )

    def setUp(self):
        self.request_factory = RequestFactory()

    def test_request_factory(self):
        """The request factory implements all the HTTP/1.1 methods."""
        for method_name, view in self.http_methods_and_views:
            method = getattr(self.request_factory, method_name)
            request = method('/somewhere/')
            response = view(request)
            self.assertEqual(response.status_code, 200)

    def test_get_request_from_factory(self):
        """
        The request factory returns a templated response for a GET request.
        """
        request = self.request_factory.get('/somewhere/')
        response = get_view(request)
        self.assertContains(response, 'This is a test')

    def test_trace_request_from_factory(self):
        """The request factory returns an echo response for a TRACE request."""
        url_path = '/somewhere/'
        request = self.request_factory.trace(url_path)
        response = trace_view(request)
        # TRACE echoes the request line back in the body.
        protocol = request.META["SERVER_PROTOCOL"]
        echoed_request_line = "TRACE {} {}".format(url_path, protocol)
        self.assertContains(response, echoed_request_line)
|
|
#!/usr/bin/env python3
# tertiary Helper
# Unless absolutely necessary, do not use self.controller.send(...)
# Implement the method in micron.py and call that instead
# Abstraction yo
# Advanced level functions combining multiple basic functions are to be implemented here
# Methods involving multiple functions in this script should generally be implemented in a separate script and not implemented here, unless it is a very common function
# stage-control to interact with the NanoLab Microcontroller
# Microcontroller Model: Micos 1860SMC Basic
# Made 2019, Sun Yudong, Wu Mingsong
# sunyudong [at] outlook [dot] sg, mingonsgwu [at] outlook [dot] sg
# github.com/sunjerry019/photonLauncher
# Change code here if for e.g. sounds needs to be played BEFORE the completion of the raster
import micron
import playsound
import numpy as np
import math
import time
import datetime
import threading
from extraFunctions import ThreadWithExc
import jukebox
class InputError(Exception):
    """Raised when user-supplied raster/move parameters fail validation."""
    # Error in user input -> To be caught and flagged accordingly
    pass
class StageControl():
def __init__(self, noinvertx = 1, noinverty = 1, GUI_Object = None, jukeboxKWArgs = None, noFinishTone = True, **kwargs):
    """Set up the stage controller and the completion-tone jukebox.

    noinvertx, noinverty -- axis sign multipliers, each either 1 or -1
    GUI_Object           -- optional GUI handle used for status signals
    jukeboxKWArgs        -- keyword arguments forwarded to jukebox.JukeBox
    noFinishTone         -- when True, finishTone() plays quietly
    kwargs               -- forwarded to micron.Micos (the controller)
    """
    # BUGFIX (idiom): jukeboxKWArgs previously defaulted to a mutable {}
    # shared across instances; use None-sentinel instead.
    if jukeboxKWArgs is None:
        jukeboxKWArgs = {}
    # noinvertx can take values 1 and -1
    assert noinvertx in (-1, 1), "No invertx can only take -1 or 1"
    assert noinverty in (-1, 1), "No inverty can only take -1 or 1"
    self.controller = micron.Micos(GUI_Object = GUI_Object, **kwargs)
    self.GUI_Object = GUI_Object
    self.noinvertx = noinvertx
    self.noinverty = noinverty
    # Generate filename based on the serial number of the model
    self.serial = self.controller.getSerial()
    self.noFinishTone = noFinishTone
    # define contants
    self.UP, self.RIGHT, self.DOWN, self.LEFT = 0, 1, 2, 3
    # music thread
    self.musicProcess = None
    # jukeboxKWArgs.update({
    #     "profile": "alarm"
    # })
    self.jukebox = jukebox.JukeBox(**jukeboxKWArgs) # playmusic = True,
def finishTone(self):
    """Notify the user that the current operation has completed.

    Plays the jukebox track on a background thread (quiet when
    self.noFinishTone is set) and, if a GUI is attached, emits its
    operationDone signal.
    """
    # Play sound to let user know that the action is completed
    # To stop, call self.musicProcess.terminate()
    self.musicProcess = ThreadWithExc(target = self.jukebox.playmusic, kwargs = { "quiet": self.noFinishTone })
    self.musicProcess.start()
    if self.GUI_Object:
        self.GUI_Object.operationDone.emit()
# ARCHIVE CODE
# , jukeboxKWArgs = {}
# target=self.jukeboxThread, kwargs=jukeboxKWArgs, args=(,)
# def jukeboxThread(self, **jukeboxKWArgs):
# return
# implement cardinal direction movement definitions, the input cases arent actually necessary once we have buttons paired to commands on guimicro
def rcardinal(self, direction, distance):
if (direction == self.LEFT):
return self.controller.rmove(x = distance * self.noinvertx, y = 0)
elif (direction == self.RIGHT):
return self.controller.rmove(x = -distance * self.noinvertx, y = 0)
elif (direction == self.UP):
return self.controller.rmove(y = distance * self.noinverty, x = 0)
elif (direction == self.DOWN):
return self.controller.rmove(y = -distance * self.noinverty, x = 0)
else:
return False
def rdiagonal(self, distance):
# implement drawing of diagonals
# implement button for relative move directly
distance /= 1.414213562373095
self.controller.rmove(x = distance * self.invertx, y = distance * self.inverty)
# most basic, single rectangle cut rastering
# most basic, single rectangle cut rastering
def singleraster(self, velocity, xDist, yDist, rasterSettings, returnToOrigin = False, estimateTime = True, onlyEstimate = False, quietLog = False, verboseLog = False):
    """Raster a single rectangle (outline or filled line-scan).

    velocity       -- stage velocity for the raster moves
    xDist, yDist   -- rectangle extents; negative values raster in the
                      opposite direction
    rasterSettings -- {"direction": "x"|"y"|"xy"|"yx", "step": s};
                      "xy"/"yx" draws the rectangle outline (no step
                      needed); "x"/"y" fills with lines along that axis,
                      stepping by s (> 0) on the other axis
    returnToOrigin -- move back to the starting point when done
    estimateTime   -- log an estimated completion time
    onlyEstimate   -- do no moves; return the estimated duration (s)
    quietLog/verboseLog -- logging verbosity flags

    Raises InputError on invalid parameters or out-of-limit moves.
    """
    # Raster in a rectangle
    # rasterSettings = {
    #     "direction": "x" || "y" || "xy",  # Order matters here xy vs yx
    #     "step": 1                         # If set to xy, step is not necessary
    # }
    # setting onlyEstimate will return the estimated time for the action
    # xy/yx = Draw a rectangle with sides xDist and yDist
    # x = horizontal lines will be drawn while scanning down/up
    # y = vertical lines will be drawn while scanning right/left
    # i.e. to say axis = continuous on which axis
    # Negative distance to raster in the opposite direction
    # Step must be positive

    # We check if everything is valid
    try:
        assert isinstance(velocity, (int, float))         , "Velocity must be integer or float"
        assert isinstance(xDist, (int, float))            , "xDist must be integer or float"
        assert isinstance(yDist, (int, float))            , "yDist must be integer or float"
        assert isinstance(rasterSettings, dict)           , "rasterSettings must be a dictionary"
        assert "direction" in rasterSettings              , "Raster direction must be in rasterSettings"
        assert isinstance(rasterSettings["direction"], str), "Invalid raster direction: {}".format(rasterSettings["direction"])
        # rastering x or y
        if len(rasterSettings["direction"]) == 1:
            assert rasterSettings["direction"] in self.controller.axes, "Invalid raster direction: {}".format(rasterSettings["direction"])
            assert "step" in rasterSettings               , "Raster step must be in rasterSettings"
            assert rasterSettings["step"] > 0             , "Step size must be positive"
        else:
            assert len(rasterSettings["direction"]) == 2 and (set(rasterSettings["direction"]) == set(self.controller.axes)), "Invalid raster direction {}".format(rasterSettings["direction"])
        # Check stage limits
        assert self.controller.stage.xlim[0] <= self.controller.stage.x + xDist <= self.controller.stage.xlim[1], "x not within limits"
        assert self.controller.stage.ylim[0] <= self.controller.stage.y + yDist <= self.controller.stage.ylim[1], "y not within limits"
    except AssertionError as e:
        raise InputError(e)

    if onlyEstimate:
        estimateTime = True

    # Set shutter to not output logs
    # To ensure the timing is displayed
    self.controller.shutter.quietLog = True

    # ACTUAL FUNCTION
    self.controller.setvel(velocity)

    # Get the current position of the stage
    oX, oY = self.controller.stage.x, self.controller.stage.y

    # Get index of the first direction to raster
    # We change to axes A and B because it could be xy or yx
    a = self.controller.axes.index(rasterSettings["direction"][0])
    b = a ^ 1
    distances = [xDist, yDist]

    # Check the raster step
    if len(rasterSettings["direction"]) > 1:
        # Rastering a square
        if estimateTime:
            _totalTime = 2 * micron.Micos.getDeltaTime(distances[a], 0, velocity) + \
                         2 * micron.Micos.getDeltaTime(distances[b], 0, velocity)
            # We always return to origin, so need not calculate
            if onlyEstimate:
                return _totalTime
            _doneTime = datetime.datetime.now() + datetime.timedelta(seconds = _totalTime)
            if not quietLog:
                self.logconsole("Total Time = {} Est Done = {}".format(_totalTime, _doneTime.strftime('%Y-%m-%d %H:%M:%S')))
        # Relative moves are blocking, so we can flood the FIFO stack after we are sure all commands have been cleared
        self.controller.waitClear()
        self.controller.shutter.open()
        # Trace the four sides of the rectangle back to the start point.
        self.controller.rmove(**{
            self.controller.axes[a]: distances[a],
            self.controller.axes[b]: 0
        })
        self.controller.rmove(**{
            self.controller.axes[a]: 0,
            self.controller.axes[b]: distances[b]
        })
        self.controller.rmove(**{
            self.controller.axes[a]: -distances[a],
            self.controller.axes[b]: 0
        })
        self.controller.rmove(**{
            self.controller.axes[a]: 0,
            self.controller.axes[b]: -distances[b]
        })
        self.controller.waitClear()
        self.controller.shutter.close()
    else:
        # Normal rastering
        # Since python range doesn't allow for float step sizes, we find the number of times to go raster a line
        # DO NOTE THAT THIS PROBABLY WILL CAUSE ROUNDING ERRORS
        # Floats are ROUNDED DOWN!
        _lines = math.floor(abs(distances[b] / rasterSettings["step"]))
        if estimateTime:
            # It doesnt matter if its x or y
            _bDirTime = micron.Micos.getDeltaTime(rasterSettings["step"], 0, velocity)
            _timeperline = micron.Micos.getDeltaTime(distances[a], 0, velocity) + _bDirTime
            _totalTime = _lines * _timeperline - _bDirTime
            _totalTime += micron.Micos.getDeltaTime(0, 0, 100, shutterCycles = 2, shutterAbsoluteMode = self.controller.shutter.absoluteMode)
            if returnToOrigin:
                # If even, we end up at the top right of the box // _q = 0
                # If odd, we end up at the bottom right of the box // _q = 1
                _q = _lines % 2
                _totalTime += micron.Micos.getDeltaTime(distances[a] if _q else 0, _lines * rasterSettings["step"] , 1000)
            if onlyEstimate:
                return _totalTime
            _deltaTime = datetime.timedelta(seconds = _totalTime)
            _doneTime = datetime.datetime.now() + _deltaTime
            # "Time/line =", _timeperline,
            if not quietLog:
                self.logconsole("Total Time = {} Lines = {} Est Done = {}".format(_deltaTime, _lines, _doneTime.strftime('%Y-%m-%d %H:%M:%S')))
        # Step on axis B carries the sign of the requested B distance.
        _step = -rasterSettings["step"] if distances[b] < 0 else rasterSettings["step"]
        self.controller.shutter.open()
        t0 = datetime.datetime.now()
        for i in range(_lines):
            print("Rastering line ", i)
            # If its not the first one, move B-Axis
            if i:
                self.controller.rmove(**{
                    self.controller.axes[a]: 0,
                    self.controller.axes[b]: _step
                })
            _q = i % 2  # switch directions for rastering every time
            self.controller.rmove(**{
                # First one moves right
                self.controller.axes[a]: distances[a] if not _q else -distances[a],
                self.controller.axes[b]: 0
            })
            # self.controller.waitClear()
            # time.sleep(_sleepTime if not i else _sleepTime - _bDirTime)
            # MOVED SLEEP TO RMOVE
            t1 = datetime.datetime.now()
            self.controller.waitClear()
            t2 = datetime.datetime.now()
            if verboseLog:
                self.logconsole("\nTimes = {}, {}".format(t1 - t0, t2 - t0))
                print("\nSTATUS = ",self.controller.getStatus(),"\n")
        self.controller.shutter.close()

    if returnToOrigin:
        self.controller.shutter.close()
        # we /could/ use self.controller.move() but I don't really trust it
        # so...relative move
        cX, cY = self.controller.stage.x, self.controller.stage.y
        # Travel back fast, then restore the requested raster velocity.
        self.controller.setvel(1000)
        self.controller.rmove(x = oX - cX, y = oY - cY)
        self.controller.setvel(velocity)

    self.controller.shutter.quietLog = False
    if not quietLog:
        self.finishTone()
# overpowered, omni-potent rastering solution for both laser power and velocity
def arrayraster(self, inivel, inipower, x_isVel, ncols, xincrement, xGap, y_isVel, nrows, yincrement, yGap, xDist, yDist, rasterSettings, returnToOrigin = True):
# building parameter mother-of-all-lists (MOAL) to parse through when cutting every individual raster. Raster array will be numbered left to right top to bottom
# info structure: <primary list> <raster1> (initial position tuple1), [velocity, power]</raster1> <raster2> (initial position tuple2), [velocity, power]</raster2> .... </primary list>
# NOTE: This function can CLEARLY be optimized
# Struct [
# [(Init Pos), [velocity, power]], ...
# ]
# Set shutter to not output logs
# To ensure the timing is displayed
self.controller.shutter.quietLog = True
xone = self.controller.stage.x
yone = self.controller.stage.y
moal = []
for i in range(nrows):
for j in range(ncols):
nthsquare = []
axe = xone + j * (xDist + xGap)
why = yone + i * (yDist + yGap)
# gui combobox setting: velocity is True, power is False
if x_isVel and y_isVel:
speed = (inivel + i * yincrement) + xincrement * j
powa = inipower
elif x_isVel and not y_isVel:
speed = inivel + xincrement * j
powa = inipower + yincrement * i
elif not x_isVel and not y_isVel:
speed = inivel
powa = (inivel + i * yincrement) + xincrement * j
elif not x_isVel and y_isVel:
speed = inivel + yincrement * i
powa = inipower + xincrement * j
startpos = (axe , why)
speedpowa = [speed, powa]
nthsquare.append(startpos)
nthsquare.append(speedpowa)
moal.append(nthsquare)
print(moal)
#TODO! have a countdown for all rastering and run timer in separate thread
# Estimate time
totaltime = 0
if len(moal) > 50:
# Sorry, algo is trash
estTimeString = "No time estimate, too many squares"
else:
firstsq = True
for n, square in enumerate(moal):
subtotaltime = 0 if firstsq else micron.Micos.getDeltaTime(x = xGap + xDist, y = 0, velocity = 500)
firstsq = False
if not (n + 1) % ncols:
micron.Micos.getDeltaTime(x = -ncols * (xGap + xDist), y = yDist + yGap, velocity = 500)
rasvelocity = square[1][0]
subtotaltime += self.singleraster(velocity = square[1][0], xDist = xDist, yDist = yDist, rasterSettings = rasterSettings, returnToOrigin = True, onlyEstimate = True)
totaltime += subtotaltime
_deltaTime = datetime.timedelta(seconds = totaltime)
doneTime = datetime.datetime.now() + _deltaTime
estTimeString = "Est time = {} Est Done = {}".format(_deltaTime, doneTime.strftime('%Y-%m-%d %H:%M:%S'))
self.logconsole(estTimeString)
oX, oY = self.controller.stage.x, self.controller.stage.y
# actual rastering
for i in range(nrows):
for u in range(ncols):
self.controller.powerServo.powerstep(moal[u + i*ncols][1][1])
self.singleraster(velocity = moal[u + i*ncols][1][0], xDist = xDist, yDist = yDist, rasterSettings = rasterSettings, returnToOrigin = True, estimateTime = False, quietLog = True)
self.controller.setvel(500)
self.controller.rmove(x = xGap + xDist, y = 0)
self.logconsole(estTimeString + ' ({}, {}) raster completed! :D'.format(i, u))
self.controller.rmove(x = -ncols * (xGap + xDist), y = yDist + yGap)
if returnToOrigin:
self.controller.shutter.close()
# we /could/ use self.controller.move() but I don't really trust it
# so...relative move
cX, cY = self.controller.stage.x, self.controller.stage.y
oV = self.controller.velocity
self.controller.setvel(1000)
self.controller.rmove(x = oX - cX, y = oY - cY)
self.controller.setvel(oV)
# We set the velocity to the last velocity
if self.GUI_Object is not None:
self.GUI_Object.setOperationStatus("Raster Finished. Have a g8 day. Ready.")
else:
print('raster completed, have a gr8 day. Self-destruct sequence initiated (T-10).')
self.controller.shutter.quietLog = False
self.finishTone()
def logconsole(self, msg, printToTerm = True, **printArgs):
if self.GUI_Object is not None:
self.GUI_Object.setOperationStatus(msg, printToTerm, **printArgs)
else:
print(msg, **printArgs)
    def __enter__(self):
        """Context-manager entry; returns this instance (``with ... as s``)."""
        return self
    def __exit__(self, e_type, e_val, traceback):
        """Context-manager exit: close the controller's device handle.

        Exceptions are not suppressed (implicitly returns None).
        """
        self.controller.dev.close()
# for WMS's benefit: this allows for interactive interface for this script alone
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Measurement unit forwarded to the stage controller.
    parser.add_argument('-u', '--unit', type = str, help = "Unit, microstep, um, mm, cm, m, in, mil")
    # Suppress the Ctrl-C handler (handled by StageControl -- TODO confirm).
    parser.add_argument('-c', '--noCtrlCHandler', help="No Ctrl C Handler", action='store_true')
    # Skip homing the stage on startup.
    parser.add_argument('-H', '--noHome', help="noHome", action='store_true')
    args = parser.parse_args()
    # The context manager guarantees the controller device is closed on exit.
    with StageControl(noCtrlCHandler = args.noCtrlCHandler, unit = args.unit, noHome = args.noHome) as s:
        print("\n\ns = StageControl(); s.controller for controller movements\n\n")
        # import pdb; pdb.set_trace()
        # Drop into an interactive REPL with `s` in scope.
        import code; code.interact(local=locals())
|
|
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import logging
import ssl
from urllib.parse import urlencode
import typing
import certifi
import urllib3
from urllib3._collections import HTTPHeaderDict
from openapi_client.exceptions import ApiException, ApiValueError
logger = logging.getLogger(__name__)
class RESTClientObject(object):
    """HTTP client wrapper around urllib3 used by the generated ApiClient.

    Owns a urllib3 PoolManager (or ProxyManager when a proxy is configured)
    built from the supplied ``configuration`` object, and exposes one
    convenience method per HTTP verb, all of which delegate to ``request``.
    """
    def __init__(self, configuration, pools_size=4, maxsize=None):
        """Build the connection pool manager from *configuration*.

        :param configuration: client Configuration (SSL verification, CA
            bundle, client certs, proxy, retries, socket options...).
        :param pools_size: number of connection pools to cache.
        :param maxsize: max parallel connections per host; defaults to
            ``configuration.connection_pool_maxsize`` or 4.
        """
        # urllib3.PoolManager will pass all kw parameters to connectionpool
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
        # maxsize is the number of requests to host that are allowed in parallel # noqa: E501
        # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
        # cert_reqs
        if configuration.verify_ssl:
            cert_reqs = ssl.CERT_REQUIRED
        else:
            cert_reqs = ssl.CERT_NONE
        # ca_certs
        if configuration.ssl_ca_cert:
            ca_certs = configuration.ssl_ca_cert
        else:
            # if not set certificate file, use Mozilla's root certificates.
            ca_certs = certifi.where()
        addition_pool_args = {}
        if configuration.assert_hostname is not None:
            addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
        if configuration.retries is not None:
            addition_pool_args['retries'] = configuration.retries
        if configuration.socket_options is not None:
            addition_pool_args['socket_options'] = configuration.socket_options
        if maxsize is None:
            if configuration.connection_pool_maxsize is not None:
                maxsize = configuration.connection_pool_maxsize
            else:
                maxsize = 4
        # https pool manager
        if configuration.proxy:
            self.pool_manager = urllib3.ProxyManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=ca_certs,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                proxy_url=configuration.proxy,
                proxy_headers=configuration.proxy_headers,
                **addition_pool_args
            )
        else:
            self.pool_manager = urllib3.PoolManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=ca_certs,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                **addition_pool_args
            )
    def request(
        self,
        method: str,
        url: str,
        query_params: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None,
        headers: typing.Optional[HTTPHeaderDict] = None,
        fields: typing.Optional[typing.Tuple[typing.Tuple[str, typing.Any], ...]] = None,
        body: typing.Optional[typing.Union[str, bytes]] = None,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> urllib3.HTTPResponse:
        """Perform requests.
        :param method: http request method
        :param url: http request url
        :param query_params: query parameters in the url
        :param headers: http request headers
        :param body: request body, for other types
        :param fields: request parameters for
                        `application/x-www-form-urlencoded`
                        or `multipart/form-data`
        :param stream: if True, the urllib3.HTTPResponse object will
                       be returned without reading/decoding response
                       data. Default is False.
        :param timeout: timeout setting for this request. If one
                        number provided, it will be total request
                        timeout. It can also be a pair (tuple) of
                        (connection, read) timeouts.
        :raises ApiValueError: if both `fields` and `body` are given.
        :raises ApiException: for SSL errors or un-preparable requests.
        """
        method = method.upper()
        assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
                          'PATCH', 'OPTIONS']
        # `fields` and `body` are mutually exclusive encodings of the payload.
        if fields and body:
            raise ApiValueError(
                "body parameter cannot be used with fields parameter."
            )
        fields = fields or {}
        headers = headers or {}
        if timeout:
            # A single number is the total timeout; a 2-tuple is
            # (connect, read).
            if isinstance(timeout, (int, float)): # noqa: E501,F821
                timeout = urllib3.Timeout(total=timeout)
            elif (isinstance(timeout, tuple) and
                  len(timeout) == 2):
                timeout = urllib3.Timeout(connect=timeout[0], read=timeout[1])
        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/json'
        try:
            # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
            if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
                if query_params:
                    url += '?' + urlencode(query_params)
                if headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
                    r = self.pool_manager.request(
                        method, url,
                        fields=fields,
                        encode_multipart=False,
                        preload_content=not stream,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type which generated by urllib3 will be
                    # overwritten.
                    del headers['Content-Type']
                    r = self.pool_manager.request(
                        method, url,
                        fields=fields,
                        encode_multipart=True,
                        preload_content=not stream,
                        timeout=timeout,
                        headers=headers)
                # Pass a `string` parameter directly in the body to support
                # other content types than Json when `body` argument is
                # provided in serialized form
                elif isinstance(body, str) or isinstance(body, bytes):
                    request_body = body
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=not stream,
                        timeout=timeout,
                        headers=headers)
                else:
                    # Cannot generate the request from given parameters
                    msg = """Cannot prepare a request message for provided
                             arguments. Please check that your arguments match
                             declared content type."""
                    raise ApiException(status=0, reason=msg)
            # For `GET`, `HEAD`
            else:
                r = self.pool_manager.request(method, url,
                                              fields=query_params,
                                              preload_content=not stream,
                                              timeout=timeout,
                                              headers=headers)
        except urllib3.exceptions.SSLError as e:
            msg = "{0}\n{1}".format(type(e).__name__, str(e))
            raise ApiException(status=0, reason=msg)
        if not stream:
            # log response body
            logger.debug("response body: %s", r.data)
        return r
    def GET(self, url, headers=None, query_params=None, stream=False,
            timeout=None, fields=None) -> urllib3.HTTPResponse:
        """Issue a GET request (delegates to :meth:`request`)."""
        return self.request("GET", url,
                            headers=headers,
                            stream=stream,
                            timeout=timeout,
                            query_params=query_params, fields=fields)
    def HEAD(self, url, headers=None, query_params=None, stream=False,
             timeout=None, fields=None) -> urllib3.HTTPResponse:
        """Issue a HEAD request (delegates to :meth:`request`)."""
        return self.request("HEAD", url,
                            headers=headers,
                            stream=stream,
                            timeout=timeout,
                            query_params=query_params, fields=fields)
    def OPTIONS(self, url, headers=None, query_params=None,
                body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
        """Issue an OPTIONS request (delegates to :meth:`request`)."""
        return self.request("OPTIONS", url,
                            headers=headers,
                            query_params=query_params,
                            stream=stream,
                            timeout=timeout,
                            body=body, fields=fields)
    def DELETE(self, url, headers=None, query_params=None, body=None,
               stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
        """Issue a DELETE request (delegates to :meth:`request`)."""
        return self.request("DELETE", url,
                            headers=headers,
                            query_params=query_params,
                            stream=stream,
                            timeout=timeout,
                            body=body, fields=fields)
    def POST(self, url, headers=None, query_params=None,
             body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
        """Issue a POST request (delegates to :meth:`request`)."""
        return self.request("POST", url,
                            headers=headers,
                            query_params=query_params,
                            stream=stream,
                            timeout=timeout,
                            body=body, fields=fields)
    def PUT(self, url, headers=None, query_params=None,
            body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
        """Issue a PUT request (delegates to :meth:`request`)."""
        return self.request("PUT", url,
                            headers=headers,
                            query_params=query_params,
                            stream=stream,
                            timeout=timeout,
                            body=body, fields=fields)
    def PATCH(self, url, headers=None, query_params=None,
              body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse:
        """Issue a PATCH request (delegates to :meth:`request`)."""
        return self.request("PATCH", url,
                            headers=headers,
                            query_params=query_params,
                            stream=stream,
                            timeout=timeout,
                            body=body, fields=fields)
|
|
"""
homeassistant.components.recorder
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Component that records all events and state changes. Allows other components
to query this database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/recorder/
"""
from contextlib import closing
import json
import logging
import queue
import sqlite3
import threading
from datetime import date, datetime
import homeassistant.util.dt as dt_util
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, MATCH_ALL)
from homeassistant.core import Event, EventOrigin, State
from homeassistant.remote import JSONEncoder
DOMAIN = "recorder"
DB_FILE = 'home-assistant.db'
RETURN_ROWCOUNT = "rowcount"
RETURN_LASTROWID = "lastrowid"
RETURN_ONE_ROW = "one_row"
_INSTANCE = None
_LOGGER = logging.getLogger(__name__)
def query(sql_query, arguments=None):
    """ Query the database. """
    # Raises RuntimeError when setup() has not yet created the recorder.
    _verify_instance()
    return _INSTANCE.query(sql_query, arguments)
def query_states(state_query, arguments=None):
    """ Query the database and return a list of states. """
    # Rows that fail to convert (row_to_state returns None) are dropped.
    states = []
    for row in query(state_query, arguments):
        state = row_to_state(row)
        if state is not None:
            states.append(state)
    return states
def query_events(event_query, arguments=None):
    """ Query the database and return a list of events. """
    # Rows that fail to convert (row_to_event returns None) are dropped.
    converted = (row_to_event(row) for row in query(event_query, arguments))
    return [event for event in converted if event is not None]
def row_to_state(row):
    """ Convert a database row to a state. """
    # states table layout (see _setup_connection migration 1):
    # row[1]=entity_id, row[2]=state, row[3]=attributes JSON,
    # row[4]=last_changed timestamp, row[5]=last_updated timestamp.
    try:
        return State(
            row[1], row[2], json.loads(row[3]),
            dt_util.utc_from_timestamp(row[4]),
            dt_util.utc_from_timestamp(row[5]))
    except ValueError:
        # When json.loads fails
        _LOGGER.exception("Error converting row to state: %s", row)
        return None
def row_to_event(row):
    """ Convert a database row to an event. """
    # events table layout (see _setup_connection migrations 1-2):
    # row[1]=event_type, row[2]=event_data JSON, row[3]=origin,
    # row[5]=time_fired timestamp (row[4]=created is not used here).
    try:
        return Event(row[1], json.loads(row[2]), EventOrigin(row[3]),
                     dt_util.utc_from_timestamp(row[5]))
    except ValueError:
        # When json.loads fails
        _LOGGER.exception("Error converting row to event: %s", row)
        return None
def run_information(point_in_time=None):
    """
    Returns information about current run or the run that covers point_in_time.
    """
    _verify_instance()
    # No time given, or the time falls inside the live run: return it.
    if point_in_time is None or point_in_time > _INSTANCE.recording_start:
        return RecorderRun()
    run = _INSTANCE.query(
        "SELECT * FROM recorder_runs WHERE start<? AND END>?",
        (point_in_time, point_in_time), return_value=RETURN_ONE_ROW)
    # None when no finished run covered the requested point in time.
    return RecorderRun(run) if run else None
def setup(hass, config):
    """ Setup the recorder. """
    # pylint: disable=global-statement
    global _INSTANCE
    # Recorder is a thread; it starts itself when hass fires
    # EVENT_HOMEASSISTANT_START (see Recorder.__init__).
    _INSTANCE = Recorder(hass)
    return True
class RecorderRun(object):
    """ Represents a recorder run. """
    def __init__(self, row=None):
        # Without a row: describes the live run of the module _INSTANCE.
        # With a recorder_runs row: row[1]=start, row[2]=end,
        # row[3]=closed_incorrect (see _setup_connection migration 1).
        self.end = None
        if row is None:
            self.start = _INSTANCE.recording_start
            self.closed_incorrect = False
        else:
            self.start = dt_util.utc_from_timestamp(row[1])
            if row[2] is not None:
                self.end = dt_util.utc_from_timestamp(row[2])
            self.closed_incorrect = bool(row[3])
    def entity_ids(self, point_in_time=None):
        """
        Return the entity ids that existed in this run.
        Specify point_in_time if you want to know which existed at that point
        in time inside the run.
        """
        where = self.where_after_start_run
        where_data = []
        if point_in_time is not None or self.end is not None:
            # Bound the query by the requested time, else by the run's end.
            where += "AND created < ? "
            where_data.append(point_in_time or self.end)
        return [row[0] for row in query(
            "SELECT entity_id FROM states WHERE {}"
            "GROUP BY entity_id".format(where), where_data)]
    @property
    def where_after_start_run(self):
        """
        Returns SQL WHERE clause to select rows created after the start of the
        run.
        """
        return "created >= {} ".format(_adapt_datetime(self.start))
    @property
    def where_limit_to_run(self):
        """ Return a SQL WHERE clause to limit results to this run. """
        where = self.where_after_start_run
        if self.end is not None:
            where += "AND created < {} ".format(_adapt_datetime(self.end))
        return where
class Recorder(threading.Thread):
    """ Threaded recorder class """
    # The bus listener appends every event to self.queue; this thread's
    # run() loop drains it.  self.quit_object is the shutdown sentinel.
    def __init__(self, hass):
        threading.Thread.__init__(self)
        self.hass = hass
        self.conn = None
        self.queue = queue.Queue()
        self.quit_object = object()
        self.lock = threading.Lock()
        self.recording_start = dt_util.utcnow()
        self.utc_offset = dt_util.now().utcoffset().total_seconds()
        def start_recording(event):
            """ Start recording. """
            self.start()
        # Only start the thread once Home Assistant itself has started.
        hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_recording)
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self.shutdown)
        hass.bus.listen(MATCH_ALL, self.event_listener)
    def run(self):
        """ Start processing events to save. """
        with self._make_connection():
            self._setup_connection()
            self._setup_run()
            while True:
                event = self.queue.get()
                if event == self.quit_object:
                    # Shutdown sentinel: close the run record and stop.
                    self._close_run()
                    self.conn.close()
                    self.queue.task_done()
                    return
                elif event.event_type == EVENT_TIME_CHANGED:
                    # Time ticks fire constantly; do not persist them.
                    self.queue.task_done()
                    continue
                event_id = self.record_event(event)
                if event.event_type == EVENT_STATE_CHANGED:
                    self.record_state(
                        event.data['entity_id'], event.data.get('new_state'),
                        event_id)
                self.queue.task_done()
    def event_listener(self, event):
        """
        Listens for new events on the EventBus and puts them in the process
        queue.
        """
        self.queue.put(event)
    def shutdown(self, event):
        """ Tells the recorder to shut down. """
        self.queue.put(self.quit_object)
        self.block_till_done()
    def record_state(self, entity_id, state, event_id):
        """ Save a state to the database. """
        now = dt_util.utcnow()
        # State got deleted
        if state is None:
            state_state = ''
            state_domain = ''
            state_attr = '{}'
            last_changed = last_updated = now
        else:
            state_domain = state.domain
            state_state = state.state
            state_attr = json.dumps(dict(state.attributes))
            last_changed = state.last_changed
            last_updated = state.last_updated
        info = (
            entity_id, state_domain, state_state, state_attr,
            last_changed, last_updated,
            now, self.utc_offset, event_id)
        self.query(
            """
            INSERT INTO states (
            entity_id, domain, state, attributes, last_changed, last_updated,
            created, utc_offset, event_id)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            info)
    def record_event(self, event):
        """ Save an event to the database. """
        info = (
            event.event_type, json.dumps(event.data, cls=JSONEncoder),
            str(event.origin), dt_util.utcnow(), event.time_fired,
            self.utc_offset
        )
        # Return the new row id so state rows can reference their event.
        return self.query(
            "INSERT INTO events ("
            "event_type, event_data, origin, created, time_fired, utc_offset"
            ") VALUES (?, ?, ?, ?, ?, ?)", info, RETURN_LASTROWID)
    def query(self, sql_query, data=None, return_value=None):
        """ Query the database. """
        # self.lock serializes access between this thread and callers of the
        # module-level query(); the connection context manager commits (or
        # rolls back) the implicit transaction.  On known sqlite3 errors an
        # empty list is returned instead of raising.
        try:
            with self.conn, self.lock:
                _LOGGER.debug("Running query %s", sql_query)
                cur = self.conn.cursor()
                if data is not None:
                    cur.execute(sql_query, data)
                else:
                    cur.execute(sql_query)
                if return_value == RETURN_ROWCOUNT:
                    return cur.rowcount
                elif return_value == RETURN_LASTROWID:
                    return cur.lastrowid
                elif return_value == RETURN_ONE_ROW:
                    return cur.fetchone()
                else:
                    return cur.fetchall()
        except (sqlite3.IntegrityError, sqlite3.OperationalError,
                sqlite3.ProgrammingError):
            _LOGGER.exception(
                "Error querying the database using: %s", sql_query)
            return []
    def block_till_done(self):
        """ Blocks till all events processed. """
        self.queue.join()
    def _make_connection(self):
        """ Open the SQLite connection and return it wrapped in closing(). """
        db_path = self.hass.config.path(DB_FILE)
        # check_same_thread=False: the connection is created here but used
        # from the recorder thread and (via query) other callers.
        conn = self.conn = sqlite3.connect(db_path, check_same_thread=False)
        conn.row_factory = sqlite3.Row
        return closing(conn)
    def _setup_connection(self):
        """ Ensure database is ready to fly. """
        # Have datetime objects be saved as integers
        sqlite3.register_adapter(date, _adapt_datetime)
        sqlite3.register_adapter(datetime, _adapt_datetime)
        # Validate we are on the correct schema or that we have to migrate
        cur = self.conn.cursor()
        def save_migration(migration_id):
            """ Save and commit a migration to the database. """
            cur.execute('INSERT INTO schema_version VALUES (?, ?)',
                        (migration_id, dt_util.utcnow()))
            self.conn.commit()
            _LOGGER.info("Database migrated to version %d", migration_id)
        try:
            cur.execute('SELECT max(migration_id) FROM schema_version;')
            migration_id = cur.fetchone()[0] or 0
        except sqlite3.OperationalError:
            # The table does not exist
            cur.execute('CREATE TABLE schema_version ('
                        'migration_id integer primary key, performed integer)')
            migration_id = 0
        # Migration 1: initial schema (recorder_runs, events, states).
        if migration_id < 1:
            cur.execute("""
                CREATE TABLE recorder_runs (
                    run_id integer primary key,
                    start integer,
                    end integer,
                    closed_incorrect integer default 0,
                    created integer)
            """)
            cur.execute("""
                CREATE TABLE events (
                    event_id integer primary key,
                    event_type text,
                    event_data text,
                    origin text,
                    created integer)
            """)
            cur.execute(
                'CREATE INDEX events__event_type ON events(event_type)')
            cur.execute("""
                CREATE TABLE states (
                    state_id integer primary key,
                    entity_id text,
                    state text,
                    attributes text,
                    last_changed integer,
                    last_updated integer,
                    created integer)
            """)
            cur.execute('CREATE INDEX states__entity_id ON states(entity_id)')
            save_migration(1)
        # Migration 2: record when an event actually fired.
        if migration_id < 2:
            cur.execute("""
                ALTER TABLE events
                ADD COLUMN time_fired integer
            """)
            cur.execute('UPDATE events SET time_fired=created')
            save_migration(2)
        # Migration 3: store the local UTC offset alongside each row.
        if migration_id < 3:
            utc_offset = self.utc_offset
            cur.execute("""
                ALTER TABLE recorder_runs
                ADD COLUMN utc_offset integer
            """)
            cur.execute("""
                ALTER TABLE events
                ADD COLUMN utc_offset integer
            """)
            cur.execute("""
                ALTER TABLE states
                ADD COLUMN utc_offset integer
            """)
            cur.execute("UPDATE recorder_runs SET utc_offset=?", [utc_offset])
            cur.execute("UPDATE events SET utc_offset=?", [utc_offset])
            cur.execute("UPDATE states SET utc_offset=?", [utc_offset])
            save_migration(3)
        if migration_id < 4:
            # We had a bug where we did not save utc offset for recorder runs
            cur.execute(
                """UPDATE recorder_runs SET utc_offset=?
                   WHERE utc_offset IS NULL""", [self.utc_offset])
            cur.execute("""
                ALTER TABLE states
                ADD COLUMN event_id integer
            """)
            save_migration(4)
        if migration_id < 5:
            # Add domain so that thermostat graphs look right
            try:
                cur.execute("""
                    ALTER TABLE states
                    ADD COLUMN domain text
                """)
            except sqlite3.OperationalError:
                # We had a bug in this migration for a while on dev
                # Without this, dev-users will have to throw away their db
                pass
            # TravisCI has Python compiled against an old version of SQLite3
            # which misses the instr method.
            self.conn.create_function(
                "instr", 2,
                lambda string, substring: string.find(substring) + 1)
            # populate domain with defaults
            cur.execute("""
                UPDATE states
                set domain=substr(entity_id, 0, instr(entity_id, '.'))
            """)
            # add indexes we are going to use a lot on selects
            cur.execute("""
                CREATE INDEX states__state_changes ON
                states (last_changed, last_updated, entity_id)""")
            cur.execute("""
                CREATE INDEX states__significant_changes ON
                states (domain, last_updated, entity_id)""")
            save_migration(5)
    def _setup_run(self):
        """ Log the start of the current run. """
        # Close any run left open by a previous crash before opening ours.
        if self.query("""UPDATE recorder_runs SET end=?, closed_incorrect=1
                      WHERE end IS NULL""", (self.recording_start, ),
                      return_value=RETURN_ROWCOUNT):
            _LOGGER.warning("Found unfinished sessions")
        self.query(
            """INSERT INTO recorder_runs (start, created, utc_offset)
               VALUES (?, ?, ?)""",
            (self.recording_start, dt_util.utcnow(), self.utc_offset))
    def _close_run(self):
        """ Save end time for current run. """
        self.query(
            "UPDATE recorder_runs SET end=? WHERE start=?",
            (dt_util.utcnow(), self.recording_start))
def _adapt_datetime(datetimestamp):
    """ Turn a datetime into an integer for in the DB. """
    # Truncate to whole seconds, normalize to UTC, then take the Unix epoch
    # offset in seconds.
    utc_stamp = dt_util.as_utc(datetimestamp.replace(microsecond=0))
    epoch = datetime(1970, 1, 1, tzinfo=dt_util.UTC)
    return (utc_stamp - epoch).total_seconds()
def _verify_instance():
    """ Throws error if recorder not initialized. """
    # setup() must have populated the module-level _INSTANCE first.
    if _INSTANCE is None:
        raise RuntimeError("Recorder not initialized.")
|
|
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
from pandas.core.reshape.merge import MergeError
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
    @pytest.fixture(autouse=True)
    def setup_method(self, datapath):
        # Autouse fixture: load the shared CSV fixtures before every test.
        self.trades = self.read_data(datapath, "trades.csv")
        self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
        self.asof = self.read_data(datapath, "asof.csv")
        self.tolerance = self.read_data(datapath, "tolerance.csv")
        self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
        self.allow_exact_matches_and_tolerance = self.read_data(
            datapath, "allow_exact_matches_and_tolerance.csv"
        )
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
assert_frame_equal(result, expected)
    def test_examples2(self):
        """ doc-string examples """
        # Trades/quotes frames mirror the merge_asof doc-string example.
        trades = pd.DataFrame(
            {
                "time": pd.to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.038",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.048",
                    ]
                ),
                "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
            },
            columns=["time", "ticker", "price", "quantity"],
        )
        quotes = pd.DataFrame(
            {
                "time": pd.to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.030",
                        "20160525 13:30:00.041",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.049",
                        "20160525 13:30:00.072",
                        "20160525 13:30:00.075",
                    ]
                ),
                "ticker": [
                    "GOOG",
                    "MSFT",
                    "MSFT",
                    "MSFT",
                    "GOOG",
                    "AAPL",
                    "GOOG",
                    "MSFT",
                ],
                "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
                "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
            },
            columns=["time", "ticker", "bid", "ask"],
        )
        # These two calls are smoke tests only -- their results are not
        # asserted; they just exercise the documented invocations.
        pd.merge_asof(trades, quotes, on="time", by="ticker")
        pd.merge_asof(
            trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
        )
        expected = pd.DataFrame(
            {
                "time": pd.to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.038",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.048",
                    ]
                ),
                "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
                "bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
                "ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
            },
            columns=["time", "ticker", "price", "quantity", "bid", "ask"],
        )
        # 10ms tolerance without exact matches: only the 0.038 trade finds
        # a strictly-earlier quote within tolerance.
        result = pd.merge_asof(
            trades,
            quotes,
            on="time",
            by="ticker",
            tolerance=pd.Timedelta("10ms"),
            allow_exact_matches=False,
        )
        assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
# left-only index uses right"s index, oddly
expected.index = result.index
# time column appears after left"s columns
expected = expected[result.columns]
assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
assert_frame_equal(result, expected)
def test_multi_index(self):
# MultiIndex is prohibited
trades = self.trades.set_index(["time", "price"])
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_index=True, right_index=True)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index(["time", "bid"])
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_index=True, right_index=True)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
assert_frame_equal(result, expected)
    def test_multiby(self):
        # GH13936: `by` may be a list of columns; here (ticker, exch).
        trades = pd.DataFrame(
            {
                "time": pd.to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.046",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.050",
                    ]
                ),
                "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
                "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
            },
            columns=["time", "ticker", "exch", "price", "quantity"],
        )
        quotes = pd.DataFrame(
            {
                "time": pd.to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.030",
                        "20160525 13:30:00.041",
                        "20160525 13:30:00.045",
                        "20160525 13:30:00.049",
                    ]
                ),
                "ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
                "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
                "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
                "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
            },
            columns=["time", "ticker", "exch", "bid", "ask"],
        )
        # Matches require BOTH ticker and exch to agree; unmatched rows NaN.
        expected = pd.DataFrame(
            {
                "time": pd.to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.046",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.050",
                    ]
                ),
                "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
                "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
                "bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
                "ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
            },
            columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
        )
        result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
        assert_frame_equal(result, expected)
    def test_multiby_heterogeneous_types(self):
        # GH13936: multi-column `by` with mixed dtypes (int ticker, str exch).
        trades = pd.DataFrame(
            {
                "time": pd.to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.046",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.050",
                    ]
                ),
                "ticker": [0, 0, 1, 1, 2],
                "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
            },
            columns=["time", "ticker", "exch", "price", "quantity"],
        )
        quotes = pd.DataFrame(
            {
                "time": pd.to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.030",
                        "20160525 13:30:00.041",
                        "20160525 13:30:00.045",
                        "20160525 13:30:00.049",
                    ]
                ),
                "ticker": [1, 0, 0, 0, 1, 2],
                "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
                "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
                "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
            },
            columns=["time", "ticker", "exch", "bid", "ask"],
        )
        # Same expected matches as test_multiby, with integer tickers.
        expected = pd.DataFrame(
            {
                "time": pd.to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.046",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.050",
                    ]
                ),
                "ticker": [0, 0, 1, 1, 2],
                "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
                "bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
                "ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
            },
            columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
        )
        result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
        assert_frame_equal(result, expected)
def test_multiby_indexed(self):
    """merge_asof on the index with a list of ``by`` columns (GH15676);
    also asserts that mismatched left_by/right_by lengths raise MergeError.
    """
    # GH15676
    left = pd.DataFrame(
        [
            [pd.to_datetime("20160602"), 1, "a"],
            [pd.to_datetime("20160602"), 2, "a"],
            [pd.to_datetime("20160603"), 1, "b"],
            [pd.to_datetime("20160603"), 2, "b"],
        ],
        columns=["time", "k1", "k2"],
    ).set_index("time")
    right = pd.DataFrame(
        [
            [pd.to_datetime("20160502"), 1, "a", 1.0],
            [pd.to_datetime("20160502"), 2, "a", 2.0],
            [pd.to_datetime("20160503"), 1, "b", 3.0],
            [pd.to_datetime("20160503"), 2, "b", 4.0],
        ],
        columns=["time", "k1", "k2", "value"],
    ).set_index("time")
    expected = pd.DataFrame(
        [
            [pd.to_datetime("20160602"), 1, "a", 1.0],
            [pd.to_datetime("20160602"), 2, "a", 2.0],
            [pd.to_datetime("20160603"), 1, "b", 3.0],
            [pd.to_datetime("20160603"), 2, "b", 4.0],
        ],
        columns=["time", "k1", "k2", "value"],
    ).set_index("time")
    result = pd.merge_asof(
        left, right, left_index=True, right_index=True, by=["k1", "k2"]
    )
    assert_frame_equal(expected, result)
    # left_by and right_by of different lengths must raise
    with pytest.raises(MergeError):
        pd.merge_asof(
            left,
            right,
            left_index=True,
            right_index=True,
            left_by=["k1", "k2"],
            right_by=["k1"],
        )
def test_basic2(self, datapath):
    """Merging the trades2/quotes2 fixtures reproduces the asof2.csv fixture."""
    trades = self.read_data(datapath, "trades2.csv")
    quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
    expected = self.read_data(datapath, "asof2.csv")
    merged = merge_asof(trades, quotes, on="time", by="ticker")
    assert_frame_equal(merged, expected)
def test_basic_no_by(self):
    """merge_asof without ``by`` works when the frames hold a single ticker."""

    def only_msft(frame):
        # restrict to one ticker and drop the now-constant column
        return (
            frame[frame.ticker == "MSFT"]
            .drop("ticker", axis=1)
            .reset_index(drop=True)
        )

    # just use a single ticker
    result = merge_asof(only_msft(self.trades), only_msft(self.quotes), on="time")
    assert_frame_equal(result, only_msft(self.asof))
def test_valid_join_keys(self):
    """merge_asof rejects malformed ``on`` specifications with MergeError."""
    trades, quotes = self.trades, self.quotes
    invalid_kwargs = [
        # left_on/right_on must name the same single ordered key
        dict(left_on="time", right_on="bid", by="ticker"),
        # 'on' must be a single column, not a list
        dict(on=["time", "ticker"], by="ticker"),
        # 'on' is required
        dict(by="ticker"),
    ]
    for kwargs in invalid_kwargs:
        with pytest.raises(MergeError):
            merge_asof(trades, quotes, **kwargs)
def test_with_duplicates(self, datapath):
    """Duplicated quote rows do not change the asof merge result."""
    doubled = pd.concat([self.quotes, self.quotes])
    doubled = doubled.sort_values(["time", "ticker"]).reset_index(drop=True)
    result = merge_asof(self.trades, doubled, on="time", by="ticker")
    assert_frame_equal(result, self.read_data(datapath, "asof.csv"))
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
    """A non-boolean allow_exact_matches value raises MergeError."""
    with pytest.raises(MergeError):
        merge_asof(
            self.trades,
            self.quotes,
            on="time",
            by="ticker",
            allow_exact_matches="foo",
        )
def test_valid_tolerance(self):
    """Tolerance validation: type-compatible tolerances are accepted;
    type-incompatible or negative ones raise MergeError.
    """
    trades = self.trades
    quotes = self.quotes
    # dti
    merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
    # integer
    merge_asof(
        trades.reset_index(),
        quotes.reset_index(),
        on="index",
        by="ticker",
        tolerance=1,
    )
    # incompat
    with pytest.raises(MergeError):
        merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
    # invalid
    with pytest.raises(MergeError):
        merge_asof(
            trades.reset_index(),
            quotes.reset_index(),
            on="index",
            by="ticker",
            tolerance=1.0,
        )
    # invalid negative
    with pytest.raises(MergeError):
        merge_asof(
            trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
        )
    with pytest.raises(MergeError):
        merge_asof(
            trades.reset_index(),
            quotes.reset_index(),
            on="index",
            by="ticker",
            tolerance=-1,
        )
def test_non_sorted(self):
    """merge_asof requires both frames to be sorted on the 'on' key;
    unsorted input raises ValueError (duplicates in 'on' are allowed).
    """
    trades = self.trades.sort_values("time", ascending=False)
    quotes = self.quotes.sort_values("time", ascending=False)
    # we require that we are already sorted on time & quotes
    assert not trades.time.is_monotonic
    assert not quotes.time.is_monotonic
    with pytest.raises(ValueError):
        merge_asof(trades, quotes, on="time", by="ticker")
    trades = self.trades.sort_values("time")
    assert trades.time.is_monotonic
    assert not quotes.time.is_monotonic
    # right side still unsorted -> still an error
    with pytest.raises(ValueError):
        merge_asof(trades, quotes, on="time", by="ticker")
    quotes = self.quotes.sort_values("time")
    assert trades.time.is_monotonic
    assert quotes.time.is_monotonic
    # ok, though has dupes
    merge_asof(trades, self.quotes, on="time", by="ticker")
@pytest.mark.parametrize(
    "tolerance",
    [
        Timedelta("1day"),
        pytest.param(
            datetime.timedelta(days=1),
            # stdlib timedelta tolerances are not implemented yet
            marks=pytest.mark.xfail(reason="not implemented", strict=True),
        ),
    ],
    ids=["pd.Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
    """A one-day tolerance reproduces the precomputed ``self.tolerance`` fixture."""
    trades = self.trades
    quotes = self.quotes
    result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
    expected = self.tolerance
    assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="forward", tolerance=1)
assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=1)
assert_frame_equal(result, expected)
def test_tolerance_tz(self):
    """Timezone-aware datetime 'on' keys work with a Timedelta tolerance (GH 14844)."""
    # GH 14844
    left = pd.DataFrame(
        {
            "date": pd.date_range(
                start=pd.to_datetime("2016-01-02"),
                freq="D",
                periods=5,
                tz=pytz.timezone("UTC"),
            ),
            "value1": np.arange(5),
        }
    )
    right = pd.DataFrame(
        {
            "date": pd.date_range(
                start=pd.to_datetime("2016-01-01"),
                freq="D",
                periods=5,
                tz=pytz.timezone("UTC"),
            ),
            "value2": list("ABCDE"),
        }
    )
    result = pd.merge_asof(left, right, on="date", tolerance=pd.Timedelta("1 day"))
    # each left date matches the right date one day earlier; the final left
    # date exceeds the right range, so it reuses the last match ("E")
    expected = pd.DataFrame(
        {
            "date": pd.date_range(
                start=pd.to_datetime("2016-01-02"),
                freq="D",
                periods=5,
                tz=pytz.timezone("UTC"),
            ),
            "value1": np.arange(5),
            "value2": list("BCDEE"),
        }
    )
    assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
assert_frame_equal(result, expected)
def test_index_tolerance(self):
    """Tolerance also applies when merging on a DatetimeIndex (GH 15135)."""
    trades = self.trades.set_index("time")
    quotes = self.quotes.set_index("time")
    observed = pd.merge_asof(
        trades,
        quotes,
        left_index=True,
        right_index=True,
        by="ticker",
        tolerance=pd.Timedelta("1day"),
    )
    assert_frame_equal(observed, self.tolerance.set_index("time"))
def test_allow_exact_matches(self):
    """allow_exact_matches=False skips ties on the merge key."""
    observed = merge_asof(
        self.trades,
        self.quotes,
        on="time",
        by="ticker",
        allow_exact_matches=False,
    )
    assert_frame_equal(observed, self.allow_exact_matches)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
    """Combining a 100ms tolerance with allow_exact_matches=False matches the
    precomputed fixture."""
    observed = merge_asof(
        self.trades,
        self.quotes,
        on="time",
        by="ticker",
        tolerance=Timedelta("100ms"),
        allow_exact_matches=False,
    )
    assert_frame_equal(observed, self.allow_exact_matches_and_tolerance)
def test_allow_exact_matches_and_tolerance2(self):
    """Interaction of allow_exact_matches and tolerance on a single row (GH 13695):
    default takes the tie, allow_exact_matches=False takes the earlier row,
    and adding a tight tolerance leaves no match at all.
    """
    # GH 13695
    df1 = pd.DataFrame(
        {"time": pd.to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
    )
    df2 = pd.DataFrame(
        {
            "time": pd.to_datetime(
                ["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
            ),
            "version": [1, 2],
        }
    )
    result = pd.merge_asof(df1, df2, on="time")
    expected = pd.DataFrame(
        {
            "time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
            "username": ["bob"],
            "version": [2],
        }
    )
    assert_frame_equal(result, expected)
    result = pd.merge_asof(df1, df2, on="time", allow_exact_matches=False)
    expected = pd.DataFrame(
        {
            "time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
            "username": ["bob"],
            "version": [1],
        }
    )
    assert_frame_equal(result, expected)
    result = pd.merge_asof(
        df1,
        df2,
        on="time",
        allow_exact_matches=False,
        tolerance=pd.Timedelta("10ms"),
    )
    expected = pd.DataFrame(
        {
            "time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
            "username": ["bob"],
            "version": [np.nan],
        }
    )
    assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
    """Duplicate left timestamps with allow_exact_matches=False and a tight
    tolerance yield NaN for every duplicate (GH 13709).
    """
    # GH 13709
    df1 = pd.DataFrame(
        {
            "time": pd.to_datetime(
                ["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
            ),
            "username": ["bob", "charlie"],
        }
    )
    df2 = pd.DataFrame(
        {
            "time": pd.to_datetime(
                ["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
            ),
            "version": [1, 2],
        }
    )
    result = pd.merge_asof(
        df1,
        df2,
        on="time",
        allow_exact_matches=False,
        tolerance=pd.Timedelta("10ms"),
    )
    expected = pd.DataFrame(
        {
            "time": pd.to_datetime(
                ["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
            ),
            "username": ["bob", "charlie"],
            "version": [np.nan, np.nan],
        }
    )
    assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="forward",
allow_exact_matches=False,
tolerance=1,
)
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="nearest",
allow_exact_matches=False,
tolerance=1,
)
assert_frame_equal(result, expected)
def test_forward_by(self):
    """direction='forward' combined with a ``by`` grouping column (GH14887)."""
    # GH14887
    left = pd.DataFrame(
        {
            "a": [1, 5, 10, 12, 15],
            "b": ["X", "X", "Y", "Z", "Y"],
            "left_val": ["a", "b", "c", "d", "e"],
        }
    )
    right = pd.DataFrame(
        {
            "a": [1, 6, 11, 15, 16],
            "b": ["X", "Z", "Y", "Z", "Y"],
            "right_val": [1, 6, 11, 15, 16],
        }
    )
    # matches must share the same 'b' value; row (5, "X") has no later "X"
    expected = pd.DataFrame(
        {
            "a": [1, 5, 10, 12, 15],
            "b": ["X", "X", "Y", "Z", "Y"],
            "left_val": ["a", "b", "c", "d", "e"],
            "right_val": [1, np.nan, 11, 15, 16],
        }
    )
    result = pd.merge_asof(left, right, on="a", by="b", direction="forward")
    assert_frame_equal(result, expected)
def test_nearest_by(self):
    """direction='nearest' combined with a ``by`` grouping column (GH14887)."""
    # GH14887
    left = pd.DataFrame(
        {
            "a": [1, 5, 10, 12, 15],
            "b": ["X", "X", "Z", "Z", "Y"],
            "left_val": ["a", "b", "c", "d", "e"],
        }
    )
    right = pd.DataFrame(
        {
            "a": [1, 6, 11, 15, 16],
            "b": ["X", "Z", "Z", "Z", "Y"],
            "right_val": [1, 6, 11, 15, 16],
        }
    )
    # nearest match within the same 'b' group, in either direction
    expected = pd.DataFrame(
        {
            "a": [1, 5, 10, 12, 15],
            "b": ["X", "X", "Z", "Z", "Y"],
            "left_val": ["a", "b", "c", "d", "e"],
            "right_val": [1, 1, 11, 11, 16],
        }
    )
    result = pd.merge_asof(left, right, on="a", by="b", direction="nearest")
    assert_frame_equal(result, expected)
def test_by_int(self):
    """merge_asof specializes the join on the ``by`` dtype; verify the integer path."""
    # we specialize by type, so test that this is correct
    df1 = pd.DataFrame(
        {
            "time": pd.to_datetime(
                [
                    "20160525 13:30:00.020",
                    "20160525 13:30:00.030",
                    "20160525 13:30:00.040",
                    "20160525 13:30:00.050",
                    "20160525 13:30:00.060",
                ]
            ),
            "key": [1, 2, 1, 3, 2],
            "value1": [1.1, 1.2, 1.3, 1.4, 1.5],
        },
        columns=["time", "key", "value1"],
    )
    df2 = pd.DataFrame(
        {
            "time": pd.to_datetime(
                [
                    "20160525 13:30:00.015",
                    "20160525 13:30:00.020",
                    "20160525 13:30:00.025",
                    "20160525 13:30:00.035",
                    "20160525 13:30:00.040",
                    "20160525 13:30:00.055",
                    "20160525 13:30:00.060",
                    "20160525 13:30:00.065",
                ]
            ),
            "key": [2, 1, 1, 3, 2, 1, 2, 3],
            "value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
        },
        columns=["time", "key", "value2"],
    )
    result = pd.merge_asof(df1, df2, on="time", by="key")
    # each row takes the most recent df2 row with the same integer key
    expected = pd.DataFrame(
        {
            "time": pd.to_datetime(
                [
                    "20160525 13:30:00.020",
                    "20160525 13:30:00.030",
                    "20160525 13:30:00.040",
                    "20160525 13:30:00.050",
                    "20160525 13:30:00.060",
                ]
            ),
            "key": [1, 2, 1, 3, 2],
            "value1": [1.1, 1.2, 1.3, 1.4, 1.5],
            "value2": [2.2, 2.1, 2.3, 2.4, 2.7],
        },
        columns=["time", "key", "value1", "value2"],
    )
    assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame(
{
"price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "price"],
)
df2 = pd.DataFrame(
{"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
columns=["price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price")
expected = pd.DataFrame(
{
"symbol": list("BGACEDF"),
"price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
"mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
},
columns=["symbol", "price", "mpv"],
)
assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
columns=["symbol", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "result": list("xyzw")},
columns=["value", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz"),
},
columns=["symbol", "value", "result"],
)
expected.value = dtype(expected.value)
assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self):
    """Specialized 'on' dtype combined with an integer ``by`` column (GH13936)."""
    # see gh-13936
    dtype = np.dtype(any_real_dtype).type
    df1 = pd.DataFrame(
        {
            "value": [5, 2, 25, 100, 78, 120, 79],
            "key": [1, 2, 3, 2, 3, 1, 2],
            "symbol": list("ABCDEFG"),
        },
        columns=["symbol", "key", "value"],
    )
    df1.value = dtype(df1.value)
    df2 = pd.DataFrame(
        {"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
        columns=["value", "key", "result"],
    )
    df2.value = dtype(df2.value)
    df1 = df1.sort_values("value").reset_index(drop=True)
    result = pd.merge_asof(df1, df2, on="value", by="key")
    # only rows whose key matches a backward df2 row get a result value
    expected = pd.DataFrame(
        {
            "symbol": list("BACEGDF"),
            "key": [2, 1, 3, 3, 2, 2, 1],
            "value": [2, 5, 25, 78, 79, 100, 120],
            "result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
        },
        columns=["symbol", "key", "value", "result"],
    )
    expected.value = dtype(expected.value)
    assert_frame_equal(result, expected)
def test_on_float_by_int(self):
    """Type-specialize both the float 'on' key and the integer ``by`` key."""
    # type specialize both "by" and "on" parameters
    df1 = pd.DataFrame(
        {
            "symbol": list("AAABBBCCC"),
            "exch": [1, 2, 3, 1, 2, 3, 1, 2, 3],
            "price": [
                3.26,
                3.2599,
                3.2598,
                12.58,
                12.59,
                12.5,
                378.15,
                378.2,
                378.25,
            ],
        },
        columns=["symbol", "exch", "price"],
    )
    df2 = pd.DataFrame(
        {
            "exch": [1, 1, 1, 2, 2, 2, 3, 3, 3],
            "price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
            "mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0],
        },
        columns=["exch", "price", "mpv"],
    )
    # both sides must be sorted on the 'on' key
    df1 = df1.sort_values("price").reset_index(drop=True)
    df2 = df2.sort_values("price").reset_index(drop=True)
    result = pd.merge_asof(df1, df2, on="price", by="exch")
    expected = pd.DataFrame(
        {
            "symbol": list("AAABBBCCC"),
            "exch": [3, 2, 1, 3, 1, 2, 1, 2, 3],
            "price": [
                3.2598,
                3.2599,
                3.26,
                12.5,
                12.58,
                12.59,
                378.15,
                378.2,
                378.25,
            ],
            "mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25],
        },
        columns=["symbol", "exch", "price", "mpv"],
    )
    assert_frame_equal(result, expected)
def test_merge_datatype_error_raises(self):
    """Merging an int 'on' key against an object one raises a helpful MergeError."""
    left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]})
    right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]})
    msg = r"incompatible merge keys \[0\] .*, must be the same type"
    with pytest.raises(MergeError, match=msg):
        merge_asof(left, right, on="a")
def test_merge_datatype_categorical_error_raises(self):
    """Categorical 'on' keys with different categories raise MergeError."""
    msg = (
        r"incompatible merge keys \[0\] .* both sides category, "
        "but not equal ones"
    )
    left = pd.DataFrame(
        {"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])}
    )
    right = pd.DataFrame(
        {
            "right_val": [1, 2, 3, 6, 7],
            "a": pd.Categorical(["a", "X", "c", "X", "b"]),
        }
    )
    with pytest.raises(MergeError, match=msg):
        merge_asof(left, right, on="a")
@pytest.mark.parametrize(
    "func", [lambda x: x, lambda x: to_datetime(x)], ids=["numeric", "datetime"]
)
@pytest.mark.parametrize("side", ["left", "right"])
def test_merge_on_nans(self, func, side):
    """Null values in the 'on' key raise ValueError naming the offending side (GH 23189)."""
    # GH 23189
    msg = "Merge keys contain null values on {} side".format(side)
    nulls = func([1.0, 5.0, np.nan])
    non_nulls = func([1.0, 5.0, 10.0])
    df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]})
    df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]})
    with pytest.raises(ValueError, match=msg):
        if side == "left":
            merge_asof(df_null, df, on="a")
        else:
            merge_asof(df, df_null, on="a")
def test_merge_by_col_tz_aware(self):
    """A timezone-aware datetime column works as the ``by`` key (GH 21184)."""
    # GH 21184
    left = pd.DataFrame(
        {
            "by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
            "on_col": [2],
            "values": ["a"],
        }
    )
    right = pd.DataFrame(
        {
            "by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
            "on_col": [1],
            "values": ["b"],
        }
    )
    result = pd.merge_asof(left, right, by="by_col", on="on_col")
    # overlapping 'values' columns get the _x/_y suffixes
    expected = pd.DataFrame(
        [[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]],
        columns=["by_col", "on_col", "values_x", "values_y"],
    )
    assert_frame_equal(result, expected)
def test_by_mixed_tz_aware(self):
    """Multiple ``by`` keys mixing tz-aware datetimes and strings (GH 26649)."""
    # GH 26649
    left = pd.DataFrame(
        {
            "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
            "by_col2": ["HELLO"],
            "on_col": [2],
            "value": ["a"],
        }
    )
    right = pd.DataFrame(
        {
            "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
            "by_col2": ["WORLD"],
            "on_col": [1],
            "value": ["b"],
        }
    )
    result = pd.merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
    # by_col2 differs, so there is no match and value_y is all-NaN (object)
    expected = pd.DataFrame(
        [[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]],
        columns=["by_col1", "by_col2", "on_col", "value_x"],
    )
    expected["value_y"] = np.array([np.nan], dtype=object)
    assert_frame_equal(result, expected)
def test_timedelta_tolerance_nearest(self):
# GH 27642
left = pd.DataFrame(
list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])),
columns=["time", "left"],
)
left["time"] = pd.to_timedelta(left["time"], "ms")
right = pd.DataFrame(
list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])),
columns=["time", "right"],
)
right["time"] = pd.to_timedelta(right["time"], "ms")
expected = pd.DataFrame(
list(
zip(
[0, 5, 10, 15, 20, 25],
[0, 1, 2, 3, 4, 5],
[0, np.nan, 2, 4, np.nan, np.nan],
)
),
columns=["time", "left", "right"],
)
expected["time"] = pd.to_timedelta(expected["time"], "ms")
result = pd.merge_asof(
left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest"
)
assert_frame_equal(result, expected)
|
|
# Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from memoized_property import memoized_property
from datalake.common import DatalakeRecord
import base64
import simplejson as json
import time
# Tunables for paginated dynamodb queries.

'''the maximum number of results to return to the user
dynamodb will return a max of 1MB to us. And our documents could be
~2kB. Keeping MAX_RESULTS at 100 keeps us from hitting this limit.
'''
MAX_RESULTS = 100

'''the default number of days to lookback for latest files
We do not index the latest files as such. Instead, we naively scan backwards
through each time bucket looking for the expected file. We will not look
arbitrarily far back, though, because this makes the failing case terribly
slow and expensive.
'''
DEFAULT_LOOKBACK_DAYS = 14

# milliseconds per day; used to step backwards through time in query_latest
_ONE_DAY_MS = 24 * 60 * 60 * 1000
class InvalidCursor(Exception):
    """Raised when a serialized cursor cannot be decoded or lacks required fields."""
    pass
class Cursor(dict):
    '''a cursor to retrieve the next page of results in a query

    We never return more than MAX_RESULTS to the user. For work_id-based
    queries, we achieve this by passing Limit=MAX_RESULTS to dynamodb. If we
    get back a non-null LastEvaluated, we stash that in the cursor so we can
    pass it as ExclusiveStartKey. The LastEvaluated key contains the range key
    which contains the last ID that we saw. We use this to prevent sending
    duplicate records from page to page. This scheme is not perfect. For
    example, if there are many files with the same work-id that span many time
    buckets we will fail to deduplicate them. But this is a rare case.

    Time-based queries are a bit more complicated because we make one query to
    dynamodb for each time bucket. We query each bucket with
    Limit=MAX_RESULTS/2 until we have more than MAX_RESULTS/2 total results,
    or until we get a non-null LastEvaluated. We encode the current time
    bucket and LastEvaluated into the cursor. There's just no good way to
    guarantee that we deduplicate across pages. Minimally, we'd have to encode
    the last_id for every "where" in each batch into the cursor. This could
    get pretty unwieldy. We still use the last ID that we saw to de-duplicate
    the (common?) case in which only a single "where" is in play.
    '''

    def __init__(self, **kwargs):
        '''create a new cursor

        Args:
        last_evaluated: The LastEvaluated value from a query with partial
        results.

        current_time_bucket: The time bucket being queried when the result
        limit was hit (not expected for work_id-based queries).

        last_id: The id of the last returned record. This is used to prevent
        duplication when the first record in the next page is the same as the
        last record in the previous page.

        Raises InvalidCursor if neither last_evaluated nor
        current_time_bucket is provided.
        '''
        super(Cursor, self).__init__(**kwargs)
        self._validate()

    def _validate(self):
        # at least one resumption point is required for the cursor to be usable
        if 'last_evaluated' not in self and 'current_time_bucket' not in self:
            raise InvalidCursor('cursor missing required fields')

    @classmethod
    def from_serialized(cls, serialized):
        '''recover a Cursor from its url-safe serialized string form

        Raises InvalidCursor if the input is not valid (un-padded) base64 or
        does not decode to a json object with the required fields.
        '''
        try:
            b64 = cls._apply_padding(serialized)
            j = base64.b64decode(b64)
            d = json.loads(j)
            return cls(**d)
        except (TypeError, ValueError):
            # bug fix: b64decode raises binascii.Error (a ValueError subclass)
            # on malformed input, and json.loads raises JSONDecodeError (also
            # a ValueError). The old code only caught JSONDecodeError, so
            # base64 errors escaped instead of becoming InvalidCursor.
            raise InvalidCursor('Failed to decode cursor ' + serialized)

    @staticmethod
    def _apply_padding(b64):
        # Restore the '=' padding that `serialized` strips. Bug fix: the old
        # code used len(b64) % 4, which is the wrong count -- a stripped
        # length of 4k+3 needs exactly one '=' but received three, making
        # b64decode fail. (-len) % 4 pads up to the next multiple of 4.
        padding_length = -len(b64) % 4
        return b64 + '=' * padding_length

    @memoized_property
    def serialized(self):
        # the serialized representation of the cursor is a base64-encoded json
        # with the padding '=' stripped off the end. This makes it cleaner for
        # urls.
        b64 = base64.b64encode(self._json)
        return b64.rstrip('=')

    @memoized_property
    def _json(self):
        return json.dumps(self)

    @property
    def last_id(self):
        # the id of the last record returned in the previous page, if known
        if 'last_id' in self:
            return self['last_id']
        elif self.last_evaluated:
            # range_key is formatted "<where>:<id>"
            return self.last_evaluated['range_key'].split(':')[1]
        else:
            return None

    @property
    def last_evaluated(self):
        return self.get('last_evaluated')

    @property
    def current_time_bucket(self):
        return self.get('current_time_bucket')
class QueryResults(list):
    """A list of unpacked, order-preserving de-duplicated query records,
    carrying an optional cursor for the next page."""

    def __init__(self, results, cursor=None):
        super(QueryResults, self).__init__(self._deduplicate_and_unpack(results))
        self.cursor = cursor

    def _deduplicate_and_unpack(self, records):
        # keep only the first record for each metadata id, preserving order
        seen = set()
        unpacked = []
        for record in records:
            record_id = record['metadata']['id']
            if record_id in seen:
                continue
            seen.add(record_id)
            unpacked.append(self._unpack(record))
        return unpacked

    def _unpack(self, result):
        r = dict(url=result['url'],
                 metadata=result['metadata'])
        # some fields were added later. Tolerate their absence to buy migration
        # time.
        for extra in ('create_time', 'size'):
            if extra in result:
                r[extra] = result[extra]
        # make sure metadata has an 'end' key
        r['metadata'].setdefault('end', None)
        return r
class ArchiveQuerier(object):
    '''query datalake records from a dynamodb table

    Supports cursor-paged queries by work_id or by time interval, plus a
    best-effort "latest file" lookup that scans backwards one day at a time.
    '''

    def __init__(self, table_name, dynamodb=None):
        # table_name: the dynamodb table holding the datalake records
        # dynamodb: a dynamodb service resource (boto3-style; must provide
        # .Table(name))
        self.table_name = table_name
        self.dynamodb = dynamodb

    def query_by_work_id(self, work_id, what, where=None, cursor=None):
        '''return a page of QueryResults for the given work_id and what

        where optionally restricts results to a single range_key prefix;
        cursor resumes a previous partial query.
        '''
        kwargs = self._prepare_work_id_kwargs(work_id, what)
        if where is not None:
            self._add_range_key_condition(kwargs, where)
        if cursor is not None:
            self._add_cursor_conditions(kwargs, cursor)
        response = self._table.query(**kwargs)
        cursor = self._cursor_for_work_id_query(response)
        return QueryResults(response['Items'], cursor)

    def _prepare_work_id_kwargs(self, work_id, what):
        # the work-id index is keyed on "<work_id>:<what>"
        i = work_id + ':' + what
        return {
            'IndexName': 'work-id-index',
            'ExpressionAttributeNames': {
                '#n0': 'work_id_index_key',
            },
            'ExpressionAttributeValues': {
                ':v0': i
            },
            'KeyConditionExpression': '#n0 = :v0',
            'Limit': MAX_RESULTS,
        }

    def _add_range_key_condition(self, kwargs, where):
        # restrict to records whose range_key begins with "<where>:"
        kwargs['KeyConditionExpression'] = \
            '(#n0 = :v0 AND begins_with(#n1, :v1))'
        kwargs['ExpressionAttributeNames']['#n1'] = 'range_key'
        kwargs['ExpressionAttributeValues'][':v1'] = where + ':'

    def _cursor_for_work_id_query(self, response):
        last_evaluated = response.get('LastEvaluatedKey')
        if last_evaluated is None:
            # no more pages
            return None
        return Cursor(last_evaluated=last_evaluated)

    def _add_cursor_conditions(self, kwargs, cursor):
        last_evaluated = cursor.get('last_evaluated')
        if last_evaluated is not None:
            kwargs['ExclusiveStartKey'] = last_evaluated
        if cursor.last_id is not None:
            # here we filter the known probable duplicate
            kwargs['FilterExpression'] = "(NOT #n2.#n3 = :v2)"
            kwargs["ExpressionAttributeNames"]["#n2"] = "metadata"
            kwargs["ExpressionAttributeNames"]["#n3"] = "id"
            kwargs["ExpressionAttributeValues"][":v2"] = cursor.last_id

    def query_by_time(self, start, end, what, where=None, cursor=None):
        '''return a page of QueryResults intersecting [start, end]

        start and end are epoch milliseconds. One dynamodb query is made per
        time bucket until the page fills or the interval is exhausted.
        '''
        results = []
        buckets = DatalakeRecord.get_time_buckets(start, end)
        if cursor:
            # resume from the bucket where the previous page stopped
            current_bucket = cursor['current_time_bucket']
            i = buckets.index(current_bucket)
            buckets = buckets[i:]
        for b in buckets:
            cursor = self._query_time_bucket(b, results, start, end, what,
                                             where, cursor)
        if cursor and \
           cursor.current_time_bucket and \
           cursor.current_time_bucket > buckets[-1]:
            # this is a corner case. It means that the next query would take us
            # into the next bucket, but the next bucket is beyond the time of
            # interest. Just clear the cursor in this case.
            cursor = None
        return QueryResults(results, cursor)

    def _query_time_bucket(self, bucket, results, start, end, what,
                           where=None, cursor=None):
        # fill `results` (mutated in place) with records from this bucket
        # until headroom runs out or the bucket is exhausted; return the
        # cursor for the next page (or None).
        headroom = MAX_RESULTS - len(results)
        new_results = []
        while headroom > 0:
            kwargs = self._prepare_time_bucket_kwargs(bucket, what,
                                                      limit=headroom)
            if where is not None:
                self._add_range_key_condition(kwargs, where)
            if cursor is not None:
                self._add_cursor_conditions(kwargs, cursor)
            response = self._table.query(**kwargs)
            new_results = self._exclude_outside(response['Items'], start, end)
            results += new_results
            # we _could_ deduplicate the results here to make more headroom
            # for another bucket.
            cursor = self._cursor_for_time_query(response, results, bucket)
            if cursor is None:
                # no more results in the bucket
                break
            headroom = MAX_RESULTS - len(results)
        return cursor

    def _exclude_outside(self, records, start, end):
        # a bucket may contain records that do not overlap [start, end]
        return [r for r in records if self._intersects_time(r, start, end)]

    def _intersects_time(self, record, start, end):
        '''return true if a record intersects the specified time interval

        Note: the record may not have an 'end', or the 'end' may be None. In
        these cases, we only need to consider the 'start'.
        '''
        m = record['metadata']
        if 'end' not in m or m['end'] is None:
            if m['start'] < start or m['start'] > end:
                return False
            else:
                return True
        if m['end'] < start or m['start'] > end:
            return False
        return True

    def _prepare_time_bucket_kwargs(self, bucket, what, limit=None):
        # the primary index is keyed on "<bucket>:<what>"
        i = str(bucket) + ':' + what
        kwargs = {
            'ExpressionAttributeNames': {
                '#n0': 'time_index_key',
            },
            'ExpressionAttributeValues': {
                ':v0': i
            },
            'KeyConditionExpression': '#n0 = :v0',
        }
        if limit is not None:
            kwargs.update(Limit=limit)
        return kwargs

    def _cursor_for_time_query(self, response, results, current_bucket):
        last_evaluated = response.get('LastEvaluatedKey')
        if last_evaluated is None:
            if len(results) < MAX_RESULTS:
                # There are no more results in this bucket, but there's enough
                # headroom for records from another bucket.
                return None
            else:
                # there are no more results in this bucket. So the next cursor
                # will start at the next bucket. It is possible that the next
                # bucket is not relevant to this query. We leave this up to a
                # higher level to figure out.
                last_id = results[-1]['metadata']['id']
                return Cursor(current_time_bucket=current_bucket + 1,
                              last_id=last_id)
        else:
            # Results from this time bucket did not fit in the page. Prepare
            # the cursor
            return Cursor(last_evaluated=last_evaluated,
                          current_time_bucket=current_bucket)

    @memoized_property
    def _table(self):
        return self.dynamodb.Table(self.table_name)

    def query_latest(self, what, where, lookback_days=DEFAULT_LOOKBACK_DAYS):
        '''return the latest record for what+where, or None

        Scans backwards one day at a time, up to lookback_days.
        '''
        current = int(time.time() * 1000)
        end = current - lookback_days * _ONE_DAY_MS
        while current >= end:
            # BUG FIX: use floor division. Under python3 the original "/"
            # yields a float, producing a time_index_key like "123.0:<what>"
            # that can never match a record. Under python2, int/int already
            # floors, so this is behavior-identical there.
            bucket = current // DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
            r = self._get_latest_record_in_bucket(bucket, what, where)
            if r is not None:
                return r
            current -= _ONE_DAY_MS
        return None

    def _get_latest_record_in_bucket(self, bucket, what, where):
        kwargs = self._prepare_time_bucket_kwargs(bucket, what)
        self._add_range_key_condition(kwargs, where)
        records = self._get_all_records_in_bucket(bucket, **kwargs)
        if not records:
            return None
        # latest = greatest (metadata start, create_time)
        records = sorted(records,
                         key=lambda r: (r['metadata']['start'],
                                        r['create_time']))
        result = records[-1]
        return dict(url=result['url'], metadata=result['metadata'])

    def _get_all_records_in_bucket(self, bucket, **kwargs):
        # exhaustively follow LastEvaluatedKey to drain the bucket
        records = []
        while True:
            response = self._table.query(**kwargs)
            records += response['Items']
            if 'LastEvaluatedKey' not in response:
                break
            kwargs['ExclusiveStartKey'] = response['LastEvaluatedKey']
        return records
|
|
# -*- coding: utf-8 -*-
"""
S3 Microsoft Excel codec
@copyright: 2011 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3XLS"]
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.contenttype import contenttype
try:
from lxml import etree
except ImportError:
import sys
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
from ..s3codec import S3Codec
# =============================================================================
class S3XLS(S3Codec):
    """
        Simple Microsoft Excel format codec

        Writes an S3Resource (or a pre-extracted list of rows) into an
        XLS workbook via xlwt, with an optional grouping column and
        alternating row colours. Python 2 code (uses ``unicode`` and
        web2py's ``current`` thread-local).
    """
    # Customizable styles
    # Column width per character (xlwt column widths are in 1/256 char units)
    COL_WIDTH_MULTIPLIER = 360
    # Colour values below are xlwt palette indices, not RGB
    LARGE_HEADER_COLOUR = 0x2C
    HEADER_COLOUR = 0x2C
    SUB_HEADER_COLOUR = 0x18
    ROW_ALTERNATING_COLOURS = [0x2A, 0x2B]
    # -------------------------------------------------------------------------
    def __init__(self):
        """
            Constructor

            Prepares translated error messages for missing optional
            dependencies (xlrd/xlwt), which are reported lazily in encode().
        """
        # Error codes
        T = current.T
        self.ERROR = Storage(
            XLRD_ERROR = T("ERROR: Running Python needs the xlrd module installed for XLS export"),
            XLWT_ERROR = T("ERROR: Running Python needs the xlwt module installed for XLS export")
        )
    # -------------------------------------------------------------------------
    def extractResource(self, resource, list_fields, report_groupby):
        """
            Extract the items from the resource

            @param resource: the resource
            @param list_fields: fields to include in list views
            @param report_groupby: a Field object of the field to group the records by
            @return: tuple (title, types, headers, items)
        """
        s3 = current.response.s3
        # List fields
        if not list_fields:
            fields = resource.readable_fields()
            list_fields = [f.name for f in fields if f != "id"]
        # Drop index fields from the export
        indices = self.indices
        list_fields = [f for f in list_fields if f not in indices]
        # Filter
        if s3.filter is not None:
            resource.add_filter(s3.filter)
        # Retrieve the resource contents
        table = resource.table
        lfields, joins, left, distinct = resource.resolve_selectors(list_fields)
        # Use the title_list CRUD string for the title
        name = "title_list"
        tablename = resource.tablename
        crud_strings = s3.crud_strings.get(tablename, s3.crud_strings)
        not_found = s3.crud_strings.get(name, current.request.function)
        title = str(crud_strings.get(name, not_found))
        # Only include fields that can be read.
        # - doesn't work with virtual fields and anyway list_fields should override readable
        #headers = [f.label for f in lfields if (f.show and f.field and f.field.readable)]
        headers = [f.label for f in lfields if f.show]
        # Doesn't work with Virtual Fields
        #types = [f.field.type for f in lfields if f.show]
        types = []
        for f in lfields:
            if f.show:
                if f.field:
                    types.append(f.field.type)
                else:
                    # Virtual Field
                    types.append("string")
        orderby = report_groupby
        if not orderby:
            # @ToDo: Some central function (where does HRM List get it's orderby from?)
            # Add a hidden "Sort" column so rows come out in a sensible order
            if "person_id" in list_fields:
                orderby = "pr_person.first_name"
                list_fields.append("person_id$first_name")
                headers.append("Sort")
                types.append("sort")
            elif "organisation_id" in list_fields:
                orderby = "org_organisation.name"
                list_fields.append("organisation_id$name")
                headers.append("Sort")
                types.append("sort")
        items = resource.sqltable(fields=list_fields,
                                  start=None,
                                  limit=None,
                                  orderby=orderby,
                                  no_ids=True,
                                  as_page=True)
        if items is None:
            items = []
        return (title, types, headers, items)
    # -------------------------------------------------------------------------
    def encode(self, data_source, **attr):
        """
            Export data as a Microsoft Excel spreadsheet

            @param data_source: the source of the data that is to be encoded
                                as a spreadsheet. This may be:
                                resource: the resource
                                item: a list of pre-fetched values
                                      the headings are in the first row
                                      the data types are in the second row
            @param attr: dictionary of parameters:
                 * title:          The main title of the report
                 * list_fields:    Fields to include in list views
                 * report_groupby: Used to create a grouping of the result:
                                   either a Field object of the resource
                                   or a string which matches a value in the heading
                 * use_colour:     True to add colour to the cells. default False
            @return: the XLS file contents as a byte string (also sets the
                     Content-Type/Content-disposition response headers)
        """
        import datetime
        # Import the optional dependencies lazily; if missing, report and
        # redirect rather than crash the request
        try:
            import xlwt
        except ImportError:
            current.session.error = self.ERROR.XLWT_ERROR
            redirect(URL(extension=""))
        try:
            from xlrd.xldate import xldate_from_date_tuple, \
                                    xldate_from_time_tuple, \
                                    xldate_from_datetime_tuple
        except ImportError:
            current.session.error = self.ERROR.XLRD_ERROR
            redirect(URL(extension=""))
        # Environment
        request = current.request
        # The xlwt library supports a maximum of 182 characters in a single cell
        max_cell_size = 182
        # Get the attributes
        title = attr.get("title")
        list_fields = attr.get("list_fields")
        report_groupby = attr.get("report_groupby")
        use_colour = attr.get("use_colour", False)
        # Extract the data from the data_source
        if isinstance(data_source, (list, tuple)):
            # Pre-fetched: row 0 = headers, row 1 = types, rest = data
            headers = data_source[0]
            types = data_source[1]
            items = data_source[2:]
        else:
            (title, types, headers, items) = self.extractResource(data_source,
                                                                  list_fields,
                                                                  report_groupby)
        if len(items) > 0 and len(headers) != len(items[0]):
            # Mismatched column counts - log for debugging, then carry on
            from ..s3utils import s3_debug
            msg = """modules/s3/codecs/xls: There is an error in the list_items, a field doesn't exist"
                  requesting url %s
                  Headers = %d, Data Items = %d
                  Headers %s
                  List Fields %s""" % (request.url, len(headers), len(items[0]), headers, list_fields)
            s3_debug(msg)
        if report_groupby != None:
            if isinstance(report_groupby, Field):
                groupby_label = report_groupby.label
            else:
                groupby_label = report_groupby
        # Date/Time formats from L10N deployment settings
        settings = current.deployment_settings
        date_format = S3XLS.dt_format_translate(settings.get_L10n_date_format())
        time_format = S3XLS.dt_format_translate(settings.get_L10n_time_format())
        datetime_format = S3XLS.dt_format_translate(settings.get_L10n_datetime_format())
        # Initialize output
        output = StringIO()
        # Create the workbook and a sheet in it
        book = xlwt.Workbook(encoding="utf-8")
        # The spreadsheet doesn't like a / in the sheet name, so replace any with a space
        sheet1 = book.add_sheet(str(title.replace("/"," ")))
        # Styles
        styleLargeHeader = xlwt.XFStyle()
        styleLargeHeader.font.bold = True
        styleLargeHeader.font.height = 400
        if use_colour:
            styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER
            styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN
            styleLargeHeader.pattern.pattern_fore_colour = S3XLS.LARGE_HEADER_COLOUR
        styleNotes = xlwt.XFStyle()
        styleNotes.font.italic = True
        styleNotes.font.height = 160 # 160 Twips = 8point
        styleNotes.num_format_str = datetime_format
        styleHeader = xlwt.XFStyle()
        styleHeader.font.bold = True
        styleHeader.num_format_str = datetime_format
        if use_colour:
            styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
            styleHeader.pattern.pattern_fore_colour = S3XLS.HEADER_COLOUR
        styleSubHeader = xlwt.XFStyle()
        styleSubHeader.font.bold = True
        if use_colour:
            # NOTE(review): reads SOLID_PATTERN via styleHeader - harmless
            # since it is a class-level constant, but looks like a
            # copy/paste slip (styleSubHeader was presumably intended)
            styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
            styleSubHeader.pattern.pattern_fore_colour = S3XLS.SUB_HEADER_COLOUR
        styleOdd = xlwt.XFStyle()
        if use_colour:
            styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN
            styleOdd.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[0]
        styleEven = xlwt.XFStyle()
        if use_colour:
            styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN
            styleEven.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[1]
        # Header row (row 2; rows 0/1 hold the title and timestamp)
        colCnt = -1
        headerRow = sheet1.row(2)
        fieldWidth=[]
        for label in headers:
            # Skip the hidden sort column and the groupby column
            if label == "Sort":
                continue
            if report_groupby != None:
                if label == groupby_label:
                    continue
            colCnt += 1
            headerRow.write(colCnt, str(label), styleHeader)
            width = len(label) * S3XLS.COL_WIDTH_MULTIPLIER
            fieldWidth.append(width)
            sheet1.col(colCnt).width = width
        # Title row
        currentRow = sheet1.row(0)
        if colCnt > 0:
            sheet1.write_merge(0, 0, 0, colCnt, str(title),
                               styleLargeHeader)
        currentRow = sheet1.row(1)
        currentRow.height = 440
        # Export timestamp in the last column of row 1
        currentRow.write(colCnt, request.now, styleNotes)
        # fix the size of the last column to display the date
        if 16 * S3XLS.COL_WIDTH_MULTIPLIER > width:
            sheet1.col(colCnt).width = 16 * S3XLS.COL_WIDTH_MULTIPLIER
        # Initialize counters
        totalCols = colCnt
        rowCnt = 3
        colCnt = 0
        subheading = None
        for item in items:
            # Item details
            rowCnt += 1
            currentRow = sheet1.row(rowCnt)
            colCnt = 0
            if rowCnt % 2 == 0:
                style = styleEven
            else:
                style = styleOdd
            for represent in item:
                coltype=types[colCnt]
                if coltype == "sort":
                    continue
                label = headers[colCnt]
                if type(represent) is not str:
                    represent = unicode(represent)
                # Truncate over-long values to xlwt's cell limit
                if len(represent) > max_cell_size:
                    represent = represent[:max_cell_size]
                # Strip away markup from representation
                try:
                    markup = etree.XML(str(represent))
                    text = markup.xpath(".//text()")
                    if text:
                        text = " ".join(text)
                    else:
                        text = ""
                    represent = text
                except:
                    pass
                if report_groupby != None:
                    if label == groupby_label:
                        # Emit a merged sub-header row whenever the
                        # groupby value changes; the groupby column
                        # itself is never written as a normal cell
                        if subheading != represent:
                            subheading = represent
                            sheet1.write_merge(rowCnt, rowCnt, 0, totalCols,
                                               represent, styleSubHeader)
                            rowCnt += 1
                            currentRow = sheet1.row(rowCnt)
                            if rowCnt % 2 == 0:
                                style = styleEven
                            else:
                                style = styleOdd
                        continue
                value = represent
                # Convert typed values so Excel stores native dates/numbers;
                # on any parse failure, fall back to the string representation
                if coltype == "date":
                    try:
                        format = str(settings.get_L10n_date_format())
                        cell_datetime = datetime.datetime.strptime(value,
                                                                   format)
                        date_tuple = (cell_datetime.year,
                                      cell_datetime.month,
                                      cell_datetime.day
                                      )
                        value = xldate_from_date_tuple(date_tuple, 0)
                        style.num_format_str = date_format
                    except:
                        pass
                elif coltype == "datetime":
                    try:
                        # NOTE(review): parses with the *date* format, not the
                        # datetime format - time parts likely never parse here
                        format = str(settings.get_L10n_date_format())
                        cell_datetime = datetime.datetime.strptime(value,
                                                                   format)
                        date_tuple = (cell_datetime.year,
                                      cell_datetime.month,
                                      cell_datetime.day,
                                      cell_datetime.hour,
                                      cell_datetime.minute,
                                      cell_datetime.second,
                                      )
                        value = xldate_from_datetime_tuple(date_tuple, 0)
                        style.num_format_str = datetime_format
                    except:
                        pass
                elif coltype == "time":
                    try:
                        # NOTE(review): also uses the date format - confirm
                        format = str(settings.get_L10n_date_format())
                        cell_datetime = datetime.datetime.strptime(value,
                                                                   format)
                        date_tuple = (cell_datetime.hour,
                                      cell_datetime.minute,
                                      cell_datetime.second,
                                      )
                        value = xldate_from_time_tuple(date_tuple)
                        style.num_format_str = time_format
                    except:
                        pass
                elif coltype == "integer":
                    try:
                        value = int(value)
                        style.num_format_str = "0"
                    except:
                        pass
                elif coltype == "double":
                    try:
                        value = float(value)
                        style.num_format_str = "0.00"
                    except:
                        pass
                currentRow.write(colCnt, value, style)
                # Widen the column if this cell is the longest seen so far
                width = len(represent) * S3XLS.COL_WIDTH_MULTIPLIER
                if width > fieldWidth[colCnt]:
                    fieldWidth[colCnt] = width
                    sheet1.col(colCnt).width = width
                colCnt += 1
        # Freeze the title/header rows so they stay visible when scrolling
        sheet1.panes_frozen = True
        sheet1.horz_split_pos = 3
        book.save(output)
        # Response headers
        filename = "%s_%s.xls" % (request.env.server_name, str(title))
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".xls")
        response.headers["Content-disposition"] = disposition
        output.seek(0)
        return output.read()
    # -------------------------------------------------------------------------
    @staticmethod
    def dt_format_translate(pyfmt):
        """
            Translate a Python datetime format string into an
            Excel datetime format string

            @param pyfmt: the Python format string
        """
        # Directives with no Excel equivalent map to the empty string
        translate = {"%a": "ddd",
                     "%A": "dddd",
                     "%b": "mmm",
                     "%B": "mmmm",
                     "%c": "",
                     "%d": "dd",
                     "%f": "",
                     "%H": "hh",
                     "%I": "hh",
                     "%j": "",
                     "%m": "mm",
                     "%M": "mm",
                     "%p": "AM/PM",
                     "%S": "ss",
                     "%U": "",
                     "%w": "",
                     "%W": "",
                     "%x": "",
                     "%X": "",
                     "%y": "yy",
                     "%Y": "yyyy",
                     "%z": "",
                     "%Z": "",
                     "%%": "%"}
        xlfmt = str(pyfmt)
        # NOTE(review): replacement order follows dict iteration order, so
        # a literal "%%" in the format may interact with other directives
        # depending on which key is processed first
        for item in translate:
            if item in xlfmt:
                xlfmt = xlfmt.replace(item, translate[item])
        return xlfmt
# End =========================================================================
|
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JSON Web Tokens
Provides support for creating (encoding) and verifying (decoding) JWTs,
especially JWTs generated and consumed by Google infrastructure.
See `rfc7519`_ for more details on JWTs.
To encode a JWT use :func:`encode`::
from google.auth import crypt
from google.auth import jwt
signer = crypt.Signer(private_key)
payload = {'some': 'payload'}
encoded = jwt.encode(signer, payload)
To decode a JWT and verify claims use :func:`decode`::
claims = jwt.decode(encoded, certs=public_certs)
You can also skip verification::
claims = jwt.decode(encoded, verify=False)
.. _rfc7519: https://tools.ietf.org/html/rfc7519
"""
import collections
import copy
import datetime
import json
import cachetools
import six
from six.moves import urllib
from google.auth import _helpers
from google.auth import _service_account_info
from google.auth import crypt
from google.auth import exceptions
import google.auth.credentials
# Default validity period for a freshly-signed JWT (see Credentials and
# OnDemandCredentials token_lifetime parameter).
_DEFAULT_TOKEN_LIFETIME_SECS = 3600  # 1 hour in seconds
# Default size of OnDemandCredentials' per-audience LRU token cache.
_DEFAULT_MAX_CACHE_SIZE = 10
def encode(signer, payload, header=None, key_id=None):
    """Make a signed JWT.

    Args:
        signer (google.auth.crypt.Signer): The signer used to sign the JWT.
        payload (Mapping[str, str]): The JWT payload.
        header (Mapping[str, str]): Additional JWT header payload. The
            mapping is not modified; 'typ', 'alg' (and 'kid' if a key id
            is available) are added to an internal copy.
        key_id (str): The key id to add to the JWT header. If the
            signer has a key id it will be used as the default. If this is
            specified it will override the signer's key id.

    Returns:
        bytes: The encoded JWT.
    """
    if header is None:
        header = {}
    else:
        # Copy so that the caller's mapping is not mutated by the
        # updates below.
        header = dict(header)

    if key_id is None:
        key_id = signer.key_id

    header.update({'typ': 'JWT', 'alg': 'RS256'})

    if key_id is not None:
        header['kid'] = key_id

    # header.payload, each segment base64url-encoded without padding.
    segments = [
        _helpers.unpadded_urlsafe_b64encode(
            json.dumps(header).encode('utf-8')
        ),
        _helpers.unpadded_urlsafe_b64encode(
            json.dumps(payload).encode('utf-8')
        ),
    ]

    # The signature covers the encoded header and payload joined by '.'.
    signing_input = b'.'.join(segments)
    signature = signer.sign(signing_input)
    segments.append(
        _helpers.unpadded_urlsafe_b64encode(signature)
    )

    return b'.'.join(segments)
def _decode_jwt_segment(encoded_section):
    """Decodes a single JWT segment."""
    raw_bytes = _helpers.padded_urlsafe_b64decode(encoded_section)
    try:
        decoded = json.loads(raw_bytes.decode('utf-8'))
    except ValueError as caught_exc:
        # Re-raise with the offending bytes, chaining the original error.
        new_exc = ValueError('Can\'t parse segment: {0}'.format(raw_bytes))
        six.raise_from(new_exc, caught_exc)
    else:
        return decoded
def _unverified_decode(token):
    """Decodes a token and does no verification.

    Args:
        token (Union[str, bytes]): The encoded JWT.

    Returns:
        Tuple[str, str, str, str]: header, payload, signed_section, and
            signature.

    Raises:
        ValueError: if there are an incorrect amount of segments in the token.
    """
    token = _helpers.to_bytes(token)

    # A well-formed JWT is exactly header.payload.signature.
    segments = token.split(b'.')
    if len(segments) != 3:
        raise ValueError(
            'Wrong number of segments in token: {0}'.format(token))
    encoded_header, encoded_payload, signature = segments

    # The signature covers the first two segments as transmitted.
    signed_section = encoded_header + b'.' + encoded_payload
    signature = _helpers.padded_urlsafe_b64decode(signature)

    # Parse segments
    header = _decode_jwt_segment(encoded_header)
    payload = _decode_jwt_segment(encoded_payload)

    return header, payload, signed_section, signature
def decode_header(token):
    """Return the decoded header of a token.

    No verification is done. This is useful to extract the key id from
    the header in order to acquire the appropriate certificate to verify
    the token.

    Args:
        token (Union[str, bytes]): the encoded JWT.

    Returns:
        Mapping: The decoded JWT header.
    """
    # Only the first element (the header) of the unverified decode is needed.
    return _unverified_decode(token)[0]
def _verify_iat_and_exp(payload):
    """Verifies the ``iat`` (Issued At) and ``exp`` (Expires) claims in a token
    payload.

    Args:
        payload (Mapping[str, str]): The JWT payload.

    Raises:
        ValueError: if any checks failed.
    """
    now = _helpers.datetime_to_secs(_helpers.utcnow())

    # Both claims must be present before any range check makes sense.
    for claim in ('iat', 'exp'):
        if claim not in payload:
            raise ValueError(
                'Token does not contain required claim {}'.format(claim))

    # The token must not have been issued in the future; accept tokens
    # that are slightly early to tolerate clock skew.
    iat = payload['iat']
    if now < iat - _helpers.CLOCK_SKEW_SECS:
        raise ValueError('Token used too early, {} < {}'.format(now, iat))

    # The token must not have expired; likewise accept tokens that are
    # slightly out of date to tolerate clock skew.
    exp = payload['exp']
    latest = exp + _helpers.CLOCK_SKEW_SECS
    if latest < now:
        raise ValueError('Token expired, {} < {}'.format(latest, now))
def decode(token, certs=None, verify=True, audience=None):
    """Decode and verify a JWT.

    Args:
        token (str): The encoded JWT.
        certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
            certificate used to validate the JWT signature. If bytes or
            string, it must be the public key certificate in PEM format.
            If a mapping, it must be a mapping of key IDs to public key
            certificates in PEM format. The mapping must contain the same
            key ID that's specified in the token's header.
        verify (bool): Whether to perform signature and claim validation.
            Verification is done by default.
        audience (str): The audience claim, 'aud', that this JWT should
            contain. If None then the JWT's 'aud' parameter is not verified.

    Returns:
        Mapping[str, str]: The deserialized JSON payload in the JWT.

    Raises:
        ValueError: if any verification checks failed.
    """
    # ``collections.Mapping`` was deprecated in Python 3.3 and removed in
    # Python 3.10. Import from ``collections.abc`` where available, with a
    # fallback for Python 2.7 (which this module still supports via six).
    try:
        from collections.abc import Mapping
    except ImportError:  # pragma: NO COVER
        from collections import Mapping

    header, payload, signed_section, signature = _unverified_decode(token)

    if not verify:
        return payload

    # If certs is specified as a dictionary of key IDs to certificates, then
    # use the certificate identified by the key ID in the token header.
    if isinstance(certs, Mapping):
        key_id = header.get('kid')

        if key_id:
            if key_id not in certs:
                raise ValueError(
                    'Certificate for key id {} not found.'.format(key_id))
            certs_to_check = [certs[key_id]]
        # If there's no key id in the header, check against all of the certs.
        else:
            certs_to_check = certs.values()
    else:
        certs_to_check = certs

    # Verify that the signature matches the message.
    if not crypt.verify_signature(signed_section, signature, certs_to_check):
        raise ValueError('Could not verify token signature.')

    # Verify the issued-at and expiry times in the payload.
    _verify_iat_and_exp(payload)

    # Check audience.
    if audience is not None:
        claim_audience = payload.get('aud')
        if audience != claim_audience:
            raise ValueError(
                'Token has wrong audience {}, expected {}'.format(
                    claim_audience, audience))

    return payload
class Credentials(google.auth.credentials.Signing,
                  google.auth.credentials.Credentials):
    """Credentials that use a JWT as the bearer token.

    These credentials require an "audience" claim. This claim identifies the
    intended recipient of the bearer token.

    The constructor arguments determine the claims for the JWT that is
    sent with requests. Usually, you'll construct these credentials with
    one of the helper constructors as shown in the next section.

    To create JWT credentials using a Google service account private key
    JSON file::

        audience = 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher'
        credentials = jwt.Credentials.from_service_account_file(
            'service-account.json',
            audience=audience)

    If you already have the service account file loaded and parsed::

        service_account_info = json.load(open('service_account.json'))
        credentials = jwt.Credentials.from_service_account_info(
            service_account_info,
            audience=audience)

    Both helper methods pass on arguments to the constructor, so you can
    specify the JWT claims::

        credentials = jwt.Credentials.from_service_account_file(
            'service-account.json',
            audience=audience,
            additional_claims={'meta': 'data'})

    You can also construct the credentials directly if you have a
    :class:`~google.auth.crypt.Signer` instance::

        credentials = jwt.Credentials(
            signer,
            issuer='your-issuer',
            subject='your-subject',
            audience=audience)

    The claims are considered immutable. If you want to modify the claims,
    you can easily create another instance using :meth:`with_claims`::

        new_audience = (
            'https://pubsub.googleapis.com/google.pubsub.v1.Subscriber')
        new_credentials = credentials.with_claims(audience=new_audience)
    """
    def __init__(self, signer, issuer, subject, audience,
                 additional_claims=None,
                 token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS):
        """
        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            issuer (str): The `iss` claim.
            subject (str): The `sub` claim.
            audience (str): the `aud` claim. The intended audience for the
                credentials.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload.
            token_lifetime (int): The amount of time in seconds for
                which the token is valid. Defaults to 1 hour.
        """
        super(Credentials, self).__init__()
        self._signer = signer
        self._issuer = issuer
        self._subject = subject
        self._audience = audience
        self._token_lifetime = token_lifetime
        if additional_claims is None:
            additional_claims = {}
        self._additional_claims = additional_claims
    @classmethod
    def _from_signer_and_info(cls, signer, info, **kwargs):
        """Creates a Credentials instance from a signer and service account
        info.

        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            info (Mapping[str, str]): The service account info.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        # The service account's email doubles as both issuer and subject;
        # explicit kwargs take precedence.
        kwargs.setdefault('subject', info['client_email'])
        kwargs.setdefault('issuer', info['client_email'])
        return cls(signer, **kwargs)
    @classmethod
    def from_service_account_info(cls, info, **kwargs):
        """Creates an Credentials instance from a dictionary.

        Args:
            info (Mapping[str, str]): The service account info in Google
                format.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        signer = _service_account_info.from_dict(
            info, require=['client_email'])
        return cls._from_signer_and_info(signer, info, **kwargs)
    @classmethod
    def from_service_account_file(cls, filename, **kwargs):
        """Creates a Credentials instance from a service account .json file
        in Google format.

        Args:
            filename (str): The path to the service account .json file.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: The constructed credentials.
        """
        info, signer = _service_account_info.from_filename(
            filename, require=['client_email'])
        return cls._from_signer_and_info(signer, info, **kwargs)
    @classmethod
    def from_signing_credentials(cls, credentials, audience, **kwargs):
        """Creates a new :class:`google.auth.jwt.Credentials` instance from an
        existing :class:`google.auth.credentials.Signing` instance.

        The new instance will use the same signer as the existing instance and
        will use the existing instance's signer email as the issuer and
        subject by default.

        Example::

            svc_creds = service_account.Credentials.from_service_account_file(
                'service_account.json')
            audience = (
                'https://pubsub.googleapis.com/google.pubsub.v1.Publisher')
            jwt_creds = jwt.Credentials.from_signing_credentials(
                svc_creds, audience=audience)

        Args:
            credentials (google.auth.credentials.Signing): The credentials to
                use to construct the new credentials.
            audience (str): the `aud` claim. The intended audience for the
                credentials.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: A new Credentials instance.
        """
        kwargs.setdefault('issuer', credentials.signer_email)
        kwargs.setdefault('subject', credentials.signer_email)
        return cls(
            credentials.signer,
            audience=audience,
            **kwargs)
    def with_claims(self, issuer=None, subject=None, audience=None,
                    additional_claims=None):
        """Returns a copy of these credentials with modified claims.

        Args:
            issuer (str): The `iss` claim. If unspecified the current issuer
                claim will be used.
            subject (str): The `sub` claim. If unspecified the current subject
                claim will be used.
            audience (str): the `aud` claim. If unspecified the current
                audience claim will be used.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload. This will be merged with the current
                additional claims.

        Returns:
            google.auth.jwt.Credentials: A new credentials instance.
        """
        # NOTE(review): a non-default token_lifetime is not carried over to
        # the copy; the new instance falls back to the default - confirm
        # whether that is intended.
        new_additional_claims = copy.deepcopy(self._additional_claims)
        new_additional_claims.update(additional_claims or {})
        return self.__class__(
            self._signer,
            issuer=issuer if issuer is not None else self._issuer,
            subject=subject if subject is not None else self._subject,
            audience=audience if audience is not None else self._audience,
            additional_claims=new_additional_claims)
    def _make_jwt(self):
        """Make a signed JWT.

        Returns:
            Tuple[bytes, datetime]: The encoded JWT and the expiration.
        """
        now = _helpers.utcnow()
        lifetime = datetime.timedelta(seconds=self._token_lifetime)
        expiry = now + lifetime
        payload = {
            'iss': self._issuer,
            'sub': self._subject,
            'iat': _helpers.datetime_to_secs(now),
            'exp': _helpers.datetime_to_secs(expiry),
            'aud': self._audience,
        }
        # Additional claims may override the standard claims above.
        payload.update(self._additional_claims)
        jwt = encode(self._signer, payload)
        return jwt, expiry
    def refresh(self, request):
        """Refreshes the access token.

        Args:
            request (Any): Unused.
        """
        # pylint: disable=unused-argument
        # (pylint doesn't correctly recognize overridden methods.)
        self.token, self.expiry = self._make_jwt()
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def sign_bytes(self, message):
        return self._signer.sign(message)
    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer_email(self):
        return self._issuer
    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer(self):
        return self._signer
class OnDemandCredentials(
google.auth.credentials.Signing,
google.auth.credentials.Credentials):
"""On-demand JWT credentials.
Like :class:`Credentials`, this class uses a JWT as the bearer token for
authentication. However, this class does not require the audience at
construction time. Instead, it will generate a new token on-demand for
each request using the request URI as the audience. It caches tokens
so that multiple requests to the same URI do not incur the overhead
of generating a new token every time.
This behavior is especially useful for `gRPC`_ clients. A gRPC service may
have multiple audience and gRPC clients may not know all of the audiences
required for accessing a particular service. With these credentials,
no knowledge of the audiences is required ahead of time.
.. _grpc: http://www.grpc.io/
"""
def __init__(self, signer, issuer, subject,
additional_claims=None,
token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
max_cache_size=_DEFAULT_MAX_CACHE_SIZE):
"""
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
issuer (str): The `iss` claim.
subject (str): The `sub` claim.
additional_claims (Mapping[str, str]): Any additional claims for
the JWT payload.
token_lifetime (int): The amount of time in seconds for
which the token is valid. Defaults to 1 hour.
max_cache_size (int): The maximum number of JWT tokens to keep in
cache. Tokens are cached using :class:`cachetools.LRUCache`.
"""
super(OnDemandCredentials, self).__init__()
self._signer = signer
self._issuer = issuer
self._subject = subject
self._token_lifetime = token_lifetime
if additional_claims is None:
additional_claims = {}
self._additional_claims = additional_claims
self._cache = cachetools.LRUCache(maxsize=max_cache_size)
@classmethod
def _from_signer_and_info(cls, signer, info, **kwargs):
"""Creates an OnDemandCredentials instance from a signer and service
account info.
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
info (Mapping[str, str]): The service account info.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.OnDemandCredentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
"""
kwargs.setdefault('subject', info['client_email'])
kwargs.setdefault('issuer', info['client_email'])
return cls(signer, **kwargs)
@classmethod
def from_service_account_info(cls, info, **kwargs):
"""Creates an OnDemandCredentials instance from a dictionary.
Args:
info (Mapping[str, str]): The service account info in Google
format.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.OnDemandCredentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
"""
signer = _service_account_info.from_dict(
info, require=['client_email'])
return cls._from_signer_and_info(signer, info, **kwargs)
@classmethod
def from_service_account_file(cls, filename, **kwargs):
"""Creates an OnDemandCredentials instance from a service account .json
file in Google format.
Args:
filename (str): The path to the service account .json file.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.OnDemandCredentials: The constructed credentials.
"""
info, signer = _service_account_info.from_filename(
filename, require=['client_email'])
return cls._from_signer_and_info(signer, info, **kwargs)
@classmethod
def from_signing_credentials(cls, credentials, **kwargs):
"""Creates a new :class:`google.auth.jwt.OnDemandCredentials` instance
from an existing :class:`google.auth.credentials.Signing` instance.
The new instance will use the same signer as the existing instance and
will use the existing instance's signer email as the issuer and
subject by default.
Example::
svc_creds = service_account.Credentials.from_service_account_file(
'service_account.json')
jwt_creds = jwt.OnDemandCredentials.from_signing_credentials(
svc_creds)
Args:
credentials (google.auth.credentials.Signing): The credentials to
use to construct the new credentials.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: A new Credentials instance.
"""
kwargs.setdefault('issuer', credentials.signer_email)
kwargs.setdefault('subject', credentials.signer_email)
return cls(credentials.signer, **kwargs)
def with_claims(self, issuer=None, subject=None, additional_claims=None):
"""Returns a copy of these credentials with modified claims.
Args:
issuer (str): The `iss` claim. If unspecified the current issuer
claim will be used.
subject (str): The `sub` claim. If unspecified the current subject
claim will be used.
additional_claims (Mapping[str, str]): Any additional claims for
the JWT payload. This will be merged with the current
additional claims.
Returns:
google.auth.jwt.OnDemandCredentials: A new credentials instance.
"""
new_additional_claims = copy.deepcopy(self._additional_claims)
new_additional_claims.update(additional_claims or {})
return self.__class__(
self._signer,
issuer=issuer if issuer is not None else self._issuer,
subject=subject if subject is not None else self._subject,
additional_claims=new_additional_claims,
max_cache_size=self._cache.maxsize)
@property
def valid(self):
"""Checks the validity of the credentials.
These credentials are always valid because it generates tokens on
demand.
"""
return True
def _make_jwt_for_audience(self, audience):
"""Make a new JWT for the given audience.
Args:
audience (str): The intended audience.
Returns:
Tuple[bytes, datetime]: The encoded JWT and the expiration.
"""
now = _helpers.utcnow()
lifetime = datetime.timedelta(seconds=self._token_lifetime)
expiry = now + lifetime
payload = {
'iss': self._issuer,
'sub': self._subject,
'iat': _helpers.datetime_to_secs(now),
'exp': _helpers.datetime_to_secs(expiry),
'aud': audience,
}
payload.update(self._additional_claims)
jwt = encode(self._signer, payload)
return jwt, expiry
def _get_jwt_for_audience(self, audience):
"""Get a JWT For a given audience.
If there is already an existing, non-expired token in the cache for
the audience, that token is used. Otherwise, a new token will be
created.
Args:
audience (str): The intended audience.
Returns:
bytes: The encoded JWT.
"""
token, expiry = self._cache.get(audience, (None, None))
if token is None or expiry < _helpers.utcnow():
token, expiry = self._make_jwt_for_audience(audience)
self._cache[audience] = token, expiry
return token
def refresh(self, request):
"""Raises an exception, these credentials can not be directly
refreshed.
Args:
request (Any): Unused.
Raises:
google.auth.RefreshError
"""
# pylint: disable=unused-argument
# (pylint doesn't correctly recognize overridden methods.)
raise exceptions.RefreshError(
'OnDemandCredentials can not be directly refreshed.')
def before_request(self, request, method, url, headers):
    """Attach a JWT for this request's audience to ``headers``.

    The audience claim is the request URL stripped of its query string
    and fragment.

    Args:
        request (Any): Unused. JWT credentials do not need to make an
            HTTP request to refresh.
        method (str): The request's HTTP method.
        url (str): The request's URI. This is used as the audience claim
            when generating the JWT.
        headers (Mapping): The request's headers.
    """
    # pylint: disable=unused-argument
    # (pylint doesn't correctly recognize overridden methods.)
    scheme, netloc, path, _, _ = urllib.parse.urlsplit(url)
    # Rebuild the URL without query string and fragment.
    audience = urllib.parse.urlunsplit((scheme, netloc, path, '', ''))
    self.apply(headers, token=self._get_jwt_for_audience(audience))
@_helpers.copy_docstring(google.auth.credentials.Signing)
def sign_bytes(self, message):
    # Delegate to the configured signer instance.
    return self._signer.sign(message)
@property
@_helpers.copy_docstring(google.auth.credentials.Signing)
def signer_email(self):
    # The issuer doubles as the signing identity for these credentials.
    return self._issuer
@property
@_helpers.copy_docstring(google.auth.credentials.Signing)
def signer(self):
    # Expose the underlying signer object.
    return self._signer
|
|
#!/usr/bin/env python
# Copyright (C) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage jobs in Jenkins server
import errno
import os
import operator
import sys
import hashlib
import yaml
import xml.etree.ElementTree as XML
import xml
from xml.dom import minidom
import jenkins
import re
from pprint import pformat
import logging
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.parser import YamlParser
logger = logging.getLogger(__name__)
# Python 2.6's minidom toprettyxml produces broken output by adding extraneous
# whitespace around data. This patches the broken implementation with one taken
# from Python > 2.7.3
def writexml(self, writer, indent="", addindent="", newl=""):
    """Write an XML representation of this element to ``writer``.

    Backported replacement for ``minidom.Element.writexml`` (Python >
    2.7.3); the version shipped with Python 2.6 adds extraneous
    whitespace around text data, which corrupts job config XML.

    Args:
        writer: file-like object the XML is written to.
        indent: current indentation.
        addindent: indentation to add to higher levels.
        newl: newline string.
    """
    writer.write(indent + "<" + self.tagName)

    attrs = self._get_attributes()
    # sorted() works on both Python 2 and 3; dict.keys() is a view on
    # Python 3 and has no .sort() method.
    for a_name in sorted(attrs.keys()):
        writer.write(" %s=\"" % a_name)
        minidom._write_data(writer, attrs[a_name].value)
        writer.write("\"")

    if self.childNodes:
        writer.write(">")
        if (len(self.childNodes) == 1 and
                self.childNodes[0].nodeType == minidom.Node.TEXT_NODE):
            # A lone text child is written inline, with no added
            # whitespace around the data.
            self.childNodes[0].writexml(writer, '', '', '')
        else:
            writer.write(newl)
            for node in self.childNodes:
                node.writexml(writer, indent + addindent, addindent, newl)
            writer.write(indent)
        writer.write("</%s>%s" % (self.tagName, newl))
    else:
        writer.write("/>%s" % (newl))
# PyXML xml.__name__ is _xmlplus. Check that if we don't have the default
# system version of the minidom, then patch the writexml method
# (the pre-2.7.3 implementation emits spurious whitespace around text
# nodes, breaking round-tripping of job XML).
if sys.version_info[:3] < (2, 7, 3) or xml.__name__ != 'xml':
    minidom.Element.writexml = writexml
class CacheStorage(object):
    """Persistent md5 cache of uploaded job configs, keyed by job name.

    One YAML cache file is kept per remote Jenkins URL so different
    masters never share (or clobber) each other's state.
    """

    # ensure each instance of the class has a reference to the required
    # modules so that they are available to be used when the destructor
    # is being called since python will not guarantee that it won't have
    # removed global module references during teardown.
    _yaml = yaml
    _logger = logger

    def __init__(self, jenkins_url, flush=False):
        cache_dir = self.get_cache_dir()
        # One cache per remote Jenkins URL (raw string: '\-' / '\~' are
        # invalid escape sequences in a plain string on Python 3).
        host_vary = re.sub(r'[^A-Za-z0-9\-\~]', '_', jenkins_url)
        self.cachefilename = os.path.join(
            cache_dir, 'cache-host-jobs-' + host_vary + '.yml')
        if flush or not os.path.isfile(self.cachefilename):
            self.data = {}
        else:
            with open(self.cachefilename, 'r') as yfile:
                # safe_load suffices (the cache only holds plain
                # str -> str mappings) and avoids executing arbitrary
                # YAML tags from a tampered cache file.
                self.data = yaml.safe_load(yfile)
        logger.debug("Using cache: '{0}'".format(self.cachefilename))

    @staticmethod
    def get_cache_dir():
        """Return (creating if needed) the per-user cache directory."""
        home = os.path.expanduser('~')
        if home == '~':
            raise OSError('Could not locate home folder')
        xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
            os.path.join(home, '.cache')
        path = os.path.join(xdg_cache_home, 'jenkins_jobs')
        try:
            os.makedirs(path)
        except OSError:
            # Benign race: another process may have created it first.
            if not os.path.isdir(path):
                raise
        return path

    def set(self, job, md5):
        """Record ``md5`` as the last-uploaded config hash for ``job``."""
        self.data[job] = md5

    def is_cached(self, job):
        """Return True if a cache entry exists for ``job``."""
        return job in self.data

    def has_changed(self, job, md5):
        """Return True unless ``md5`` matches the cached hash for ``job``."""
        return self.data.get(job) != md5

    def save(self):
        # check we initialized sufficiently in case called via __del__
        # due to an exception occurring in the __init__
        if getattr(self, 'data', None) is not None:
            try:
                with open(self.cachefilename, 'w') as yfile:
                    self._yaml.dump(self.data, yfile)
            except Exception as e:
                self._logger.error("Failed to write to cache file '%s' on "
                                   "exit: %s" % (self.cachefilename, e))
            else:
                self._logger.info("Cache saved")
                self._logger.debug("Cache written out to '%s'" %
                                   self.cachefilename)

    def __del__(self):
        # Best-effort flush at interpreter teardown; uses the class-level
        # module references captured above.
        self.save()
class Jenkins(object):
    """Thin wrapper around ``jenkins.Jenkins`` adding lazy job caching."""

    def __init__(self, url, user, password):
        self.jenkins = jenkins.Jenkins(url, user, password)
        # Lazily populated caches; see the `jobs` / `job_list` properties.
        self._jobs = None
        self._job_list = None

    @property
    def jobs(self):
        """List of job dicts from the server, fetched once and cached."""
        if self._jobs is None:
            # populate jobs
            self._jobs = self.jenkins.get_jobs()
        return self._jobs

    @property
    def job_list(self):
        """Set of known job names, derived from `jobs`."""
        if self._job_list is None:
            self._job_list = set(job['name'] for job in self.jobs)
        return self._job_list

    def update_job(self, job_name, xml):
        """Reconfigure ``job_name`` with ``xml``, creating it if absent."""
        if self.is_job(job_name):
            logger.info("Reconfiguring jenkins job {0}".format(job_name))
            self.jenkins.reconfig_job(job_name, xml)
        else:
            logger.info("Creating jenkins job {0}".format(job_name))
            self.jenkins.create_job(job_name, xml)

    def is_job(self, job_name):
        """Return True if the job exists (cached list first, then server)."""
        # first use cache
        if job_name in self.job_list:
            return True
        # if not exists, use jenkins
        return self.jenkins.job_exists(job_name)

    def get_job_md5(self, job_name):
        """Return the md5 hex digest of the job's current server config."""
        xml = self.jenkins.get_job_config(job_name)
        # NOTE(review): on Python 3 get_job_config returns text, which
        # would need xml.encode('utf-8') before hashing -- confirm the
        # target interpreter before changing this.
        return hashlib.md5(xml).hexdigest()

    def delete_job(self, job_name):
        """Delete ``job_name`` if it exists; no-op otherwise."""
        if self.is_job(job_name):
            logger.info("Deleting jenkins job {0}".format(job_name))
            self.jenkins.delete_job(job_name)

    def get_plugins_info(self):
        """ Return a list of plugin_info dicts, one for each plugin on the
        Jenkins instance.
        """
        try:
            plugins_list = self.jenkins.get_plugins_info()
        except jenkins.JenkinsException as e:
            if re.search("Connection refused", str(e)):
                # logger.warn is deprecated; warning is the supported name.
                logger.warning(
                    "Unable to retrieve Jenkins Plugin Info from {0},"
                    " using default empty plugins info list.".format(
                        self.jenkins.server))
                plugins_list = [{'shortName': '',
                                 'version': '',
                                 'longName': ''}]
            else:
                # Bare raise preserves the original traceback.
                raise
        logger.debug("Jenkins Plugin Info {0}".format(pformat(plugins_list)))
        return plugins_list

    def get_jobs(self, cache=True):
        """Return the job list; ``cache=False`` forces a server refetch."""
        if not cache:
            self._jobs = None
            self._job_list = None
        return self.jobs

    def is_managed(self, job_name):
        """Return True if the job's description carries the manage marker."""
        xml = self.jenkins.get_job_config(job_name)
        try:
            out = XML.fromstring(xml)
            description = out.find(".//description").text
            return description.endswith(MAGIC_MANAGE_STRING)
        except (TypeError, AttributeError):
            # Missing description element or text -> treat as unmanaged.
            pass
        return False
class Builder(object):
    """Drives the generate/compare/upload cycle for Jenkins jobs.

    Ties together the YAML parser, the md5 cache and the Jenkins API
    wrapper: YAML definitions are expanded to XML, hashed, and only
    uploaded when the hash differs from the cached one.
    """

    def __init__(self, jenkins_url, jenkins_user, jenkins_password,
                 config=None, ignore_cache=False, flush_cache=False,
                 plugins_list=None):
        self.jenkins = Jenkins(jenkins_url, jenkins_user, jenkins_password)
        self.cache = CacheStorage(jenkins_url, flush=flush_cache)
        self.global_config = config
        self.ignore_cache = ignore_cache
        self._plugins_list = plugins_list

    @property
    def plugins_list(self):
        # Fetched lazily so purely offline operations never hit the API.
        if self._plugins_list is None:
            self._plugins_list = self.jenkins.get_plugins_info()
        return self._plugins_list

    def load_files(self, fn):
        """Parse YAML job definitions from ``fn``.

        Args:
            fn: a list of file paths, directories or file-like objects.
                Passing a single element is deprecated.
        """
        self.parser = YamlParser(self.global_config, self.plugins_list)

        # handle deprecated behavior. A plain string must be treated as a
        # single path: on Python 3 str has __iter__, so the bare hasattr
        # check would wrongly iterate it character by character.
        if isinstance(fn, str) or not hasattr(fn, '__iter__'):
            logger.warning(
                'Passing single elements for the `fn` argument in '
                'Builder.load_files is deprecated. Please update your code '
                'to use a list as support for automatic conversion will be '
                'removed in a future version.')
            fn = [fn]

        files_to_process = []
        for path in fn:
            if os.path.isdir(path):
                files_to_process.extend([os.path.join(path, f)
                                         for f in os.listdir(path)
                                         if (f.endswith('.yml')
                                             or f.endswith('.yaml'))])
            else:
                files_to_process.append(path)

        # symlinks used to allow loading of sub-dirs can result in duplicate
        # definitions of macros and templates when loading all from top-level
        unique_files = []
        for f in files_to_process:
            rpf = os.path.realpath(f)
            if rpf not in unique_files:
                unique_files.append(rpf)
            else:
                logger.warning("File '%s' already added as '%s', ignoring "
                               "reference to avoid duplicating yaml "
                               "definitions." % (f, rpf))

        for in_file in unique_files:
            # use of ask-for-permissions instead of ask-for-forgiveness
            # performs better when low use cases.
            if hasattr(in_file, 'name'):
                fname = in_file.name
            else:
                fname = in_file
            logger.debug("Parsing YAML file {0}".format(fname))
            if hasattr(in_file, 'read'):
                self.parser.parse_fp(in_file)
            else:
                self.parser.parse(in_file)

    def delete_old_managed(self, keep):
        """Delete managed server jobs not listed in ``keep``.

        Returns:
            int: the number of deleted jobs.
        """
        jobs = self.jenkins.get_jobs()
        deleted_jobs = 0
        for job in jobs:
            if job['name'] not in keep and \
                    self.jenkins.is_managed(job['name']):
                logger.info("Removing obsolete jenkins job {0}"
                            .format(job['name']))
                self.delete_job(job['name'])
                deleted_jobs += 1
            else:
                logger.debug("Ignoring unmanaged jenkins job %s",
                             job['name'])
        return deleted_jobs

    def delete_job(self, jobs_glob, fn=None):
        """Delete the job(s) matching ``jobs_glob``.

        When ``fn`` is given the glob is expanded against the YAML
        definitions; otherwise it is treated as a literal job name.
        """
        if fn:
            self.load_files(fn)
            self.parser.expandYaml([jobs_glob])
            jobs = [j['name'] for j in self.parser.jobs]
        else:
            jobs = [jobs_glob]

        if jobs is not None:
            logger.info("Removing jenkins job(s): %s" % ", ".join(jobs))
        for job in jobs:
            self.jenkins.delete_job(job)
            if self.cache.is_cached(job):
                # Blank the cached md5 so a later update re-uploads the job.
                self.cache.set(job, '')

    def delete_all_jobs(self):
        """Delete every job known to the Jenkins server."""
        jobs = self.jenkins.get_jobs()
        logger.info("Number of jobs to delete: %d", len(jobs))
        for job in jobs:
            self.delete_job(job['name'])

    def update_job(self, input_fn, jobs_glob=None, output=None):
        """Generate XML from YAML and upload (or write out) changed jobs.

        Returns:
            tuple: (list of xml job objects, number of jobs updated).
        """
        self.load_files(input_fn)
        self.parser.expandYaml(jobs_glob)
        self.parser.generateXML()

        logger.info("Number of jobs generated: %d", len(self.parser.xml_jobs))
        self.parser.xml_jobs.sort(key=operator.attrgetter('name'))

        if (output and not hasattr(output, 'write')
                and not os.path.isdir(output)):
            logger.info("Creating directory %s" % output)
            try:
                os.makedirs(output)
            except OSError:
                # Benign race: another process may have created it first.
                if not os.path.isdir(output):
                    raise

        updated_jobs = 0
        for job in self.parser.xml_jobs:
            if output:
                if hasattr(output, 'write'):
                    # `output` is a file-like object
                    logger.info("Job name: %s", job.name)
                    logger.debug("Writing XML to '{0}'".format(output))
                    try:
                        output.write(job.output())
                    except IOError as exc:
                        if exc.errno == errno.EPIPE:
                            # EPIPE could happen if piping output to
                            # something that doesn't read the whole input
                            # (e.g.: the UNIX `head` command)
                            return
                        raise
                    continue

                output_fn = os.path.join(output, job.name)
                logger.debug("Writing XML to '{0}'".format(output_fn))
                # Context manager guarantees the handle is closed even if
                # job.output() or the write raises.
                with open(output_fn, 'w') as f:
                    f.write(job.output())
                continue

            md5 = job.md5()
            if (self.jenkins.is_job(job.name)
                    and not self.cache.is_cached(job.name)):
                # Seed the cache from the server so unchanged jobs are not
                # re-uploaded on the first run after a cache flush.
                old_md5 = self.jenkins.get_job_md5(job.name)
                self.cache.set(job.name, old_md5)

            if self.cache.has_changed(job.name, md5) or self.ignore_cache:
                self.jenkins.update_job(job.name, job.output())
                updated_jobs += 1
                self.cache.set(job.name, md5)
            else:
                logger.debug("'{0}' has not changed".format(job.name))

        return self.parser.xml_jobs, updated_jobs
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from django.contrib.gis.db import models
from django.contrib.auth.models import User
from picklefield.fields import PickledObjectField
from model_utils.managers import QueryManager
from .behaviours import Publishable, Expireable, Timestampable, Orderable
class SearchPrice(models.Model):
    """A predefined price band used to populate search forms."""
    # Integer codes distinguishing letting prices from sale prices.
    SEARCH_PRICE_LETTING = 1
    SEARCH_PRICE_SALE = 2
    SEARCH_PRICE_TYPES = (
        (SEARCH_PRICE_LETTING, 'Letting'),
        (SEARCH_PRICE_SALE, 'Sale')
    )
    type = models.IntegerField(choices=SEARCH_PRICE_TYPES)
    label = models.CharField(max_length=20)  # human-readable form label
    price = models.IntegerField()

    def __str__(self):
        return self.label

    class Meta:
        pass
class PropertyType(Publishable, models.Model):
    """A category of property (e.g. house, flat), with publish status."""
    name = models.CharField(max_length=50)
    slug = models.SlugField(max_length=50)
    objects = models.Manager()
    # Convenience managers pre-filtered by publish status.
    active = QueryManager(status=Publishable.STATUS_CHOICE_ACTIVE)
    inactive = QueryManager(status=Publishable.STATUS_CHOICE_INACTIVE)

    def __str__(self):
        return self.name

    class Meta:
        pass
class Branch(Publishable, Timestampable, models.Model):
    """An agency branch office, with address, geo location and contacts."""
    name = models.CharField(max_length=200)
    slug = models.SlugField(max_length=200)
    address_1 = models.CharField(max_length=100)
    address_2 = models.CharField(max_length=100, blank=True, null=True)
    address_3 = models.CharField(max_length=100, blank=True, null=True)
    town_city = models.CharField(max_length=50)
    county = models.CharField(max_length=50)
    postcode = models.CharField(max_length=10)
    location = models.PointField()  # GeoDjango point for map display/search
    telephone = models.CharField(max_length=20)
    email = models.EmailField()
    details = models.TextField()
    opening_hours = models.TextField()
    objects = models.Manager()
    # Convenience managers pre-filtered by publish status.
    active = QueryManager(status=Publishable.STATUS_CHOICE_ACTIVE)
    inactive = QueryManager(status=Publishable.STATUS_CHOICE_INACTIVE)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['name']
        verbose_name = "branch"
        verbose_name_plural = "branches"
class Property(Publishable, Expireable, Timestampable, models.Model):
    """Abstract base for a marketed property listing.

    Concrete subclasses (declared elsewhere) inherit the full set of
    address, location and feature fields defined here.
    """
    branch = models.ForeignKey(Branch)
    property_type = models.ForeignKey(PropertyType)
    title = models.CharField(max_length=200)
    slug = models.SlugField(max_length=200)
    address_1 = models.CharField(max_length=100)
    address_2 = models.CharField(max_length=100, blank=True, null=True)
    address_3 = models.CharField(max_length=100, blank=True, null=True)
    town_city = models.CharField(max_length=50)
    county = models.CharField(max_length=50)
    postcode = models.CharField(max_length=10)
    location = models.PointField()  # GeoDjango point for map display/search
    # Optional override shown instead of the full address.
    display_address = models.CharField(max_length=200, blank=True, null=True)
    bedrooms = models.IntegerField()
    en_suites = models.IntegerField()
    receptions = models.IntegerField()
    details = models.TextField()
    summary = models.TextField(max_length=2000)
    garden = models.BooleanField(default=False)
    parking = models.BooleanField(default=False)
    retirement = models.BooleanField(default=False)
    objects = models.Manager()

    def __str__(self):
        return self.title

    class Meta:
        abstract = True
        ordering = ['-updated_at']
        verbose_name = "property"
        verbose_name_plural = "properties"
class Feature(Timestampable, Orderable, models.Model):
    """Abstract base for a single bullet-point feature line."""
    text = models.CharField(max_length=200)
    objects = models.Manager()

    def __str__(self):
        return self.text

    class Meta:
        abstract = True
class Picture(Publishable, Timestampable, Orderable, models.Model):
    """Abstract base for an ordered, publishable image attachment."""
    caption = models.CharField(max_length=200)
    attachment = models.ImageField(upload_to='pictures/')
    objects = models.Manager()
    # Convenience managers pre-filtered by publish status.
    active = QueryManager(status=Publishable.STATUS_CHOICE_ACTIVE)
    inactive = QueryManager(status=Publishable.STATUS_CHOICE_INACTIVE)

    def __str__(self):
        return self.caption

    class Meta:
        abstract = True
class MediaType(Publishable, models.Model):
    """A kind of media attachment (e.g. floorplan, brochure)."""
    name = models.CharField(max_length=50)
    slug = models.SlugField(max_length=50)
    objects = models.Manager()
    # Convenience managers pre-filtered by publish status.
    active = QueryManager(status=Publishable.STATUS_CHOICE_ACTIVE)
    inactive = QueryManager(status=Publishable.STATUS_CHOICE_INACTIVE)

    def __str__(self):
        return self.name

    class Meta:
        pass
class Media(Publishable, models.Model):
    """Abstract base for a typed, publishable file attachment."""
    media_type = models.ForeignKey(MediaType)
    description = models.CharField(max_length=200)
    attachment = models.FileField(upload_to='media/')
    objects = models.Manager()
    # Convenience managers pre-filtered by publish status.
    active = QueryManager(status=Publishable.STATUS_CHOICE_ACTIVE)
    inactive = QueryManager(status=Publishable.STATUS_CHOICE_INACTIVE)

    def __str__(self):
        return self.description

    class Meta:
        abstract = True
class PropertyTenure(Publishable, models.Model):
    """A tenure kind for sale listings (e.g. freehold, leasehold)."""
    name = models.CharField(max_length=50)
    slug = models.SlugField(max_length=50)
    objects = models.Manager()
    # Convenience managers pre-filtered by publish status.
    active = QueryManager(status=Publishable.STATUS_CHOICE_ACTIVE)
    inactive = QueryManager(status=Publishable.STATUS_CHOICE_INACTIVE)

    def __str__(self):
        return self.name

    class Meta:
        pass
class Contact(Timestampable, models.Model):
    """Abstract base for an enquiry submitted through a contact form."""
    # What the enquirer is asking for.
    more_details = models.BooleanField(default=True)
    view_property = models.BooleanField(default=True)
    title = models.CharField(max_length=10)
    forename = models.CharField(max_length=50)
    surname = models.CharField(max_length=50)
    message = models.CharField(max_length=2000)
    telephone = models.CharField(max_length=20)
    email = models.EmailField()
    country = models.CharField(max_length=50)
    postcode = models.CharField(max_length=10)

    def __str__(self):
        return "%s %s (%s)" % (self.forename, self.surname, self.email)

    class Meta:
        abstract = True
class Note(Timestampable, models.Model):
    """Abstract base for a free-text note."""
    text = models.TextField()

    def __str__(self):
        return self.text

    class Meta:
        abstract = True
class Favourite(Timestampable, models.Model):
    """Abstract base linking a user to something they have favourited."""
    user = models.ForeignKey(User)

    def __str__(self):
        return self.user.username

    class Meta:
        abstract = True
class Alert(Timestampable, models.Model):
    """A saved search alert for a user."""
    user = models.ForeignKey(User)
    key = models.CharField(max_length=40)
    # Pickled search criteria — presumably the saved search parameters;
    # TODO(review): confirm structure against the code that creates alerts.
    criteria = PickledObjectField()

    def __str__(self):
        return self.user.username

    class Meta:
        pass
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from astropy.extern import six
from .inputvalidation import as_iterable, clamp, as_unsigned_integer
if six.PY2: # pragma: no cover
from future_builtins import zip
# Public API of this module.
__all__ = ['create_slices', 'expand_multi_dims', 'is_numeric_array',
           'mgrid_for_array', 'pad', 'new_masked_elements']

# Boolean, unsigned integer, signed integer, float, complex.
_NUMERIC_KINDS = set('buifc')
def is_numeric_array(array):
    """Determine whether the argument has a numeric datatype, when
    converted to a NumPy array.

    Booleans, unsigned integers, signed integers, floats and complex
    numbers are the kinds of numeric datatype.

    Parameters
    ----------
    array : `numpy.ndarray`-like
        The array to check.

    Returns
    -------
    is_numeric : `bool`
        True if the array has a numeric datatype, False if not.
    """
    # Prefer the object's own dtype; fall back to converting anything
    # array-like. The kind codes checked are: bool, unsigned int, signed
    # int, float, complex.
    try:
        kind = array.dtype.kind
    except AttributeError:
        kind = np.asarray(array).dtype.kind
    return kind in 'buifc'
def create_slices(point, shape, origin='start'):
    """Create `slice` to index an array starting from a specified position \
and with specified shape.

    Parameters
    ----------
    point : `int`, `tuple` of integers
        The position represents the starting/central/end point (inclusive)
        of the slice. The interpretation of the point is controlled by the
        ``origin`` parameter. Negative values for the point are considered
        off the grid by this amount and not like normal ``Python`` which
        interprets negative indices as indices counting from the end.

    shape : positive `int`, `tuple` of positive integers
        The shape represents the extent of the slice. The ``shape`` can
        also be a `numpy.ndarray` in which case its shape is used.

        .. note::
            The ``point`` and ``shape`` should contain as many integers as
            the target array has dimensions. In case it is a flat (1D)
            array the parameters don't need to be tuples but can also be
            single integers. **But** both parameters must be the same type
            and contain the same number of elements.

    origin : `str` {"start" | "end" | "center"}, optional
        Defines the interpretation of the ``point`` parameter:

        - ``"start"``, first point included in the slice.
        - ``"end"``, last point included in the slice.
        - ``"center"``, central point of the slice. Odd shapes have as many
          elements before and after the center while even shapes have one
          more element before.

        Default is ``"start"``.

    Returns
    -------
    slices : `tuple` of slices
        The returned object can be used to index (slice) an array and get
        the specified parts of the array.

        .. warning::
            The return is always wrapped as **tuple** which is the basis
            for ``NumPy`` slicing but the basic ``Python`` structures like
            `list`, `str`, ... **do not** support this. If you want to
            index these you need to get the appropriate element (most
            probably the first) from the return.

    Raises
    ------
    ValueError
        If the ``origin`` is a not allowed type or string.

    TypeError
        If ``point`` and ``shape`` are of different type (one scalar and
        one tuple) or contain different number of elements.

    See also
    --------
    nddata.nddata.mixins.NDSlicingMixin.slice

    Examples
    --------
    Given an two dimensional 5x10 array::

        >>> from nddata.utils.numpyutils import create_slices
        >>> import numpy as np
        >>> array = np.arange(50).reshape(5, 10)

    For example to get a 3x3 part of the array centered at 2, 5::

        >>> array = np.arange(50).reshape(5, 10)
        >>> slices = create_slices(point=(2, 5), shape=(3, 4), origin='center')
        >>> array[slices]
        array([[13, 14, 15, 16],
               [23, 24, 25, 26],
               [33, 34, 35, 36]])

    Or a 7 element one-dimensional array ending with index 10::

        >>> array = np.arange(15)
        >>> array[create_slices(point=10, shape=7, origin='end')]
        array([ 4,  5,  6,  7,  8,  9, 10])

    These can also be used to insert a small array into a bigger one using
    a central index::

        >>> array2 = np.arange(5)
        >>> array[create_slices(point=4, shape=array2, origin='center')] = array2
        >>> array
        array([ 0,  1,  0,  1,  2,  3,  4,  7,  8,  9, 10, 11, 12, 13, 14])
    """
    # In case the shape is a numpy array we take its shape. This allows the
    # user to pass in the array which should be inserted.
    try:
        shape = shape.shape
    except AttributeError:
        pass
    else:
        # If we have a numpy array do a quick check that the point is also a
        # tuple (or iterable). This is a bit annoying but .shape always
        # returns a tuple even if the array is 1D or a scalar.
        point = as_iterable(point)

    # Zip the point and shape. We require them to be of equal length or
    # integer; if they are integers we need to wrap them into iterables
    # before zipping.
    try:
        zips = zip(point, shape)
    except TypeError:
        zips = zip(as_iterable(point), as_iterable(shape))

    # Depending on the origin determine the appropriate slices. Start is
    # "normal" slicing but "end" and "center" require some more
    # calculations.

    # Important: Negative indices are not allowed when using this kind of
    # slice creation since that could/would create problems later on. Thus
    # each slice will contain the "maximum" of the computed value or 0.
    # This ensures that the smallest start/stop is 0.

    # TODO: Maybe it is possible also to allow negative indices but that
    # would require that the position is first interpreted. Positive
    # indices leading to indices >= 0 and negative indices always resulting
    # in indices < 0. But for now this would be too much convolution.
    if origin == 'start':
        # With the anchor at the starting point the slices can be
        # calculated by adding the shape of each dimension to the starting
        # point.
        return tuple(slice(clamp(pos, 0), clamp(pos + length, 0))
                     for pos, length in zips)

    elif origin == 'end':
        # If the position is the end value we need to subtract the length
        # from the position to get the starting point. But one also needs
        # to add one to the start and the end because otherwise the end
        # point would not be included.
        return tuple(slice(clamp(pos - length + 1, 0), clamp(pos + 1, 0))
                     for pos, length in zips)

    elif origin == 'center':
        # Using the center as anchor is more complicated because we might
        # have even and odd shapes. We start by calculating an intermediate
        # generator containing the position and the shapes divided by 2. We
        # use floor division and keep the modulo.
        zips = ((pos, length // 2, length % 2) for pos, length in zips)
        # The starting point is always just the position minus the result
        # of the floor division, while the stop is the pos plus the result
        # of the floor division AND the modulo. This ensures that odd
        # length shapes have as many elements before and after the center
        # while even shapes contain one more element before than after.
        return tuple(slice(clamp(pos - half_len, 0),
                           clamp(pos + half_len + mod, 0))
                     for pos, half_len, mod in zips)

    else:
        # BUG FIX: the message previously offered "stop" although the
        # accepted value is "end".
        raise ValueError('origin must be one of "start", "end" or "center".')
def pad(array, offsets, mode='constant', constant_values=0):
    """Alternative to :func:`numpy.pad` but only with ``mode=constant``.

    The :func:`numpy.pad` function is very powerful but very slow for small
    inputs and even if it scales exactly like this function it is
    approximately a factor of 3 slower. This function serves as fast
    alternative for the constant mode with a scalar fill value and as a
    compatibility layer for ``NumPy 1.6`` which did not have a pad function
    (and some failures on ``NumPy 1.7-1.9``).

    Parameters
    ----------
    array : `numpy.ndarray`-like
        The array to pad. If it's a subclass it will be cast to a plain
        array.

    offsets : `tuple` of `tuple`
        This should be a tuple containing a tuple for each dimension. The
        first element should be the padding before and the second the
        padding at the end. For example ``((1,2),)`` for padding a
        one-dimensional array with one element at the start and two at the
        end. The convenience options of :func:`numpy.pad` are not
        integrated.

    mode : `str`
        Should be ``"constant"``. Other modes are available using
        :func:`numpy.pad`.
        Default is ``"constant"``.

    constant_values : number
        The value with which to pad the array. Must be a scalar. The more
        advanced options of `numpy.pad` are not allowed.
        Default is ``0``.

    Returns
    -------
    padded_array : `numpy.ndarray`
        The padded array.

    Raises
    ------
    ValueError
        If the array is a scalar, the mode is not ``"constant"`` or the
        number of offset pairs does not match the array's dimensions.

    Examples
    --------
    To pad a one-dimensional array::

        >>> from nddata.utils.numpyutils import pad
        >>> import numpy as np
        >>> pad([1,2,3], (1, 2), 'constant', 0)
        array([0, 1, 2, 3, 0, 0])

    But also arbitrary dimensional arrays can be padded::

        >>> pad(np.ones((3,3), int), ((0, 1), (2, 1)), 'constant', 0)
        array([[0, 0, 1, 1, 1, 0],
               [0, 0, 1, 1, 1, 0],
               [0, 0, 1, 1, 1, 0],
               [0, 0, 0, 0, 0, 0]])
    """
    # Convert it to an array.
    array = np.asarray(array)

    # Scalar values should not be padded.
    if array.shape == ():
        raise ValueError('cannot pad scalars.')

    # This is a compatibility function with much less options and optimized
    # just to do constant padding with one value. If that's not enough use
    # np.pad - even though I had some test failures for numpy 1.7-1.9
    # which could mean that there were some changes recently. This is just
    # a way to deal with my common case and ignore the wide range of
    # np.lib.pad possibilities:
    # TL; DR; Only allow constants and only if the value is a scalar.
    if mode != 'constant':
        raise ValueError('pad function can only use mode=constant')

    # Convenience for 1D arrays: a bare (before, after) pair is wrapped to
    # ((before, after),). The isinstance check keeps an already-nested
    # ((1, 1), (1, 1)) from being wrapped into garbage; such mismatched
    # input is rejected by the dimension check below instead.
    if array.ndim == 1 and len(offsets) == 2 and \
            not isinstance(offsets[0], (tuple, list)):
        offsets = (offsets, )

    # One (before, after) pair is required per dimension; catching this
    # here gives a clear error rather than a confusing TypeError later.
    if len(offsets) != array.ndim:
        raise ValueError('offsets must contain one (before, after) pair '
                         'per array dimension.')

    # Calculate the final shape as tuple by adding the current shape to the
    # sum of offsets in this dimension.
    finalshape = tuple(i + offsets[idx][0] + offsets[idx][1]
                       for idx, i in enumerate(array.shape))

    # Unfortunately np.full is only available from numpy 1.8; as long as
    # 1.7 is supported this cannot be used.
    # TODO: Use this as soon as numpy 1.7 isn't supported anymore
    # result = np.full(finalshape, dtype=array.dtype,
    #                  fill_value=constant_values)
    result = np.empty(finalshape, dtype=array.dtype)
    result.fill(constant_values)

    # Calculate the position where to insert the array. This can be done by
    # using the create_slices function with origin="start" (default
    # therefore omitted) and the position is just the first element of the
    # offsets. This requires an intermediate list comprehension but for the
    # shape we can simply use the original array; the function will extract
    # the shape by itself.
    # Then insert the original array in the new one. This will copy the
    # array!
    pos = create_slices([i[0] for i in offsets], array)
    # Without create_slices this would be:
    # pos = tuple(slice(offsets[dim][0], offsets[dim][0]+array.shape[dim], 1)
    #             for dim in range(array.ndim))
    result[pos] = array
    return result
def expand_multi_dims(array, axis, ndims):
    """Add a variable number of empty dimensions to a 1D array.

    Parameters
    ----------
    array : `numpy.ndarray`
        The 1D array which should be reshaped.

    axis : positive `int`
        The dimension along which the array is oriented. This will be the
        dimension which **keeps** the original size of the array.

    ndims : positive `int`
        The total number of dimensions of the returned array.

    Returns
    -------
    reshaped_array : `numpy.ndarray`
        The reshaped array.

    Raises
    ------
    ValueError
        In case the ``array`` has more than one dimension already, or
        ``axis`` is not smaller than ``ndims``.

    Notes
    -----
    This function is more general than `numpy.expand_dims` because it can
    add multiple dimensions at once, but it cannot handle multidimensional
    input. Its primary purpose is to create an array that broadcasts
    correctly against some other data when the matching dimension is not
    the last one.

    .. note::
        The interpretation differs from `numpy.expand_dims`: there the
        ``axis`` parameter indicates where an empty dimension is inserted,
        while here ``axis`` indicates where the only **non-empty**
        dimension should be.

    Examples
    --------
    Broadcasting weights along the first dimension of a 2D array::

        >>> import numpy as np
        >>> from nddata.utils.numpyutils import expand_multi_dims
        >>> weights = np.array([1,2,3])
        >>> np.ones((3, 5)) * expand_multi_dims(weights, axis=0, ndims=2)
        array([[ 1.,  1.,  1.,  1.,  1.],
               [ 2.,  2.,  2.,  2.,  2.],
               [ 3.,  3.,  3.,  3.,  3.]])

    The same works against arrays of any dimensionality, e.g.
    ``expand_multi_dims(weights, axis=1, ndims=4)`` broadcasts along the
    second axis of a 4D array.
    """
    array = np.asanyarray(array)

    # Multi-dimensional input cannot be expanded here.
    if array.ndim != 1:
        raise ValueError('cannot expand multiple dimensions of an array with '
                         'more than one dimension. The array has {0} '
                         'dimensions'.format(array.ndim))

    # The alignment axis must exist in the requested dimensionality.
    if axis >= ndims:
        raise ValueError('the alignment axis ({0}) is out of bounds for a '
                         'desired dimensionality ({1})'.format(axis, ndims))

    # A 1-dimensional result is the input itself; nothing to do.
    if ndims == 1:
        return array

    # Cast axis and ndims to positive integers.
    axis = as_unsigned_integer(axis)
    ndims = as_unsigned_integer(ndims)

    # Every dimension is empty (length 1) except the alignment axis, which
    # keeps the full array size.
    target_shape = [1] * ndims
    target_shape[axis] = array.size
    return array.reshape(*target_shape)
def mgrid_for_array(data):
    """Build the coordinate grids (`numpy.mgrid`) matching ``data``'s shape.

    Parameters
    ----------
    data : `numpy.ndarray`-like
        The data for which to create a `numpy.mgrid`.

    Returns
    -------
    list of grids : `list` of numpy.ndarrays
        One coordinate grid per dimension of ``data``.

    Notes
    -----
    Unlike using `numpy.mgrid` directly, the result is a plain `list`
    instead of a `numpy.ndarray`, so it can be extended (e.g. with the data
    itself) and unpacked into functions taking an arbitrary number of
    arguments::

        >>> import numpy as np
        >>> data = np.ones((10, 20))
        >>> grid = mgrid_for_array(data)
        >>> grid.append(data)
        >>> def func(x, y, z):
        ...     pass  # just demonstration!
        >>> func(*grid)
    """
    data = np.asanyarray(data)
    # One slice per axis; mgrid expands each slice into a coordinate grid.
    slices = tuple(slice(extent) for extent in data.shape)
    mesh = np.mgrid[slices]
    # For a 1-tuple key mgrid returns a single stacked array rather than a
    # sequence of grids, so wrap it; otherwise unpack into a list.
    return [mesh] if data.ndim == 1 else list(mesh)
def new_masked_elements(oldmask, newmask):
    """Returns a mask indicating the values that are masked in ``newmask`` \
and unmasked in ``oldmask``.

    Parameters
    ----------
    oldmask : boolean `numpy.ndarray`
        The reference mask.

    newmask : boolean `numpy.ndarray`
        The new mask.

    Returns
    -------
    new_masked : boolean `numpy.ndarray`
        ``True`` where an element is masked in ``newmask`` but not masked
        in the ``oldmask``, and ``False`` otherwise.

    Examples
    --------
    This is a convenience function to get a mask indicating elements that
    are masked which are not in a reference::

        >>> import numpy as np
        >>> mask_old = np.array([False, False, True, True], dtype=bool)
        >>> mask_new = np.array([False, True, False, True], dtype=bool)
        >>> bool(new_masked_elements(mask_old, mask_new)[1])
        True

    This can be useful if one works in-place with a mask but wants to
    further process (or simply count) the newly masked elements.
    """
    # np.asarray has the "copy only if needed" semantics the original
    # np.array(..., copy=False) relied on; under NumPy 2.0 copy=False means
    # "never copy" and raises when a conversion (e.g. to bool) needs one.
    newmask = np.asarray(newmask, dtype=bool)
    oldmask = np.asarray(oldmask, dtype=bool)
    # Boolean algebra directly on the masks instead of comparing the
    # boolean arrays against the integers 1 and 0.
    return newmask & ~oldmask
|
|
import numpy
import configs2D
import sino_proj2D

# Short local aliases for frequently used numpy names.
pi = numpy.pi
zeros = numpy.zeros
array = numpy.array
ones = numpy.ones
sin = numpy.sin
cos = numpy.cos
tan = numpy.tan
sqrt = numpy.sqrt
float32 = numpy.float32
float64 = numpy.float64

# dtype for the 0/1 "indicator" masks (small ints are sufficient).
indicator_type = numpy.int8
#default_type = numpy.float32
# Working precision for sinogram/image data arrays.
default_type = numpy.float64

# Default (ns, nu) sinogram shape and (nx, ny) image shape.
default_data_shape = 120,128
default_image_shape = 128,128

# Set to 1 to load the CUDA-accelerated projection library via ctypes and
# enable the c*-prefixed methods on sinogram2D/image2D.
using_CUDA = 0
if using_CUDA:
    import ctypes
    # Shared library built from the CUDA sources; looked up in the cwd.
    csinolib = numpy.ctypeslib.load_library('sino_proj2D_CUDA','.')
    cbackproject = csinolib.backproject
    crayproj = csinolib.rayproj
    setGPUdev = csinolib.setGPUdevice
    getGPUdev = csinolib.getGPUdevice

    def set_GPU_dev(devnum = 0):
        # Select the CUDA device used by the C library.
        cdevnum = ctypes.c_int()
        cdevnum.value = devnum
        setGPUdev(cdevnum)

    def get_GPU_dev():
        # Query the CUDA device currently used by the C library.
        cdevnum = ctypes.c_int()
        cdevnum.value = getGPUdev()
        return cdevnum.value
class sinogram2D(object):
    '''2D sinogram array for tomography.

    Holds the sinogram data ``mat`` (ns x nu), a same-shaped ``indicator``
    mask flagging valid bins, and per-view configuration vectors in
    ``frame_vectors`` (one row of 8 values per trajectory sample, produced
    by the configuration function selected from ``configs2D.configs``).
    '''
    def __init__(self,\
                 config_name='circular_fan',\
                 parms={"source_to_detector":8.,\
                        "radius":4.},\
                 shape= default_data_shape,\
                 slen=pi, ulen=2.0,\
                 s0=0., u0=-1.0,\
                 s_include_endpoints=0):
        '''Initializes the sinogram array.

        config_name -- key into configs2D.configs selecting the scan
            configuration function.
        parms -- parameter dict passed to that configuration function.
            NOTE(review): mutable default argument; the same dict object is
            shared by every call relying on the default -- do not mutate.
        shape -- (ns, nu): number of views and detector bins.
        slen, s0 -- length and start of the trajectory-parameter range.
        ulen, u0 -- length and start of the detector-coordinate range.
        s_include_endpoints -- if true, the s samples span both endpoints
            (changes the ds spacing; see get_ds).
        '''
        self.ns=shape[0]
        self.nu=shape[1]
        self.mat=zeros(shape,default_type)
        # 1 where a sinogram bin is set/valid, 0 otherwise.
        self.indicator=ones(shape,indicator_type)
        self.frame_vectors=zeros((self.ns,8) , default_type)
        self.config_name=config_name
        self.parms=parms
        self.frame=configs2D.configs[config_name]
        self.slen=slen
        self.ulen=ulen
        self.s_include_endpoints=s_include_endpoints
        self.s0=s0
        self.u0=u0
        self.ds=self.get_ds()
        self.du=self.get_du()
        # The following "for" loop creates an array of the configurations from all
        # different source location along the trajectory. This array is used later as
        # an input for the reconstruction algorithms done by "sinoproj2D.f"
        # It uses file config2D.py.
        for ip in range(self.ns):
            s=self.s0+ip*self.ds
            self.frame_vectors[ip]=self.frame(s,self.parms)

    def __str__(self):
        '''Gives sinogram matrix parameters'''
        # Count of bins flagged valid by the indicator mask.
        nset=sum(sum(self.indicator))
        frac=(100.0 * nset)/(self.ns * self.nu)
        lulu = '''
The sinogram is %f %% full.
ns = %d
nu = %d
ds = %f
du = %f
s0 = %f
u0 = %f''' % (frac,self.ns,self.nu,self.ds,self.du,self.s0,self.u0)
        return lulu

    def mag(self):
        # L2 norm of the sinogram, counting only bins where indicator != 0.
        return sqrt( ((self.indicator*self.mat)**2).sum(dtype='float64') )

    def dist_to(self,sino):
        '''Calculates L2 distance to sino'''
        # Distance is restricted to this sinogram's valid (indicator) bins.
        dist = sqrt( (self.indicator*(self.mat-sino.mat)**2).sum(dtype = "float64") )
        return dist

    def duplicate(self):
        '''Returns a deep copy: same geometry, copied mat/indicator/frames.'''
        new_sino = sinogram2D(\
            config_name=self.config_name,\
            parms=self.parms,\
            shape= (self.ns,self.nu),\
            slen=self.slen, ulen=self.ulen,\
            s0=self.s0, u0=self.u0,\
            s_include_endpoints=self.s_include_endpoints)
        new_sino.mat = self.mat.copy()
        new_sino.indicator = self.indicator.copy()
        new_sino.frame_vectors = self.frame_vectors.copy()
        return new_sino

    def get_ds(self):
        # With endpoints included the ns samples span [s0, s0+slen]
        # inclusively, hence ns-1 intervals; otherwise ns intervals.
        if self.s_include_endpoints:
            return self.slen/(self.ns-1.)
        else:
            return self.slen/self.ns

    def get_du(self):
        # Detector bin width.
        return self.ulen/self.nu

    def get_parms(self):
        return self.parms

    def frame_vecs(self,s):
        '''Returns the config vectors for the trajectory parm s'''
        # The configuration function packs 8 numbers: source location,
        # detector center, and two detector unit vectors (2 components each).
        clist = self.frame(s,self.parms)
        xloc = array(clist[0:2])
        detc = array(clist[2:4])
        euhat = array(clist[4:6])
        ewhat = array(clist[6:8])
        return xloc, detc, euhat, ewhat

    def back_project_rays_to(self,image):
        '''Backproject onto image (ray driven)'''
        ns=self.ns
        nu=self.nu
        ds=self.ds
        du=self.du
        s0=self.s0
        u0=self.u0
        config=self.config_name
        dx=image.dx
        dy=image.dy
        x0=image.x0
        y0=image.y0
        nx=image.nx
        ny=image.ny
        # Result overwrites image.mat; the heavy lifting is done by the
        # compiled sino_proj2D extension.  (s0 and config are looked up but
        # not passed to the extension call.)
        image.mat=array(\
            sino_proj2D.backproject(\
                self.mat,\
                self.indicator,\
                self.frame_vectors,\
                ns,ds,nu,du,u0,\
                dx,dy,x0,y0,nx,ny),\
            order='C')

    # Only defined when the module-level using_CUDA flag is set.
    if using_CUDA:
        def cback_project_rays_to(self,image,nblocks=64,blocksize=4):
            '''Ray-driven backprojection with CUDA'''
            # The CUDA library works in single precision: convert the
            # buffers, marshal all scalars into ctypes, run, convert back.
            tempsino = self.mat.astype('float32')
            tempimage = image.mat.astype('float32')
            tempfv = self.frame_vectors.astype('float32')
            ns = ctypes.c_int()
            ns.value = self.mat.shape[0]
            nu = ctypes.c_int()
            nu.value = self.mat.shape[1]
            ds = ctypes.c_float()
            ds.value = float32(self.get_ds())
            du = ctypes.c_float()
            du.value = float32(self.get_du())
            s0 = ctypes.c_float()
            s0.value = float32(self.s0)
            u0 = ctypes.c_float()
            u0.value = float32(self.u0)
            nx = ctypes.c_int()
            nx.value=image.mat.shape[0]
            ny = ctypes.c_int()
            ny.value=image.mat.shape[1]
            dx = ctypes.c_float()
            dx.value=float32(image.get_dx())
            dy = ctypes.c_float()
            dy.value=float32(image.get_dy())
            x0 = ctypes.c_float()
            x0.value=float32(image.x0)
            y0 = ctypes.c_float()
            y0.value=float32(image.y0)
            # CUDA launch configuration.
            nblk = ctypes.c_uint()
            nblk.value=nblocks
            blksz = ctypes.c_uint()
            blksz.value=blocksize
            cbackproject(\
                ctypes.c_void_p(tempsino.ctypes.data),\
                ctypes.c_void_p(self.indicator.ctypes.data),\
                ctypes.c_void_p(tempfv.ctypes.data),\
                ns,nu,du,u0,\
                ctypes.c_void_p(tempimage.ctypes.data),\
                dx,dy,x0,y0,nx,ny,\
                nblk,blksz)
            # The C code wrote into tempimage in place; store at working
            # precision.
            image.mat = tempimage.astype(default_type)

    def back_project_pix_to(self,image,fov=1.0,xc=0.,yc=0.):
        '''Pixel-driven backprojection onto image'''
        # fov/xc/yc restrict the backprojection to a circular field of view
        # of radius fov centered at (xc, yc) -- presumably; confirm against
        # the sino_proj2D extension.
        ns=self.ns
        nu=self.nu
        ds=self.ds
        du=self.du
        s0=self.s0
        u0=self.u0
        dx=image.dx
        dy=image.dy
        x0=image.x0
        y0=image.y0
        nx=image.nx
        ny=image.ny
        image.mat=array(\
            sino_proj2D.pixel_driven_backproj(\
                self.mat,\
                self.indicator,\
                self.frame_vectors,\
                ds,ns,nu,du,u0,\
                image.mat,dx,dy,x0,y0,nx,ny,\
                fov,xc,yc),\
            order = 'C')

    def weighted_back_project_to(self,image,fov=1.0,xc=0.,yc=0.):
        '''Backproject onto image with fan-beam weighting for linear-detector'''
        ns=self.ns
        nu=self.nu
        ds=self.ds
        du=self.du
        s0=self.s0
        u0=self.u0
        config=self.config_name
        dx=image.dx
        dy=image.dy
        x0=image.x0
        y0=image.y0
        nx=image.nx
        ny=image.ny
        image.mat=array(\
            sino_proj2D.weighted_pixel_driven_backproj(\
                self.mat,\
                self.indicator,\
                self.frame_vectors,\
                ds,ns,nu,du,u0,\
                image.mat,dx,dy,x0,y0,nx,ny,\
                fov,xc,yc),\
            order = 'C')
##############################################################
class image2D(object):
    '''2D image array for tomography.

    Holds an (nx, ny) pixel array ``mat`` over the physical rectangle
    starting at (x0, y0) with side lengths (xlen, ylen).
    '''
    def __init__(self,\
                 shape=default_image_shape,\
                 xlen=2.0,ylen=2.0,\
                 x0=-1.0,y0=-1.0):
        '''Initializes the image array.

        shape -- (nx, ny) pixel counts.
        xlen, ylen -- physical extent of the image.
        x0, y0 -- physical coordinates of the image origin (lower corner).
        '''
        self.mat=zeros(shape,default_type)
        self.nx=shape[0]
        self.ny=shape[1]
        self.x0=x0
        self.y0=y0
        self.xlen=xlen
        self.ylen=ylen
        self.dx=self.get_dx()
        self.dy=self.get_dy()

    def __str__(self):
        '''Gives image matrix parameters'''
        lulu = '''
nx = %d
ny = %d
dx = %f
dy = %f
x0 = %f
y0 = %f
xlen = %f
ylen = %f''' % (self.nx,self.ny,\
                self.dx,self.dy,\
                self.x0,self.y0,\
                self.xlen,self.ylen)
        return lulu

    def mag(self):
        # L2 norm of the image.
        return sqrt((self.mat**2).sum(dtype='float64'))

    def dist_to(self,image):
        '''Calculates L2 distance to image'''
        dist = sqrt( ((self.mat-image.mat)**2).sum(dtype = "float64") )
        return dist

    def duplicate(self):
        '''Returns a deep copy: same geometry, copied pixel array.'''
        new_image = image2D(\
            shape= (self.nx,self.ny),\
            xlen=self.xlen, ylen=self.ylen,\
            x0=self.x0, y0=self.y0)
        new_image.mat = self.mat.copy()
        return new_image

    def get_dx(self):
        # Pixel width.
        return self.xlen/self.nx

    def get_dy(self):
        # Pixel height.
        return self.ylen/self.ny

    def add_shape(self,shape):
        '''Puts a 2D shape in the image array
        The attenuation value for the shape is added to each pixel
        whose center is in the shape'''
        # int() in the following might not be correct for centers
        # off the image array
        dx=self.get_dx()
        dy=self.get_dy()
        nx=self.mat.shape[0]
        ny=self.mat.shape[1]
        # Pixel index of the shape center.
        ncenterx=int((shape.x0-self.x0)/dx)
        ncentery=int((shape.y0-self.y0)/dy)
        # half_square_len=max(ell.ax,ell.ay)
        # Only loop over the bounding box of the shape (shape.size is its
        # bounding radius), clipped to the image.
        nhslenx=int(shape.size/dx)+1
        nhsleny=int(shape.size/dy)+1
        nxi=max(0,ncenterx-nhslenx)
        nyi=max(0,ncentery-nhsleny)
        nxf=min(nx,ncenterx+nhslenx)
        nyf=min(ny,ncentery+nhsleny)
        for i in range(nxi,nxf):
            for j in range(nyi,nyf):
                # (x, y) is the physical center of pixel (i, j); pixval
                # returns the shape's attenuation there (0 outside).
                x=self.x0+(i+0.5)*dx
                y=self.y0+(j+0.5)*dy
                self.mat[i,j]=self.mat[i,j]+shape.pixval(x,y)

    def project_to(self,sino):
        '''Ray-driven projection onto sinogram'''
        ns=sino.ns
        nu=sino.nu
        ds=sino.ds
        du=sino.du
        s0=sino.s0
        u0=sino.u0
        config=sino.config_name
        dx=self.dx
        dy=self.dy
        x0=self.x0
        y0=self.y0
        nx=self.nx
        ny=self.ny
        # Result overwrites sino.mat; computed by the compiled sino_proj2D
        # extension.  (ds, s0 and config are looked up but not passed.)
        sino.mat=array(\
            sino_proj2D.rayproj(\
                sino.indicator,\
                sino.frame_vectors,\
                ns,nu,du,u0,\
                self.mat,dx,dy,x0,y0,nx,ny),\
            order='C')

    # Only defined when the module-level using_CUDA flag is set.
    if using_CUDA:
        def cproject_to(self,sino,nblocks=64,blocksize=4):
            '''CUDA ray-driven projection onto sinogram'''
            # The CUDA library works in single precision: convert the
            # buffers, marshal all scalars into ctypes, run, convert back.
            tempsino = sino.mat.astype('float32')
            tempimage = self.mat.astype('float32')
            tempfv = sino.frame_vectors.astype('float32')
            ns = ctypes.c_int()
            ns.value = sino.mat.shape[0]
            nu = ctypes.c_int()
            nu.value = sino.mat.shape[1]
            ds = ctypes.c_float()
            ds.value = float32(sino.get_ds())
            du = ctypes.c_float()
            du.value = float32(sino.get_du())
            s0 = ctypes.c_float()
            s0.value = float32(sino.s0)
            u0 = ctypes.c_float()
            u0.value = float32(sino.u0)
            nx = ctypes.c_int()
            nx.value=self.mat.shape[0]
            ny = ctypes.c_int()
            ny.value=self.mat.shape[1]
            dx = ctypes.c_float()
            dx.value=float32(self.get_dx())
            dy = ctypes.c_float()
            dy.value=float32(self.get_dy())
            x0 = ctypes.c_float()
            x0.value=float32(self.x0)
            y0 = ctypes.c_float()
            y0.value=float32(self.y0)
            # CUDA launch configuration.
            nblk = ctypes.c_uint()
            nblk.value=nblocks
            blksz = ctypes.c_uint()
            blksz.value=blocksize
            crayproj(ctypes.c_void_p(tempsino.ctypes.data),\
                     ctypes.c_void_p(sino.indicator.ctypes.data),\
                     ctypes.c_void_p(tempfv.ctypes.data),\
                     ns,nu,du,u0,\
                     ctypes.c_void_p(tempimage.ctypes.data),\
                     dx,dy,x0,y0,nx,ny,\
                     nblk,blksz)
            # The C code wrote into tempsino in place; store at working
            # precision.
            sino.mat = tempsino.astype(default_type)
#######################################################################
class phantom2D(object):
    '''A collection of 2D shapes.

    Components are shape2D-like objects; the phantom can be collapsed into
    an image2D pixel array or projected onto a sinogram2D, both additively.
    '''
    def __init__(self):
        # num_components is kept alongside the list for backward
        # compatibility with callers that read the attribute directly.
        self.num_components = 0
        self.components=[]

    def __str__(self):
        '''One "component N: <component>" line per component, from 1.'''
        # enumerate instead of the original 1-based index arithmetic into
        # self.components; join instead of repeated string concatenation.
        return "".join(
            "component %d: " % num + str(comp) + "\n"
            for num, comp in enumerate(self.components, start=1))

    def clear(self):
        '''Removes all components from the phantom.'''
        self.__init__()

    def add_component(self,component):
        '''Adds a shape to the phantom.'''
        self.num_components+=1
        self.components.append(component)

    def collapse_to(self,image):
        '''collapse the 2D phantom to an image array
        Note: this function is additive, so be sure to clear the image array first'''
        for component in self.components:
            image.add_shape(component)

    def project_to(self,sino):
        '''Projects phantom onto a 2D sinogram (additively).'''
        for component in self.components:
            component.project_to(sino)
#######################################################################
class shape2D(object):
    '''geometric shape class for building 2D phantoms.

    Base class: concrete shapes (e.g. ellipse) override pixval and
    project_to; the base stores center, bounding radius and attenuation.
    '''
    def __init__(self,x0=0.,y0=0.,size=0.,att=1.0):
        '''(x0,y0) is the center of the shape, and
        size is the radius that encompasses the shape'''
        self.x0=x0
        self.y0=y0
        # Bounding radius; image2D.add_shape uses it to limit its pixel loop.
        self.size=size
        # Attenuation value contributed by the shape.
        self.att=att

    def __str__(self):
        lulu="""Shape parms:
attenuation = %f
center = (%f,%f)
size = %f \n""" % (self.att,self.x0,self.y0,self.size)
        return lulu

    def pixval(self,x,y):
        # Abstract hook: subclasses return the attenuation at point (x, y);
        # the base class is a no-op stub (returns None).
        pass

    def project_to(self,sino):
        # Abstract hook: subclasses add their analytic projection to sino;
        # the base class is a no-op stub.
        pass
########################################################################
class ellipse(shape2D):
    '''Ellipse shape with semi-axes ax/ay, rotated by angle gamma.'''
    def __init__(self,x0=0.,y0=0.,att=1.0,ax=1.0,ay=1.0,gamma=0.0):
        # ax, ay: semi-axis lengths; gamma: rotation angle (radians,
        # presumably -- confirm against pixval's cos/sin usage).
        self.ax=ax
        self.ay=ay
        self.gamma=gamma
        # max(ax, ay) is a valid bounding radius even when rotated.
        size=max(ax,ay)
        shape2D.__init__(self,x0,y0,size,att)

    def __str__(self):
        lulu = "An ellipse!\n"+shape2D.__str__(self)+ '''ellipse parms:
ax = %f
ay = %f
gamma = %f \n''' % (self.ax,self.ay,self.gamma)
        return lulu

    def pixval(self,x,y):
        '''Returns attenuation value if x,y is in the ellipse'''
        # Work in coordinates relative to the ellipse center.
        rel_x = x - self.x0
        rel_y = y - self.y0
        mu=self.att
        cg=cos(self.gamma)
        sg=sin(self.gamma)
        ax=self.ax
        ay=self.ay
        # Standard rotated-ellipse inequality: rotate (rel_x, rel_y) by
        # -gamma into the ellipse frame, then test (X/ax)^2+(Y/ay)^2 <= 1.
        ellipse_lhs=((rel_x*cg+rel_y*sg)/ax)**2+((rel_y*cg-rel_x*sg)/ay)**2
        if ellipse_lhs<=1.0:
            return mu
        else:
            return 0.0

    def project_to(self,sino):
        '''Project ellipse onto predefined sino object'''
        ns=sino.mat.shape[0]
        nu=sino.mat.shape[1]
        ds=sino.get_ds()
        du=sino.get_du()
        s0=sino.s0
        u0=sino.u0
        config=sino.config_name
        ax=self.ax
        ay=self.ay
        x0=self.x0
        y0=self.y0
        gamma=self.gamma
        att=self.att
        # Additive: the ellipse's projection (computed by the compiled
        # sino_proj2D extension) is accumulated into sino.mat.  (ds, s0 and
        # config are looked up but not passed.)
        sino.mat += array(\
            sino_proj2D.ellproj(\
                sino.indicator,sino.frame_vectors,\
                ns,nu,du,u0,\
                ax,ay,x0,y0,gamma,att),\
            order = 'C')
########################################################
|
|
# Copyright 2013 eBay Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.dom import minidom
import webob
from cinder.api.contrib import qos_specs_manage
from cinder import exception
from cinder import test
from cinder.tests.api import fakes
from cinder.tests import fake_notifier
from cinder.volume import qos_specs
def stub_qos_specs(id):
    """Build a fake qos_specs dict for the given id.

    The name is derived from the id, the consumer is 'back-end', and five
    dummy spec entries are included.
    """
    specs = {"key1": "value1",
             "key2": "value2",
             "key3": "value3",
             "key4": "value4",
             "key5": "value5"}
    # Single dict literal instead of the original chain of
    # res.update(dict(...)) calls; same resulting mapping.
    return {"name": "qos_specs_" + str(id),
            "consumer": "back-end",
            "id": str(id),
            "specs": specs}
def stub_qos_associates(id):
    """Return a one-element fake association list (id is ignored)."""
    association = {
        'association_type': 'volume_type',
        'name': 'FakeVolTypeName',
        'id': 'FakeVolTypeID',
    }
    return [association]
def return_qos_specs_get_all(context):
    """Stub for qos_specs.get_all_specs: three fake entries."""
    return [stub_qos_specs(n) for n in range(1, 4)]
def return_qos_specs_get_qos_specs(context, id):
    """Stub for qos_specs.get_qos_specs: '777' -> not found."""
    if id != "777":
        return stub_qos_specs(int(id))
    raise exception.QoSSpecsNotFound(specs_id=id)
def return_qos_specs_delete(context, id, force):
    """Stub for qos_specs.delete: '777' -> not found, '666' -> in use."""
    failures = {
        "777": lambda: exception.QoSSpecsNotFound(specs_id=id),
        "666": lambda: exception.QoSSpecsInUse(specs_id=id),
    }
    make_error = failures.get(id)
    if make_error is not None:
        raise make_error()
def return_qos_specs_delete_keys(context, id, keys):
    """Stub for qos_specs.delete_keys.

    '777' -> specs not found; a 'foo' key -> key not found.
    """
    if id == "777":
        raise exception.QoSSpecsNotFound(specs_id=id)
    if 'foo' in keys:
        raise exception.QoSSpecsKeyNotFound(specs_id=id, specs_key='foo')
def return_qos_specs_update(context, id, specs):
    """Stub for qos_specs.update.

    '777' -> not found, '888' -> invalid input, '999' -> update failed;
    anything else succeeds silently.
    """
    if id == "777":
        raise exception.QoSSpecsNotFound(specs_id=id)
    if id == "888":
        raise exception.InvalidQoSSpecs(reason=id)
    if id == "999":
        raise exception.QoSSpecsUpdateFailed(specs_id=id, qos_specs=specs)
def return_qos_specs_create(context, name, specs):
    """Stub for qos_specs.create.

    '666' -> already exists, '555' -> create failed; anything else
    succeeds silently.
    """
    if name == "666":
        raise exception.QoSSpecsExists(specs_id=name)
    elif name == "555":
        # Bug fix: the original passed name=id, i.e. the *builtin* id
        # function, instead of the name argument.
        raise exception.QoSSpecsCreateFailed(name=name, qos_specs=specs)
def return_qos_specs_get_by_name(context, name):
    """Stub for qos_specs.get_qos_specs_by_name.

    '777' -> not found; otherwise the numeric suffix of a
    'qos_specs_<N>' name selects the fake specs.
    """
    if name == "777":
        raise exception.QoSSpecsNotFound(specs_id=name)
    suffix = name.split("_")[2]
    return stub_qos_specs(int(suffix))
def return_get_qos_associations(context, id):
    """Stub for qos_specs.get_associations.

    '111' -> not found, '222' -> generic failure; otherwise a fake list.
    """
    if id == "111":
        raise exception.QoSSpecsNotFound(specs_id=id)
    if id == "222":
        raise exception.CinderException()
    return stub_qos_associates(id)
def return_associate_qos_specs(context, id, type_id):
    """Stub for (dis)associate operations.

    '111' -> specs not found, '222' -> associate failed, '333' ->
    disassociate failed; type_id '1234' -> volume type not found.
    """
    if id == "111":
        raise exception.QoSSpecsNotFound(specs_id=id)
    if id == "222":
        raise exception.QoSSpecsAssociateFailed(specs_id=id,
                                                type_id=type_id)
    if id == "333":
        raise exception.QoSSpecsDisassociateFailed(specs_id=id,
                                                   type_id=type_id)
    if type_id == "1234":
        raise exception.VolumeTypeNotFound(
            volume_type_id=type_id)
def return_disassociate_all(context, id):
    """Stub for qos_specs.disassociate_all.

    '111' -> specs not found, '222' -> disassociate failed.
    """
    if id == "111":
        raise exception.QoSSpecsNotFound(specs_id=id)
    if id == "222":
        raise exception.QoSSpecsDisassociateFailed(specs_id=id,
                                                   type_id=None)
class QoSSpecManageApiTest(test.TestCase):
    """Tests for the qos-specs-manage API extension controller.

    The qos_specs module functions are stubbed out with the return_*
    helpers above; magic id/name values select the failure mode:
    '777' -> not found, '666' -> in use / already exists, '888' ->
    invalid input, '555'/'999'/'222'/'333' -> internal failure,
    type_id '1234' -> volume type not found.  Each controller call is
    expected to emit exactly one notification (on success AND on failure),
    which the tests check via fake_notifier.NOTIFICATIONS.
    """

    def setUp(self):
        super(QoSSpecManageApiTest, self).setUp()
        self.flags(host='fake')
        self.controller = qos_specs_manage.QoSSpecsController()
        #reset notifier drivers left over from other api/contrib tests
        # NOTE(flaper87) WTF? ^^^^ Cleanups should happen in each test,
        # not the purpose of this patch, though.
        fake_notifier.reset()
        self.addCleanup(fake_notifier.reset)

    def test_index(self):
        """index returns all specs with their names and spec values."""
        self.stubs.Set(qos_specs, 'get_all_specs',
                       return_qos_specs_get_all)

        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
        res = self.controller.index(req)

        self.assertEqual(3, len(res['qos_specs']))

        names = set()
        for item in res['qos_specs']:
            self.assertEqual('value1', item['specs']['key1'])
            names.add(item['name'])
        expected_names = ['qos_specs_1', 'qos_specs_2', 'qos_specs_3']
        self.assertEqual(names, set(expected_names))

    def test_index_xml_response(self):
        """index rendered as XML through the full WSGI app."""
        self.stubs.Set(qos_specs, 'get_all_specs',
                       return_qos_specs_get_all)

        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
        res = self.controller.index(req)
        # Re-issue the request through the WSGI app asking for XML.
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app())

        self.assertEqual(res.status_int, 200)
        dom = minidom.parseString(res.body)
        qos_specs_response = dom.getElementsByTagName('qos_spec')

        names = set()
        for qos_spec in qos_specs_response:
            name = qos_spec.getAttribute('name')
            names.add(name)

        expected_names = ['qos_specs_1', 'qos_specs_2', 'qos_specs_3']
        self.assertEqual(names, set(expected_names))

    def test_qos_specs_delete(self):
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'delete',
                       return_qos_specs_delete)
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1')
        # Exactly one notification must be emitted by the delete.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.controller.delete(req, 1)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_qos_specs_delete_not_found(self):
        """Deleting unknown id '777' maps to 404; still notifies once."""
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'delete',
                       return_qos_specs_delete)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, '777')
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_qos_specs_delete_inuse(self):
        """Deleting in-use id '666' without force maps to 400."""
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'delete',
                       return_qos_specs_delete)

        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666')
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          req, '666')
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_qos_specs_delete_inuse_force(self):
        """force=True on an in-use id maps to 500 (stub still raises)."""
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'delete',
                       return_qos_specs_delete)

        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666?force=True')
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.delete,
                          req, '666')
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_qos_specs_delete_keys(self):
        self.stubs.Set(qos_specs, 'delete_keys',
                       return_qos_specs_delete_keys)
        body = {"keys": ['bar', 'zoo']}
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys')

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.controller.delete_keys(req, '666', body)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_qos_specs_delete_keys_qos_notfound(self):
        self.stubs.Set(qos_specs, 'delete_keys',
                       return_qos_specs_delete_keys)
        body = {"keys": ['bar', 'zoo']}
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777/delete_keys')

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete_keys,
                          req, '777', body)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_qos_specs_delete_keys_badkey(self):
        """Unknown spec key 'foo' maps to 400."""
        self.stubs.Set(qos_specs, 'delete_keys',
                       return_qos_specs_delete_keys)
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys')
        body = {"keys": ['foo', 'zoo']}

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.delete_keys,
                          req, '666', body)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_create(self):
        self.stubs.Set(qos_specs, 'create',
                       return_qos_specs_create)
        self.stubs.Set(qos_specs, 'get_qos_specs_by_name',
                       return_qos_specs_get_by_name)

        body = {"qos_specs": {"name": "qos_specs_1",
                              "key1": "value1"}}
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        res_dict = self.controller.create(req, body)

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
        self.assertEqual('qos_specs_1', res_dict['qos_specs']['name'])

    def test_create_conflict(self):
        """Name '666' already exists -> 409."""
        self.stubs.Set(qos_specs, 'create',
                       return_qos_specs_create)
        self.stubs.Set(qos_specs, 'get_qos_specs_by_name',
                       return_qos_specs_get_by_name)

        body = {"qos_specs": {"name": "666",
                              "key1": "value1"}}
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller.create, req, body)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_create_failed(self):
        """Name '555' triggers a create failure -> 500."""
        self.stubs.Set(qos_specs, 'create',
                       return_qos_specs_create)
        self.stubs.Set(qos_specs, 'get_qos_specs_by_name',
                       return_qos_specs_get_by_name)

        body = {"qos_specs": {"name": "555",
                              "key1": "value1"}}
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.create, req, body)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def _create_qos_specs_bad_body(self, body):
        # Helper: every malformed create body must map to 400.
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
        req.method = 'POST'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, body)

    def test_create_no_body(self):
        self._create_qos_specs_bad_body(body=None)

    def test_create_missing_specs_name(self):
        body = {'foo': {'a': 'b'}}
        self._create_qos_specs_bad_body(body=body)

    def test_create_malformed_entity(self):
        body = {'qos_specs': 'string'}
        self._create_qos_specs_bad_body(body=body)

    def test_update(self):
        self.stubs.Set(qos_specs, 'update',
                       return_qos_specs_update)

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/555')
        body = {'qos_specs': {'key1': 'value1',
                              'key2': 'value2'}}
        res = self.controller.update(req, '555', body)
        # update echoes the request body back on success.
        self.assertDictMatch(res, body)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_update_not_found(self):
        self.stubs.Set(qos_specs, 'update',
                       return_qos_specs_update)

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777')
        body = {'qos_specs': {'key1': 'value1',
                              'key2': 'value2'}}
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          req, '777', body)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_update_invalid_input(self):
        """Id '888' triggers InvalidQoSSpecs -> 400."""
        self.stubs.Set(qos_specs, 'update',
                       return_qos_specs_update)

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/888')
        body = {'qos_specs': {'key1': 'value1',
                              'key2': 'value2'}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update,
                          req, '888', body)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_update_failed(self):
        """Id '999' triggers an update failure -> 500."""
        self.stubs.Set(qos_specs, 'update',
                       return_qos_specs_update)

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/999')
        body = {'qos_specs': {'key1': 'value1',
                              'key2': 'value2'}}
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.update,
                          req, '999', body)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)

    def test_show(self):
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)

        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1')
        res_dict = self.controller.show(req, '1')

        self.assertEqual('1', res_dict['qos_specs']['id'])
        self.assertEqual('qos_specs_1', res_dict['qos_specs']['name'])

    def test_show_xml_response(self):
        """show rendered as XML exposes id/name/consumer attributes."""
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)

        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1')
        res = self.controller.show(req, '1')
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app())

        self.assertEqual(res.status_int, 200)
        dom = minidom.parseString(res.body)
        qos_spec_response = dom.getElementsByTagName('qos_spec')
        qos_spec = qos_spec_response.item(0)

        id = qos_spec.getAttribute('id')
        name = qos_spec.getAttribute('name')
        consumer = qos_spec.getAttribute('consumer')

        self.assertEqual(id, u'1')
        self.assertEqual(name, 'qos_specs_1')
        self.assertEqual(consumer, 'back-end')

    def test_get_associations(self):
        self.stubs.Set(qos_specs, 'get_associations',
                       return_get_qos_associations)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/1/associations')
        res = self.controller.associations(req, '1')

        self.assertEqual('FakeVolTypeName',
                         res['qos_associations'][0]['name'])
        self.assertEqual('FakeVolTypeID',
                         res['qos_associations'][0]['id'])

    def test_get_associations_xml_response(self):
        self.stubs.Set(qos_specs, 'get_associations',
                       return_get_qos_associations)

        req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1/associations')
        res = self.controller.associations(req, '1')
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app())

        self.assertEqual(res.status_int, 200)
        dom = minidom.parseString(res.body)
        associations_response = dom.getElementsByTagName('associations')
        association = associations_response.item(0)

        id = association.getAttribute('id')
        name = association.getAttribute('name')
        association_type = association.getAttribute('association_type')

        self.assertEqual(id, 'FakeVolTypeID')
        self.assertEqual(name, 'FakeVolTypeName')
        self.assertEqual(association_type, 'volume_type')

    def test_get_associations_not_found(self):
        self.stubs.Set(qos_specs, 'get_associations',
                       return_get_qos_associations)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/111/associations')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.associations,
                          req, '111')

    def test_get_associations_failed(self):
        self.stubs.Set(qos_specs, 'get_associations',
                       return_get_qos_associations)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/222/associations')
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.associations,
                          req, '222')

    def test_associate(self):
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'associate_qos_with_type',
                       return_associate_qos_specs)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/1/associate?vol_type_id=111')
        res = self.controller.associate(req, '1')

        self.assertEqual(res.status_int, 202)

    def test_associate_no_type(self):
        """Missing vol_type_id query parameter -> 400."""
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'associate_qos_with_type',
                       return_associate_qos_specs)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/1/associate')

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.associate, req, '1')

    def test_associate_not_found(self):
        # Both an unknown specs id and an unknown volume type -> 404.
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'associate_qos_with_type',
                       return_associate_qos_specs)
        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/111/associate?vol_type_id=12')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.associate, req, '111')

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/1/associate?vol_type_id=1234')

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.associate, req, '1')

    def test_associate_fail(self):
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'associate_qos_with_type',
                       return_associate_qos_specs)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/222/associate?vol_type_id=1000')
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.associate, req, '222')

    def test_disassociate(self):
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'disassociate_qos_specs',
                       return_associate_qos_specs)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/1/disassociate?vol_type_id=111')
        res = self.controller.disassociate(req, '1')

        self.assertEqual(res.status_int, 202)

    def test_disassociate_no_type(self):
        """Missing vol_type_id query parameter -> 400."""
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'disassociate_qos_specs',
                       return_associate_qos_specs)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/1/disassociate')

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.disassociate, req, '1')

    def test_disassociate_not_found(self):
        # Both an unknown specs id and an unknown volume type -> 404.
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'disassociate_qos_specs',
                       return_associate_qos_specs)
        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/111/disassociate?vol_type_id=12')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.disassociate, req, '111')

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/1/disassociate?vol_type_id=1234')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.disassociate, req, '1')

    def test_disassociate_failed(self):
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'disassociate_qos_specs',
                       return_associate_qos_specs)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/333/disassociate?vol_type_id=1000')
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.disassociate, req, '333')

    def test_disassociate_all(self):
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'disassociate_all',
                       return_disassociate_all)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/1/disassociate_all')
        res = self.controller.disassociate_all(req, '1')

        self.assertEqual(res.status_int, 202)

    def test_disassociate_all_not_found(self):
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'disassociate_all',
                       return_disassociate_all)
        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/111/disassociate_all')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.disassociate_all, req, '111')

    def test_disassociate_all_failed(self):
        self.stubs.Set(qos_specs, 'get_qos_specs',
                       return_qos_specs_get_qos_specs)
        self.stubs.Set(qos_specs, 'disassociate_all',
                       return_disassociate_all)

        req = fakes.HTTPRequest.blank(
            '/v2/fake/qos-specs/222/disassociate_all')
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.disassociate_all, req, '222')
|
|
"""Support for Snips on-device ASR and NLU."""
from datetime import timedelta
import json
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, intent
DOMAIN = "snips"

# configuration.yaml option keys
CONF_INTENTS = "intents"
CONF_ACTION = "action"
CONF_FEEDBACK = "feedback_sounds"
CONF_PROBABILITY = "probability_threshold"
CONF_SITE_IDS = "site_ids"

# Services registered by this component under the "snips" domain.
SERVICE_SAY = "say"
SERVICE_SAY_ACTION = "say_action"
SERVICE_FEEDBACK_ON = "feedback_on"
SERVICE_FEEDBACK_OFF = "feedback_off"

# Hermes MQTT topics used by Snips.
INTENT_TOPIC = "hermes/intent/#"
FEEDBACK_ON_TOPIC = "hermes/feedback/sound/toggleOn"
FEEDBACK_OFF_TOPIC = "hermes/feedback/sound/toggleOff"

# Service-call data attributes.
ATTR_TEXT = "text"
ATTR_SITE_ID = "site_id"
ATTR_CUSTOM_DATA = "custom_data"
ATTR_CAN_BE_ENQUEUED = "can_be_enqueued"
ATTR_INTENT_FILTER = "intent_filter"

_LOGGER = logging.getLogger(__name__)

# Schema for the `snips:` section of configuration.yaml.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_FEEDBACK): cv.boolean,
                vol.Optional(CONF_PROBABILITY, default=0): vol.Coerce(float),
                vol.Optional(CONF_SITE_IDS, default=["default"]): vol.All(
                    cv.ensure_list, [cv.string]
                ),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Expected shape of an intent payload published on hermes/intent/#.
INTENT_SCHEMA = vol.Schema(
    {
        vol.Required("input"): str,
        vol.Required("intent"): {vol.Required("intentName"): str},
        vol.Optional("slots"): [
            {
                vol.Required("slotName"): str,
                vol.Required("value"): {
                    vol.Required("kind"): str,
                    vol.Optional("value"): cv.match_all,
                    vol.Optional("rawValue"): cv.match_all,
                },
            }
        ],
    },
    extra=vol.ALLOW_EXTRA,
)

# Schema for the snips.say service.
SERVICE_SCHEMA_SAY = vol.Schema(
    {
        vol.Required(ATTR_TEXT): str,
        vol.Optional(ATTR_SITE_ID, default="default"): str,
        vol.Optional(ATTR_CUSTOM_DATA, default=""): str,
    }
)

# Schema for the snips.say_action service (starts a listening session).
SERVICE_SCHEMA_SAY_ACTION = vol.Schema(
    {
        vol.Required(ATTR_TEXT): str,
        vol.Optional(ATTR_SITE_ID, default="default"): str,
        vol.Optional(ATTR_CUSTOM_DATA, default=""): str,
        vol.Optional(ATTR_CAN_BE_ENQUEUED, default=True): cv.boolean,
        vol.Optional(ATTR_INTENT_FILTER): vol.All(cv.ensure_list),
    }
)

# Schema shared by snips.feedback_on / snips.feedback_off.
SERVICE_SCHEMA_FEEDBACK = vol.Schema(
    {vol.Optional(ATTR_SITE_ID, default="default"): str}
)
async def async_setup(hass, config):
    """Activate the Snips component.

    Subscribes to Hermes intent messages, forwards validated intents to
    Home Assistant's intent machinery, and registers the say/say_action
    and feedback toggle services.
    """

    @callback
    def async_set_feedback(site_ids, state):
        """Publish the feedback-sound state for the given (or configured) sites."""
        site_ids = site_ids if site_ids else config[DOMAIN].get(CONF_SITE_IDS)
        topic = FEEDBACK_ON_TOPIC if state else FEEDBACK_OFF_TOPIC
        for site_id in site_ids:
            payload = json.dumps({"siteId": site_id})
            # Empty publish first — presumably clears a retained toggleOn
            # message so stale state does not stick (TODO confirm).
            hass.components.mqtt.async_publish(
                FEEDBACK_ON_TOPIC, None, qos=0, retain=False
            )
            hass.components.mqtt.async_publish(
                topic, payload, qos=int(state), retain=state
            )

    if CONF_FEEDBACK in config[DOMAIN]:
        async_set_feedback(None, config[DOMAIN][CONF_FEEDBACK])

    async def message_received(msg):
        """Handle a new intent message arriving on MQTT."""
        _LOGGER.debug("New intent: %s", msg.payload)
        try:
            request = json.loads(msg.payload)
        # Bug fix: json.loads raises ValueError (JSONDecodeError) for
        # malformed JSON and TypeError for a non-string payload; only
        # TypeError was caught before, so bad JSON crashed the handler.
        except (TypeError, ValueError):
            _LOGGER.error("Received invalid JSON: %s", msg.payload)
            return
        # Drop low-confidence recognitions before schema validation.
        if request["intent"]["confidenceScore"] < config[DOMAIN].get(CONF_PROBABILITY):
            _LOGGER.warning(
                "Intent below probability threshold %s < %s",
                request["intent"]["confidenceScore"],
                config[DOMAIN].get(CONF_PROBABILITY),
            )
            return
        try:
            request = INTENT_SCHEMA(request)
        except vol.Invalid as err:
            _LOGGER.error("Intent has invalid schema: %s. %s", err, request)
            return
        # Strip the snips username prefix ("user_xxx__intent" or "user:intent").
        if request["intent"]["intentName"].startswith("user_"):
            intent_type = request["intent"]["intentName"].split("__")[-1]
        else:
            intent_type = request["intent"]["intentName"].split(":")[-1]
        slots = {}
        for slot in request.get("slots", []):
            slots[slot["slotName"]] = {"value": resolve_slot_values(slot)}
            slots["{}_raw".format(slot["slotName"])] = {"value": slot["rawValue"]}
        # Expose message metadata to the intent handler as pseudo-slots.
        slots["site_id"] = {"value": request.get("siteId")}
        slots["session_id"] = {"value": request.get("sessionId")}
        slots["confidenceScore"] = {"value": request["intent"]["confidenceScore"]}
        try:
            intent_response = await intent.async_handle(
                hass, DOMAIN, intent_type, slots, request["input"]
            )
            notification = {"sessionId": request.get("sessionId", "default")}
            if "plain" in intent_response.speech:
                notification["text"] = intent_response.speech["plain"]["speech"]
            _LOGGER.debug("send_response %s", json.dumps(notification))
            mqtt.async_publish(
                hass, "hermes/dialogueManager/endSession", json.dumps(notification)
            )
        except intent.UnknownIntent:
            _LOGGER.warning(
                "Received unknown intent %s", request["intent"]["intentName"]
            )
        except intent.IntentError:
            _LOGGER.exception("Error while handling intent: %s", intent_type)

    await hass.components.mqtt.async_subscribe(INTENT_TOPIC, message_received)

    async def snips_say(call):
        """Send a Snips notification message (speak only, no listening)."""
        notification = {
            "siteId": call.data.get(ATTR_SITE_ID, "default"),
            "customData": call.data.get(ATTR_CUSTOM_DATA, ""),
            "init": {"type": "notification", "text": call.data.get(ATTR_TEXT)},
        }
        mqtt.async_publish(
            hass, "hermes/dialogueManager/startSession", json.dumps(notification)
        )

    async def snips_say_action(call):
        """Send a Snips action message (speak, then listen for an intent)."""
        notification = {
            "siteId": call.data.get(ATTR_SITE_ID, "default"),
            "customData": call.data.get(ATTR_CUSTOM_DATA, ""),
            "init": {
                "type": "action",
                "text": call.data.get(ATTR_TEXT),
                "canBeEnqueued": call.data.get(ATTR_CAN_BE_ENQUEUED, True),
                "intentFilter": call.data.get(ATTR_INTENT_FILTER, []),
            },
        }
        mqtt.async_publish(
            hass, "hermes/dialogueManager/startSession", json.dumps(notification)
        )

    async def feedback_on(call):
        """Turn feedback sounds on."""
        async_set_feedback(call.data.get(ATTR_SITE_ID), True)

    async def feedback_off(call):
        """Turn feedback sounds off."""
        async_set_feedback(call.data.get(ATTR_SITE_ID), False)

    hass.services.async_register(
        DOMAIN, SERVICE_SAY, snips_say, schema=SERVICE_SCHEMA_SAY
    )
    hass.services.async_register(
        DOMAIN, SERVICE_SAY_ACTION, snips_say_action, schema=SERVICE_SCHEMA_SAY_ACTION
    )
    hass.services.async_register(
        DOMAIN, SERVICE_FEEDBACK_ON, feedback_on, schema=SERVICE_SCHEMA_FEEDBACK
    )
    hass.services.async_register(
        DOMAIN, SERVICE_FEEDBACK_OFF, feedback_off, schema=SERVICE_SCHEMA_FEEDBACK
    )
    return True
def resolve_slot_values(slot):
    """Convert a Snips slot payload into a plain Python value.

    Prefers the resolved ``value.value`` and falls back to ``rawValue``.
    For ``snips/duration`` entities, returns the duration in whole seconds.
    """
    if "value" in slot["value"]:
        value = slot["value"]["value"]
    else:
        value = slot["rawValue"]
    if slot.get("entity") == "snips/duration":
        delta = timedelta(
            weeks=slot["value"]["weeks"],
            days=slot["value"]["days"],
            hours=slot["value"]["hours"],
            minutes=slot["value"]["minutes"],
            seconds=slot["value"]["seconds"],
        )
        # Bug fix: timedelta.seconds only holds the within-day remainder, so
        # any days/weeks component was silently dropped. total_seconds()
        # covers the full duration; int() keeps the integer return type.
        value = int(delta.total_seconds())
    return value
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from base64 import b64encode
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.models import DAG, TaskInstance
from airflow.providers.sftp.operators.sftp import SFTPOperation, SFTPOperator
from airflow.providers.ssh.operators.ssh import SSHOperator
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
TEST_DAG_ID = 'unit_tests_sftp_op'
DEFAULT_DATE = datetime(2017, 1, 1)
TEST_CONN_ID = "conn_id_for_testing"
class TestSFTPOperator(unittest.TestCase):
    """Integration tests for SFTPOperator PUT/GET transfers.

    NOTE(review): these tests assume an SSH server reachable via the
    ``ssh_default`` connection on the local machine, with /tmp shared
    between "local" and "remote" sides — confirm before running in CI.
    """

    def setUp(self):
        # Imported here to avoid importing paramiko at module load time.
        from airflow.providers.ssh.hooks.ssh import SSHHook

        hook = SSHHook(ssh_conn_id='ssh_default')
        hook.no_host_key_check = True
        args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE,
        }
        dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once', default_args=args)
        dag.schedule_interval = '@once'
        self.hook = hook
        self.dag = dag
        # The *_int_dir paths include a not-yet-existing intermediate
        # directory, used to exercise create_intermediate_dirs.
        self.test_dir = "/tmp"
        self.test_local_dir = "/tmp/tmp2"
        self.test_remote_dir = "/tmp/tmp1"
        self.test_local_filename = 'test_local_file'
        self.test_remote_filename = 'test_remote_file'
        self.test_local_filepath = f'{self.test_dir}/{self.test_local_filename}'
        # Local Filepath with Intermediate Directory
        self.test_local_filepath_int_dir = f'{self.test_local_dir}/{self.test_local_filename}'
        self.test_remote_filepath = f'{self.test_dir}/{self.test_remote_filename}'
        # Remote Filepath with Intermediate Directory
        self.test_remote_filepath_int_dir = f'{self.test_remote_dir}/{self.test_remote_filename}'

    @conf_vars({('core', 'enable_xcom_pickling'): 'True'})
    def test_pickle_file_transfer_put(self):
        """PUT a local file and verify its remote content via an SSH cat (pickled XCom)."""
        test_local_file_content = (
            b"This is local file content \n which is multiline "
            b"continuing....with other character\nanother line here \n this is last line"
        )
        # create a test file locally
        with open(self.test_local_filepath, 'wb') as file:
            file.write(test_local_file_content)
        # put test file to remote
        put_test_task = SFTPOperator(
            task_id="put_test_task",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.PUT,
            create_intermediate_dirs=True,
            dag=self.dag,
        )
        assert put_test_task is not None
        ti2 = TaskInstance(task=put_test_task, execution_date=timezone.utcnow())
        ti2.run()
        # check the remote file content
        check_file_task = SSHOperator(
            task_id="check_file_task",
            ssh_hook=self.hook,
            command=f"cat {self.test_remote_filepath}",
            do_xcom_push=True,
            dag=self.dag,
        )
        assert check_file_task is not None
        ti3 = TaskInstance(task=check_file_task, execution_date=timezone.utcnow())
        ti3.run()
        assert (
            ti3.xcom_pull(task_ids=check_file_task.task_id, key='return_value').strip()
            == test_local_file_content
        )

    @conf_vars({('core', 'enable_xcom_pickling'): 'True'})
    def test_file_transfer_no_intermediate_dir_error_put(self):
        """PUT into a missing remote directory fails when intermediate dirs are disabled."""
        test_local_file_content = (
            b"This is local file content \n which is multiline "
            b"continuing....with other character\nanother line here \n this is last line"
        )
        # create a test file locally
        with open(self.test_local_filepath, 'wb') as file:
            file.write(test_local_file_content)
        # Try to put test file to remote
        # This should raise an error with "No such file" as the directory
        # does not exist
        with pytest.raises(Exception) as ctx:
            put_test_task = SFTPOperator(
                task_id="test_sftp",
                ssh_hook=self.hook,
                local_filepath=self.test_local_filepath,
                remote_filepath=self.test_remote_filepath_int_dir,
                operation=SFTPOperation.PUT,
                create_intermediate_dirs=False,
                dag=self.dag,
            )
            assert put_test_task is not None
            ti2 = TaskInstance(task=put_test_task, execution_date=timezone.utcnow())
            ti2.run()
        assert 'No such file' in str(ctx.value)

    @conf_vars({('core', 'enable_xcom_pickling'): 'True'})
    def test_file_transfer_with_intermediate_dir_put(self):
        """PUT into a missing remote directory succeeds with create_intermediate_dirs."""
        test_local_file_content = (
            b"This is local file content \n which is multiline "
            b"continuing....with other character\nanother line here \n this is last line"
        )
        # create a test file locally
        with open(self.test_local_filepath, 'wb') as file:
            file.write(test_local_file_content)
        # put test file to remote
        put_test_task = SFTPOperator(
            task_id="test_sftp",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath_int_dir,
            operation=SFTPOperation.PUT,
            create_intermediate_dirs=True,
            dag=self.dag,
        )
        assert put_test_task is not None
        ti2 = TaskInstance(task=put_test_task, execution_date=timezone.utcnow())
        ti2.run()
        # check the remote file content
        check_file_task = SSHOperator(
            task_id="test_check_file",
            ssh_hook=self.hook,
            command=f"cat {self.test_remote_filepath_int_dir}",
            do_xcom_push=True,
            dag=self.dag,
        )
        assert check_file_task is not None
        ti3 = TaskInstance(task=check_file_task, execution_date=timezone.utcnow())
        ti3.run()
        assert (
            ti3.xcom_pull(task_ids='test_check_file', key='return_value').strip() == test_local_file_content
        )

    @conf_vars({('core', 'enable_xcom_pickling'): 'False'})
    def test_json_file_transfer_put(self):
        """PUT with JSON XCom serialization: pulled value is base64-encoded."""
        test_local_file_content = (
            b"This is local file content \n which is multiline "
            b"continuing....with other character\nanother line here \n this is last line"
        )
        # create a test file locally
        with open(self.test_local_filepath, 'wb') as file:
            file.write(test_local_file_content)
        # put test file to remote
        put_test_task = SFTPOperator(
            task_id="put_test_task",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.PUT,
            dag=self.dag,
        )
        assert put_test_task is not None
        ti2 = TaskInstance(task=put_test_task, execution_date=timezone.utcnow())
        ti2.run()
        # check the remote file content
        check_file_task = SSHOperator(
            task_id="check_file_task",
            ssh_hook=self.hook,
            command=f"cat {self.test_remote_filepath}",
            do_xcom_push=True,
            dag=self.dag,
        )
        assert check_file_task is not None
        ti3 = TaskInstance(task=check_file_task, execution_date=timezone.utcnow())
        ti3.run()
        # JSON XCom cannot carry raw bytes, so SSHOperator b64-encodes stdout.
        assert ti3.xcom_pull(task_ids=check_file_task.task_id, key='return_value').strip() == b64encode(
            test_local_file_content
        ).decode('utf-8')

    @conf_vars({('core', 'enable_xcom_pickling'): 'True'})
    def test_pickle_file_transfer_get(self):
        """GET a remote file created via SSH and verify the local copy."""
        test_remote_file_content = (
            "This is remote file content \n which is also multiline "
            "another line here \n this is last line. EOF"
        )
        # create a test file remotely
        create_file_task = SSHOperator(
            task_id="test_create_file",
            ssh_hook=self.hook,
            command=f"echo '{test_remote_file_content}' > {self.test_remote_filepath}",
            do_xcom_push=True,
            dag=self.dag,
        )
        assert create_file_task is not None
        ti1 = TaskInstance(task=create_file_task, execution_date=timezone.utcnow())
        ti1.run()
        # get remote file to local
        get_test_task = SFTPOperator(
            task_id="test_sftp",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.GET,
            dag=self.dag,
        )
        assert get_test_task is not None
        ti2 = TaskInstance(task=get_test_task, execution_date=timezone.utcnow())
        ti2.run()
        # test the received content
        content_received = None
        with open(self.test_local_filepath) as file:
            content_received = file.read()
        assert content_received.strip() == test_remote_file_content

    @conf_vars({('core', 'enable_xcom_pickling'): 'False'})
    def test_json_file_transfer_get(self):
        """GET with JSON XCom serialization enabled."""
        test_remote_file_content = (
            "This is remote file content \n which is also multiline "
            "another line here \n this is last line. EOF"
        )
        # create a test file remotely
        create_file_task = SSHOperator(
            task_id="test_create_file",
            ssh_hook=self.hook,
            command=f"echo '{test_remote_file_content}' > {self.test_remote_filepath}",
            do_xcom_push=True,
            dag=self.dag,
        )
        assert create_file_task is not None
        ti1 = TaskInstance(task=create_file_task, execution_date=timezone.utcnow())
        ti1.run()
        # get remote file to local
        get_test_task = SFTPOperator(
            task_id="test_sftp",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.GET,
            dag=self.dag,
        )
        assert get_test_task is not None
        ti2 = TaskInstance(task=get_test_task, execution_date=timezone.utcnow())
        ti2.run()
        # test the received content
        content_received = None
        with open(self.test_local_filepath) as file:
            content_received = file.read()
        # NOTE(review): encode().decode() round-trip is a no-op here.
        assert content_received.strip() == test_remote_file_content.encode('utf-8').decode('utf-8')

    @conf_vars({('core', 'enable_xcom_pickling'): 'True'})
    def test_file_transfer_no_intermediate_dir_error_get(self):
        """GET into a missing local directory fails without create_intermediate_dirs."""
        test_remote_file_content = (
            "This is remote file content \n which is also multiline "
            "another line here \n this is last line. EOF"
        )
        # create a test file remotely
        create_file_task = SSHOperator(
            task_id="test_create_file",
            ssh_hook=self.hook,
            command=f"echo '{test_remote_file_content}' > {self.test_remote_filepath}",
            do_xcom_push=True,
            dag=self.dag,
        )
        assert create_file_task is not None
        ti1 = TaskInstance(task=create_file_task, execution_date=timezone.utcnow())
        ti1.run()
        # Try to GET test file from remote
        # This should raise an error with "No such file" as the directory
        # does not exist
        with pytest.raises(Exception) as ctx:
            get_test_task = SFTPOperator(
                task_id="test_sftp",
                ssh_hook=self.hook,
                local_filepath=self.test_local_filepath_int_dir,
                remote_filepath=self.test_remote_filepath,
                operation=SFTPOperation.GET,
                dag=self.dag,
            )
            assert get_test_task is not None
            ti2 = TaskInstance(task=get_test_task, execution_date=timezone.utcnow())
            ti2.run()
        assert 'No such file' in str(ctx.value)

    @conf_vars({('core', 'enable_xcom_pickling'): 'True'})
    def test_file_transfer_with_intermediate_dir_error_get(self):
        """GET into a missing local directory succeeds with create_intermediate_dirs."""
        test_remote_file_content = (
            "This is remote file content \n which is also multiline "
            "another line here \n this is last line. EOF"
        )
        # create a test file remotely
        create_file_task = SSHOperator(
            task_id="test_create_file",
            ssh_hook=self.hook,
            command=f"echo '{test_remote_file_content}' > {self.test_remote_filepath}",
            do_xcom_push=True,
            dag=self.dag,
        )
        assert create_file_task is not None
        ti1 = TaskInstance(task=create_file_task, execution_date=timezone.utcnow())
        ti1.run()
        # get remote file to local
        get_test_task = SFTPOperator(
            task_id="test_sftp",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath_int_dir,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.GET,
            create_intermediate_dirs=True,
            dag=self.dag,
        )
        assert get_test_task is not None
        ti2 = TaskInstance(task=get_test_task, execution_date=timezone.utcnow())
        ti2.run()
        # test the received content
        content_received = None
        with open(self.test_local_filepath_int_dir) as file:
            content_received = file.read()
        assert content_received.strip() == test_remote_file_content

    @mock.patch.dict('os.environ', {'AIRFLOW_CONN_' + TEST_CONN_ID.upper(): "ssh://test_id@localhost"})
    def test_arg_checking(self):
        """ssh_hook/ssh_conn_id precedence: hook wins when valid, conn_id is fallback."""
        # Exception should be raised if neither ssh_hook nor ssh_conn_id is provided
        with pytest.raises(AirflowException, match="Cannot operate without ssh_hook or ssh_conn_id."):
            task_0 = SFTPOperator(
                task_id="test_sftp_0",
                local_filepath=self.test_local_filepath,
                remote_filepath=self.test_remote_filepath,
                operation=SFTPOperation.PUT,
                dag=self.dag,
            )
            task_0.execute(None)
        # if ssh_hook is invalid/not provided, use ssh_conn_id to create SSHHook
        task_1 = SFTPOperator(
            task_id="test_sftp_1",
            ssh_hook="string_rather_than_SSHHook",  # invalid ssh_hook
            ssh_conn_id=TEST_CONN_ID,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.PUT,
            dag=self.dag,
        )
        try:
            task_1.execute(None)
        except Exception:
            # Only the hook resolution matters here, not the transfer itself.
            pass
        assert task_1.ssh_hook.ssh_conn_id == TEST_CONN_ID
        task_2 = SFTPOperator(
            task_id="test_sftp_2",
            ssh_conn_id=TEST_CONN_ID,  # no ssh_hook provided
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.PUT,
            dag=self.dag,
        )
        try:
            task_2.execute(None)
        except Exception:
            pass
        assert task_2.ssh_hook.ssh_conn_id == TEST_CONN_ID
        # if both valid ssh_hook and ssh_conn_id are provided, ignore ssh_conn_id
        task_3 = SFTPOperator(
            task_id="test_sftp_3",
            ssh_hook=self.hook,
            ssh_conn_id=TEST_CONN_ID,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.PUT,
            dag=self.dag,
        )
        try:
            task_3.execute(None)
        except Exception:
            pass
        assert task_3.ssh_hook.ssh_conn_id == self.hook.ssh_conn_id

    def delete_local_resource(self):
        """Remove the local test files/directories created by a test run."""
        if os.path.exists(self.test_local_filepath):
            os.remove(self.test_local_filepath)
        if os.path.exists(self.test_local_filepath_int_dir):
            os.remove(self.test_local_filepath_int_dir)
        if os.path.exists(self.test_local_dir):
            os.rmdir(self.test_local_dir)

    def delete_remote_resource(self):
        """Remove the remote test files/directories created by a test run."""
        # NOTE(review): os.path.exists only works here because "remote" is
        # the same host — confirm if tests ever run against a real remote.
        if os.path.exists(self.test_remote_filepath):
            # check the remote file content
            remove_file_task = SSHOperator(
                task_id="test_check_file",
                ssh_hook=self.hook,
                command=f"rm {self.test_remote_filepath}",
                do_xcom_push=True,
                dag=self.dag,
            )
            assert remove_file_task is not None
            ti3 = TaskInstance(task=remove_file_task, execution_date=timezone.utcnow())
            ti3.run()
        if os.path.exists(self.test_remote_filepath_int_dir):
            os.remove(self.test_remote_filepath_int_dir)
        if os.path.exists(self.test_remote_dir):
            os.rmdir(self.test_remote_dir)

    def tearDown(self):
        self.delete_local_resource()
        self.delete_remote_resource()
|
|
from base import Base as BaseTestCase
from roletester.actions.glance import image_create
from roletester.actions.glance import image_wait_for_status
from roletester.actions.nova import server_create
from roletester.actions.nova import server_delete
from roletester.actions.nova import server_wait_for_status
from roletester.actions.neutron import network_create
from roletester.actions.neutron import network_show
from roletester.actions.neutron import network_delete
from roletester.actions.neutron import subnet_create
from roletester.actions.neutron import subnet_show
from roletester.actions.neutron import subnet_delete
from roletester.actions.neutron import subnet_update
from roletester.actions.neutron import router_create
from roletester.actions.neutron import router_show
from roletester.actions.neutron import router_add_interface
from roletester.actions.neutron import router_remove_interface
from roletester.actions.neutron import router_update
from roletester.actions.neutron import router_delete
from roletester.actions.neutron import security_group_create
from roletester.actions.neutron import security_group_show
from roletester.actions.neutron import security_group_add_to_server
from roletester.actions.neutron import security_group_remove_from_server
from roletester.actions.neutron import security_group_rule_create
from roletester.actions.neutron import security_group_delete
from roletester.actions.neutron import security_group_rule_delete
from roletester.actions.neutron import floatingip_associate
from roletester.actions.neutron import floatingip_create
from roletester.actions.neutron import floatingip_delete
from roletester.actions.neutron import floatingip_disassociate
from roletester.actions.neutron import floatingip_show
from roletester.actions.neutron import port_create
from roletester.exc import KeystoneUnauthorized
from roletester.exc import NeutronForbidden
from neutronclient.common.exceptions import NetworkNotFoundClient
from roletester.exc import NeutronNotFound
from roletester.scenario import ScenarioFactory as Factory
from roletester.utils import randomname
from roletester.log import logging
logger = logging.getLogger("roletester.neutron")  # module-level logger for all neutron scenarios
class SampleFactory(Factory):
    """Full network/server/security-group/router lifecycle scenario."""

    _ACTIONS = [
        network_create,
        network_show,
        subnet_create,
        subnet_show,
        server_create,
        server_wait_for_status,
        security_group_create,
        security_group_show,
        security_group_rule_create,
        security_group_add_to_server,
        security_group_remove_from_server,
        security_group_rule_delete,
        security_group_delete,
        server_delete,
        router_create,
        router_show,
        router_add_interface,
        router_remove_interface,
        router_delete,
        subnet_delete,
        network_delete
    ]

    # Positional indices into _ACTIONS; keep in sync with the list order above.
    NETWORK_CREATE = 0
    NETWORK_SHOW = 1
    SUBNET_CREATE = 2
    SUBNET_SHOW = 3
    SERVER_CREATE = 4
    SERVER_WAIT = 5
    SECURITY_GROUP_CREATE = 6
    SECURITY_GROUP_SHOW = 7
    SECURITY_GROUP_RULE_CREATE = 8
    SECURITY_GROUP_ADD_TO_SERVER = 9
    SECURITY_GROUP_REMOVE_FROM_SERVER = 10
    SECURITY_GROUP_RULE_DELETE = 11
    SECURITY_GROUP_DELETE = 12
    SERVER_DELETE = 13
    ROUTER_CREATE = 14
    ROUTER_SHOW = 15
    ROUTER_ADD_INTERFACE = 16
    ROUTER_REMOVE_INTERFACE = 17
    ROUTER_DELETE = 18
    SUBNET_DELETE = 19
    NETWORK_DELETE = 20
class SecgroupAddFactory(Factory):
    """Scenario up to attaching a security group to a server (no teardown)."""

    _ACTIONS = [
        network_create,
        network_show,
        subnet_create,
        subnet_show,
        server_create,
        server_wait_for_status,
        security_group_create,
        security_group_show,
        security_group_rule_create,
        security_group_add_to_server,
    ]

    # Positional indices into _ACTIONS; keep in sync with the list order above.
    NETWORK_CREATE = 0
    NETWORK_SHOW = 1
    SUBNET_CREATE = 2
    SUBNET_SHOW = 3
    SERVER_CREATE = 4
    SERVER_WAIT = 5
    SECURITY_GROUP_CREATE = 6
    SECURITY_GROUP_SHOW = 7
    SECURITY_GROUP_RULE_CREATE = 8
    SECURITY_GROUP_ADD_TO_SERVER = 9
class AddInterfaceFactory(Factory):
    """Scenario ending with attaching a subnet interface to a router."""

    _ACTIONS = [
        network_create,
        subnet_create,
        server_create,
        server_wait_for_status,
        security_group_create,
        security_group_rule_create,
        security_group_add_to_server,
        security_group_remove_from_server,
        security_group_rule_delete,
        security_group_delete,
        server_delete,
        router_create,
        router_show,
        router_add_interface,
    ]

    # Positional indices into _ACTIONS; keep in sync with the list order above.
    NETWORK_CREATE = 0
    SUBNET_CREATE = 1
    SERVER_CREATE = 2
    SERVER_WAIT = 3
    SECURITY_GROUP_CREATE = 4
    SECURITY_GROUP_RULE_CREATE = 5
    SECURITY_GROUP_ADD_TO_SERVER = 6
    SECURITY_GROUP_REMOVE_FROM_SERVER = 7
    SECURITY_GROUP_RULE_DELETE = 8
    SECURITY_GROUP_DELETE = 9
    SERVER_DELETE = 10
    ROUTER_CREATE = 11
    ROUTER_SHOW = 12
    ROUTER_ADD_INTERFACE = 13
class RouterDeleteFactory(Factory):
    """Scenario: create a router, then delete it."""

    _ACTIONS = [
        router_create,
        router_delete
    ]
    # Indices into _ACTIONS; keep in sync with the list order above.
    ROUTER_CREATE = 0
    ROUTER_DELETE = 1
class RouterUpdateFactory(Factory):
    """Scenario: create a router, then update it."""

    _ACTIONS = [
        router_create,
        router_update
    ]
    # Indices into _ACTIONS; keep in sync with the list order above.
    ROUTER_CREATE = 0
    ROUTER_UPDATE = 1
class RouterCreateFactory(Factory):
    """Scenario: create a router only."""

    _ACTIONS = [
        router_create
    ]
    ROUTER_CREATE = 0  # index into _ACTIONS
class SubnetDeleteFactory(Factory):
    """Scenario: create network and subnet, then delete the subnet."""

    _ACTIONS = [
        network_create,
        subnet_create,
        subnet_delete
    ]
    # Indices into _ACTIONS; keep in sync with the list order above.
    NETWORK_CREATE = 0
    SUBNET_CREATE = 1
    SUBNET_DELETE = 2
class SubnetUpdateFactory(Factory):
    """Scenario: create network and subnet, then update the subnet."""

    _ACTIONS = [
        network_create,
        subnet_create,
        subnet_update
    ]
    # Indices into _ACTIONS; keep in sync with the list order above.
    NETWORK_CREATE = 0
    SUBNET_CREATE = 1
    SUBNET_UPDATE = 2
class NetworkDeleteFactory(Factory):
    """Scenario: create a network, then delete it."""

    _ACTIONS = [
        network_create,
        network_delete
    ]
    # Indices into _ACTIONS; keep in sync with the list order above.
    NETWORK_CREATE = 0
    NETWORK_DELETE = 1
class NetworkCreateFactory(Factory):
    """Scenario: create a network only."""

    _ACTIONS = [
        network_create,
    ]
    NETWORK_CREATE = 0  # index into _ACTIONS
class SubnetCreateFactory(Factory):
    """Scenario: create a network and a subnet."""

    _ACTIONS = [
        network_create,
        subnet_create
    ]
    # Indices into _ACTIONS; keep in sync with the list order above.
    NETWORK_CREATE = 0
    SUBNET_CREATE = 1
class FloatingIPFactory(Factory):
    """Floating-IP lifecycle scenario: create, show, associate, disassociate, delete."""

    _ACTIONS = [
        network_create,
        subnet_create,
        port_create,
        router_create,
        router_add_interface,
        floatingip_create,
        floatingip_show,
        floatingip_associate,
        floatingip_disassociate,
        floatingip_delete,
    ]

    # Positional indices into _ACTIONS; keep in sync with the list order above.
    NETWORK_CREATE = 0
    SUBNET_CREATE = 1
    PORT_CREATE = 2
    ROUTER_CREATE = 3
    ROUTER_ADD_INTERFACE = 4
    FLOATINGIP_CREATE = 5
    FLOATINGIP_SHOW = 6
    FLOATINGIP_ASSOCIATE = 7
    FLOATINGIP_DISASSOCIATE = 8
    FLOATINGIP_DELETE = 9
class TestSample(BaseTestCase):
    """Role-based neutron scenario tests run against a live cloud."""

    name = 'scratch'
    flavor = '1'  # nova flavor id used for scenario servers
    # image_file = '/home/chalupaul/cirros-0.3.4-x86_64-disk.img'
    # NOTE(review): hard-coded developer-local path; breaks on other machines.
    image_file = '/Users/egle/Downloads/cirros-0.3.4-x86_64-disk.img'
    project = randomname()  # one random project name shared by every test in the class
def setUp(self):
super(TestSample, self).setUp()
try:
n = self.km.admin_client_manager.get_neutron()
networks = n.list_networks()['networks']
public_network = [x['id']
for x in networks
if x['router:external'] is True][0]
except IndexError:
err_str = "No public network found to create floating ips from."
raise NetworkNotFoundClient(message=err_str)
self.context["external_network_id"] = public_network
kwargs = {
'name': "glance test image",
'disk_format': 'qcow2',
'container_format': 'bare',
'is_public': 'public'
}
try:
image_id = self.context['image_id']
except Exception:
logger.info("No image_id found, creating image")
glance = self.km.admin_client_manager.get_glance()
images = glance.images.list()
for img in images:
if img.name == "glance test image" and img.status == "active" and img.visibility == 'public':
logger.info("found image with image id: %s" %img.id)
self.context.update(image_id=img.id)
if 'image_id' in self.context:
logger.info("image found:")
logger.info("image_id in context: %s" %self.context['image_id'])
else:
logger.info("creating new image.")
image_create(self.km.admin_client_manager, self.context, self.image_file)
image_id = self.context['image_id']
def test_cloud_admin_all(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SampleFactory(cloud_admin) \
.produce() \
.run(context=self.context)
def test_cloud_admin_floatingip(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
FloatingIPFactory(cloud_admin) \
.produce() \
.run(context=self.context)
def test_cloud_admin_same_domain_different_user(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'admin'
)
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SampleFactory(cloud_admin) \
.set(SampleFactory.NETWORK_CREATE,
clients=user1) \
.set(SampleFactory.SUBNET_CREATE,
clients=user1) \
.set(SampleFactory.SERVER_CREATE,
clients=user1) \
.set(SampleFactory.SERVER_WAIT,
clients=user1) \
.set(SampleFactory.SECURITY_GROUP_CREATE,
clients=user1) \
.set(SampleFactory.SECURITY_GROUP_RULE_CREATE,
clients=user1) \
.set(SampleFactory.SERVER_DELETE,
clients=user1) \
.set(SampleFactory.ROUTER_CREATE,
clients=user1) \
.set(SampleFactory.ROUTER_ADD_INTERFACE,
clients=user1) \
.produce() \
.run(context=self.context)
def test_cloud_admin_same_domain_different_user_floatingip(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
FloatingIPFactory(cloud_admin) \
.set(FloatingIPFactory.NETWORK_CREATE,
clients=user1) \
.set(FloatingIPFactory.SUBNET_CREATE,
clients=user1) \
.set(FloatingIPFactory.PORT_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_CREATE,
clients=user1) \
.produce() \
.run(context=self.context)
def test_bu_admin_all(self):
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-admin'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SampleFactory(bu_admin) \
.set(SampleFactory.NETWORK_CREATE, clients=cloud_admin) \
.set(SampleFactory.NETWORK_SHOW, clients=cloud_admin) \
.set(SampleFactory.SUBNET_CREATE, clients=cloud_admin) \
.set(SampleFactory.SECURITY_GROUP_CREATE, clients=cloud_admin) \
.set(SampleFactory.SECURITY_GROUP_RULE_CREATE, clients=cloud_admin) \
.set(SampleFactory.SECURITY_GROUP_RULE_DELETE, clients=cloud_admin) \
.set(SampleFactory.SECURITY_GROUP_DELETE, clients=cloud_admin) \
.set(SampleFactory.ROUTER_CREATE, clients=cloud_admin) \
.set(SampleFactory.ROUTER_ADD_INTERFACE, clients=cloud_admin) \
.set(SampleFactory.ROUTER_REMOVE_INTERFACE, clients=cloud_admin) \
.set(SampleFactory.ROUTER_DELETE, clients=cloud_admin) \
.set(SampleFactory.SUBNET_DELETE, clients=cloud_admin) \
.set(SampleFactory.NETWORK_DELETE, clients=cloud_admin) \
.produce() \
.run(context=self.context)
def test_bu_admin_floatingip(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-admin'
)
FloatingIPFactory(bu_admin) \
.set(FloatingIPFactory.NETWORK_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.SUBNET_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.ROUTER_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE, clients=cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, clients=cloud_admin) \
.produce() \
.run(context=self.context)
def test_bu_admin_same_domain_different_user(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-admin'
)
SampleFactory(bu_admin) \
.set(SampleFactory.NETWORK_CREATE,
clients=user1) \
.set(SampleFactory.SUBNET_CREATE,
clients=user1) \
.set(SampleFactory.SERVER_CREATE,
clients=user1) \
.set(SampleFactory.SERVER_WAIT,
clients=user1) \
.set(SampleFactory.SECURITY_GROUP_CREATE,
clients=user1) \
.set(SampleFactory.SECURITY_GROUP_RULE_CREATE, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_RULE_DELETE, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_REMOVE_FROM_SERVER, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_DELETE, clients=user1) \
.set(SampleFactory.SERVER_DELETE,
clients=user1) \
.set(SampleFactory.ROUTER_CREATE,
clients=user1) \
.set(SampleFactory.ROUTER_ADD_INTERFACE,
clients=user1) \
.set(SampleFactory.ROUTER_REMOVE_INTERFACE, clients=user1) \
.set(SampleFactory.ROUTER_DELETE, clients=user1) \
.set(SampleFactory.SUBNET_DELETE, clients=user1) \
.set(SampleFactory.NETWORK_DELETE, clients=user1) \
.produce() \
.run(context=self.context)
def test_bu_admin_same_domain_different_user_floatingip(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-admin'
)
FloatingIPFactory(bu_admin) \
.set(FloatingIPFactory.NETWORK_CREATE,
clients=user1) \
.set(FloatingIPFactory.SUBNET_CREATE,
clients=user1) \
.set(FloatingIPFactory.PORT_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_CREATE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, \
clients=user1) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain_different_user_floatingip(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-admin'
)
FloatingIPFactory(bu_admin) \
.set(FloatingIPFactory.NETWORK_CREATE,
clients=user1) \
.set(FloatingIPFactory.SUBNET_CREATE,
clients=user1) \
.set(FloatingIPFactory.PORT_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_CREATE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_SHOW,
expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_ASSOCIATE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE,
expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_DELETE,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain_different_user_secgroup_add_to_server(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-admin'
)
SecgroupAddFactory(bu_admin) \
.set(SecgroupAddFactory.NETWORK_CREATE,
clients=creator) \
.set(SecgroupAddFactory.NETWORK_SHOW,
expected_exceptions = [KeystoneUnauthorized]) \
.set(SecgroupAddFactory.SUBNET_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SUBNET_SHOW,
expected_exceptions = [KeystoneUnauthorized]) \
.set(SecgroupAddFactory.SERVER_CREATE,
clients=user1) \
.set(SecgroupAddFactory.SERVER_WAIT,
clients=user1) \
.set(SecgroupAddFactory.SECURITY_GROUP_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SECURITY_GROUP_SHOW,
expected_exceptions = [KeystoneUnauthorized]) \
.set(SecgroupAddFactory.SECURITY_GROUP_RULE_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SECURITY_GROUP_ADD_TO_SERVER,
expected_exceptions = [KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain_different_user_add_interface_to_server(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-admin'
)
AddInterfaceFactory(bu_admin) \
.set(AddInterfaceFactory.NETWORK_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SUBNET_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SERVER_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SERVER_WAIT,
clients=user1) \
.set(AddInterfaceFactory.SECURITY_GROUP_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SECURITY_GROUP_RULE_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SECURITY_GROUP_ADD_TO_SERVER,
clients=user1) \
.set(AddInterfaceFactory.SECURITY_GROUP_REMOVE_FROM_SERVER,
expected_exceptions=[KeystoneUnauthorized]) \
.set(AddInterfaceFactory.SECURITY_GROUP_RULE_DELETE,
expected_exceptions=[KeystoneUnauthorized]) \
.set(AddInterfaceFactory.SECURITY_GROUP_DELETE,
expected_exceptions=[KeystoneUnauthorized]) \
.set(AddInterfaceFactory.SERVER_DELETE,
clients=user1) \
.set(AddInterfaceFactory.ROUTER_CREATE,
clients=user1) \
.set(AddInterfaceFactory.ROUTER_SHOW,
expected_exceptions=[KeystoneUnauthorized]) \
.set(AddInterfaceFactory.ROUTER_ADD_INTERFACE,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain_different_user_subnet_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-admin'
)
SubnetDeleteFactory(user1) \
.set(SubnetDeleteFactory.SUBNET_DELETE, clients=bu_admin,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain_different_user_network_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-admin'
)
NetworkDeleteFactory(user1) \
.set(NetworkDeleteFactory.NETWORK_DELETE, clients=bu_admin,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_cloud_admin_subnet_update(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SubnetUpdateFactory(cloud_admin) \
.produce() \
.run(context=self.context)
    # TODO: re-test this scenario
def test_bu_admin_subnet_update(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-admin'
)
SubnetUpdateFactory(cloud_admin) \
.set(SubnetUpdateFactory.SUBNET_UPDATE, clients=bu_admin, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_cloud_admin_router_update(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
RouterUpdateFactory(cloud_admin) \
.produce() \
.run(context=self.context)
def test_bu_admin_router_update(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-admin'
)
RouterUpdateFactory(cloud_admin) \
.set(RouterUpdateFactory.ROUTER_UPDATE, clients=bu_admin, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
    # bu-user role:
    # - get subnet, get subnet pool
    # - get network details
    # TODO: re-test
def test_bu_user(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SampleFactory(cloud_admin) \
.set(SampleFactory.NETWORK_SHOW, clients=user1, expected_exceptions=[NeutronNotFound]) \
.set(SampleFactory.SUBNET_SHOW, clients=user1) \
.set(SampleFactory.SERVER_WAIT, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_SHOW, clients=user1) \
.produce() \
.run(context=self.context)
def test_bu_user_network_create(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
NetworkCreateFactory(user1) \
.set(NetworkCreateFactory.NETWORK_CREATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_user_subnet_create(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SubnetCreateFactory(user1) \
.set(SubnetCreateFactory.NETWORK_CREATE, clients=cloud_admin) \
.set(SubnetCreateFactory.SUBNET_CREATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_user_router_create(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
RouterCreateFactory(user1) \
.set(RouterCreateFactory.ROUTER_CREATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_user_network_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
NetworkDeleteFactory(cloud_admin) \
.set(NetworkDeleteFactory.NETWORK_DELETE, clients=user1, expected_exceptions=[NeutronNotFound]) \
.produce() \
.run(context=self.context)
def test_bu_user_subnet_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SubnetDeleteFactory(cloud_admin) \
.set(SubnetDeleteFactory.SUBNET_DELETE, clients=user1, expected_exceptions=[NeutronNotFound]) \
.produce() \
.run(context=self.context)
def test_bu_user_router_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
RouterDeleteFactory(cloud_admin) \
.set(RouterDeleteFactory.ROUTER_DELETE, clients=user1, expected_exceptions=[NeutronNotFound]) \
.produce() \
.run(context=self.context)
def test_bu_user_subnet_update(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SubnetUpdateFactory(user1) \
.set(SubnetUpdateFactory.NETWORK_CREATE, clients=cloud_admin) \
.set(SubnetUpdateFactory.SUBNET_CREATE, clients=cloud_admin) \
.set(SubnetUpdateFactory.SUBNET_UPDATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_user_router_update(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
RouterUpdateFactory(user1) \
.set(RouterUpdateFactory.ROUTER_CREATE, clients=cloud_admin) \
.set(RouterUpdateFactory.ROUTER_UPDATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_user_get_floatingip(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
FloatingIPFactory(cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_SHOW,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_ASSOCIATE,
clients=user1, expected_exceptions=[NeutronForbidden]) \
.set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE,
clients=user1, expected_exceptions=[NeutronForbidden]) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, clients=user1,
expected_exceptions=[NeutronNotFound]) \
.produce() \
.run(context=self.context)
    # TODO: re-test; this scenario appears to be broken
def test_bu_user_get_floatingip_diff_domain(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Domain2', self.project, 'bu-user'
)
FloatingIPFactory(cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_SHOW,
clients=user1, expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_ASSOCIATE,
clients=user1, expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE,
clients=user1, expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, clients=user1,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
    # bu-brt role
    # TODO: re-test
def test_bu_brt(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SampleFactory(cloud_admin) \
.set(SampleFactory.NETWORK_CREATE) \
.set(SampleFactory.NETWORK_SHOW, clients=user1) \
.set(SampleFactory.SUBNET_CREATE) \
.set(SampleFactory.SUBNET_SHOW, clients=user1) \
.set(SampleFactory.SERVER_CREATE) \
.set(SampleFactory.SERVER_WAIT,
clients=user1) \
.set(SampleFactory.SECURITY_GROUP_CREATE) \
.set(SampleFactory.SECURITY_GROUP_SHOW, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_RULE_CREATE) \
.set(SampleFactory.SERVER_DELETE) \
.set(SampleFactory.ROUTER_CREATE) \
.set(SampleFactory.ROUTER_SHOW, clients=user1) \
.set(SampleFactory.ROUTER_ADD_INTERFACE) \
.produce() \
.run(context=self.context)
def test_bu_brt_network_create(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
NetworkCreateFactory(user1) \
.set(NetworkCreateFactory.NETWORK_CREATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_brt_subnet_create(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SubnetCreateFactory(user1) \
.set(SubnetCreateFactory.NETWORK_CREATE, clients=cloud_admin) \
.set(SubnetCreateFactory.SUBNET_CREATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_brt_router_create(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
RouterCreateFactory(user1) \
.set(RouterCreateFactory.ROUTER_CREATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_brt_network_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
NetworkDeleteFactory(cloud_admin) \
.set(NetworkDeleteFactory.NETWORK_DELETE, clients=user1, expected_exceptions=[NeutronNotFound]) \
.produce() \
.run(context=self.context)
def test_bu_brt_subnet_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SubnetDeleteFactory(cloud_admin) \
.set(SubnetDeleteFactory.SUBNET_DELETE, clients=user1, expected_exceptions=[NeutronNotFound]) \
.produce() \
.run(context=self.context)
def test_bu_brt_router_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
RouterDeleteFactory(cloud_admin) \
.set(RouterDeleteFactory.ROUTER_DELETE, clients=user1, expected_exceptions=[NeutronNotFound]) \
.produce() \
.run(context=self.context)
def test_bu_brt_subnet_update(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SubnetUpdateFactory(user1) \
.set(SubnetUpdateFactory.NETWORK_CREATE, clients=cloud_admin) \
.set(SubnetUpdateFactory.SUBNET_CREATE, clients=cloud_admin) \
.set(SubnetUpdateFactory.SUBNET_UPDATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_brt_router_update(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
RouterUpdateFactory(user1) \
.set(RouterUpdateFactory.ROUTER_CREATE, clients=cloud_admin) \
.set(RouterUpdateFactory.ROUTER_UPDATE, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_brt_get_floatingip(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
FloatingIPFactory(cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_SHOW,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_ASSOCIATE,
clients=user1, expected_exceptions=[NeutronForbidden]) \
.set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE,
clients=user1, expected_exceptions=[NeutronForbidden]) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, clients=user1,
expected_exceptions=[NeutronNotFound]) \
.produce() \
.run(context=self.context)
def test_bu_brt_get_floatingip_diff_domain(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Domain2', self.project, 'bu-brt'
)
FloatingIPFactory(cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_SHOW,
clients=user1, expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_ASSOCIATE,
clients=user1, expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE,
clients=user1, expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, clients=user1,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
    # bu-poweruser role
    # TODO: test all of these
def test_bu_poweruser_all(self):
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-poweruser', False
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SampleFactory(bu_admin) \
.set(SampleFactory.NETWORK_CREATE, clients=cloud_admin) \
.set(SampleFactory.SUBNET_CREATE, clients=cloud_admin) \
.set(SampleFactory.SECURITY_GROUP_CREATE, clients=cloud_admin) \
.set(SampleFactory.SECURITY_GROUP_RULE_CREATE, clients=cloud_admin) \
.set(SampleFactory.SECURITY_GROUP_RULE_DELETE, clients=cloud_admin) \
.set(SampleFactory.SECURITY_GROUP_DELETE, clients=cloud_admin) \
.set(SampleFactory.ROUTER_CREATE, clients=cloud_admin) \
.set(SampleFactory.ROUTER_ADD_INTERFACE, clients=cloud_admin) \
.set(SampleFactory.ROUTER_REMOVE_INTERFACE, clients=cloud_admin) \
.set(SampleFactory.ROUTER_DELETE, clients=cloud_admin) \
.set(SampleFactory.SUBNET_DELETE, clients=cloud_admin) \
.set(SampleFactory.NETWORK_DELETE, clients=cloud_admin) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_floatingip(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-poweruser', False
)
FloatingIPFactory(bu_admin) \
.set(FloatingIPFactory.NETWORK_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.SUBNET_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.ROUTER_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE, clients=cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, clients=cloud_admin) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_same_domain_different_user(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-poweruser', False
)
SampleFactory(bu_admin) \
.set(SampleFactory.NETWORK_CREATE,
clients=user1) \
.set(SampleFactory.SUBNET_CREATE,
clients=user1) \
.set(SampleFactory.SERVER_CREATE,
clients=user1) \
.set(SampleFactory.SERVER_WAIT,
clients=user1) \
.set(SampleFactory.SECURITY_GROUP_CREATE,
clients=user1) \
.set(SampleFactory.SECURITY_GROUP_RULE_CREATE,
clients=user1) \
.set(SampleFactory.SECURITY_GROUP_RULE_DELETE, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_DELETE, clients=user1) \
.set(SampleFactory.SERVER_DELETE,
clients=user1) \
.set(SampleFactory.ROUTER_CREATE,
clients=user1) \
.set(SampleFactory.ROUTER_ADD_INTERFACE, clients=user1) \
.set(SampleFactory.ROUTER_REMOVE_INTERFACE, clients=user1) \
.set(SampleFactory.ROUTER_DELETE, clients=user1) \
.set(SampleFactory.SUBNET_DELETE, clients=user1) \
.set(SampleFactory.NETWORK_DELETE, clients=user1) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_same_domain_different_user_floatingip(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-poweruser', False
)
FloatingIPFactory(bu_admin) \
.set(FloatingIPFactory.NETWORK_CREATE,
clients=user1) \
.set(FloatingIPFactory.SUBNET_CREATE,
clients=user1) \
.set(FloatingIPFactory.PORT_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_CREATE, clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, clients=user1) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_different_domain_different_user_floatingip(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-poweruser', False
)
FloatingIPFactory(bu_admin) \
.set(FloatingIPFactory.NETWORK_CREATE,
clients=user1) \
.set(FloatingIPFactory.SUBNET_CREATE,
clients=user1) \
.set(FloatingIPFactory.PORT_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_CREATE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_SHOW,
expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_ASSOCIATE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE,
expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_DELETE,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_different_domain_different_user_secgroup_add_to_server(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Default', self.project, 'bu-poweruser', False
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-poweruser', False
)
SecgroupAddFactory(bu_admin) \
.set(SecgroupAddFactory.NETWORK_CREATE,
clients=creator) \
.set(SecgroupAddFactory.NETWORK_SHOW,
expected_exceptions = [KeystoneUnauthorized]) \
.set(SecgroupAddFactory.SUBNET_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SUBNET_SHOW,
expected_exceptions = [KeystoneUnauthorized]) \
.set(SecgroupAddFactory.SERVER_CREATE,
clients=user1) \
.set(SecgroupAddFactory.SERVER_WAIT,
clients=user1) \
.set(SecgroupAddFactory.SECURITY_GROUP_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SECURITY_GROUP_SHOW,
expected_exceptions = [KeystoneUnauthorized]) \
.set(SecgroupAddFactory.SECURITY_GROUP_RULE_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SECURITY_GROUP_ADD_TO_SERVER,
expected_exceptions = [KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_different_domain_different_user_add_interface_to_server(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-poweruser', False
)
AddInterfaceFactory(bu_admin) \
.set(AddInterfaceFactory.NETWORK_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SUBNET_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SERVER_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SERVER_WAIT,
clients=user1) \
.set(AddInterfaceFactory.SECURITY_GROUP_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SECURITY_GROUP_RULE_CREATE,
clients=user1) \
.set(AddInterfaceFactory.SECURITY_GROUP_ADD_TO_SERVER,
clients=user1) \
.set(AddInterfaceFactory.SECURITY_GROUP_REMOVE_FROM_SERVER,
expected_exceptions=[KeystoneUnauthorized]) \
.set(AddInterfaceFactory.SECURITY_GROUP_RULE_DELETE,
expected_exceptions=[KeystoneUnauthorized]) \
.set(AddInterfaceFactory.SECURITY_GROUP_DELETE,
expected_exceptions=[KeystoneUnauthorized]) \
.set(AddInterfaceFactory.SERVER_DELETE,
clients=user1) \
.set(AddInterfaceFactory.ROUTER_CREATE,
clients=user1) \
.set(AddInterfaceFactory.ROUTER_SHOW,
expected_exceptions=[KeystoneUnauthorized]) \
.set(AddInterfaceFactory.ROUTER_ADD_INTERFACE,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_different_domain_different_user_subnet_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-poweruser', False
)
SubnetDeleteFactory(user1) \
.set(SubnetDeleteFactory.SUBNET_DELETE, clients=bu_admin,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_different_domain_different_user_network_delete(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-poweruser', False
)
NetworkDeleteFactory(user1) \
.set(NetworkDeleteFactory.NETWORK_DELETE, clients=bu_admin,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_subnet_update(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-poweruser', False
)
SubnetUpdateFactory(cloud_admin) \
.set(SubnetUpdateFactory.SUBNET_UPDATE, clients=bu_admin, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_router_update(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-poweruser', False
)
RouterUpdateFactory(cloud_admin) \
.set(RouterUpdateFactory.ROUTER_UPDATE, clients=bu_admin, expected_exceptions=[NeutronForbidden]) \
.produce() \
.run(context=self.context)
    # cirt role
    # TODO: test all of these
def test_cirt_all(self):
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SampleFactory(cloud_admin) \
.set(SampleFactory.SUBNET_SHOW, clients=bu_admin) \
.set(SampleFactory.ROUTER_SHOW, clients=bu_admin) \
.set(SampleFactory.SECURITY_GROUP_SHOW, clients=bu_admin) \
.produce() \
.run(context=self.context)
def test_cirt_floatingip(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
FloatingIPFactory(bu_admin) \
.set(FloatingIPFactory.NETWORK_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.SUBNET_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.PORT_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_ASSOCIATE, clients=cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE, clients=cloud_admin) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, clients=cloud_admin) \
.set(FloatingIPFactory.ROUTER_CREATE, clients=cloud_admin) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE, clients=cloud_admin) \
.produce() \
.run(context=self.context)
def test_cirt_same_domain_different_user(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
SampleFactory(bu_admin) \
.set(SampleFactory.NETWORK_CREATE, clients=user1) \
.set(SampleFactory.NETWORK_SHOW, clients=user1) \
.set(SampleFactory.SUBNET_CREATE,
clients=user1) \
.set(SampleFactory.SERVER_CREATE, clients=user1) \
.set(SampleFactory.SERVER_WAIT, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_CREATE, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_RULE_CREATE,
clients=user1) \
.set(SampleFactory.SECURITY_GROUP_ADD_TO_SERVER, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_REMOVE_FROM_SERVER, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_RULE_DELETE, clients=user1) \
.set(SampleFactory.SECURITY_GROUP_DELETE, clients=user1) \
.set(SampleFactory.SERVER_DELETE,
clients=user1) \
.set(SampleFactory.ROUTER_CREATE,
clients=user1) \
.set(SampleFactory.ROUTER_ADD_INTERFACE, clients=user1) \
.set(SampleFactory.ROUTER_REMOVE_INTERFACE, clients=user1) \
.set(SampleFactory.ROUTER_DELETE, clients=user1) \
.set(SampleFactory.SUBNET_DELETE, clients=user1) \
.set(SampleFactory.NETWORK_DELETE, clients=user1) \
.produce() \
.run(context=self.context)
def test_cirt_same_domain_different_user_floatingip(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
FloatingIPFactory(bu_admin) \
.set(FloatingIPFactory.NETWORK_CREATE,
clients=user1) \
.set(FloatingIPFactory.SUBNET_CREATE,
clients=user1) \
.set(FloatingIPFactory.PORT_CREATE, clients=user1) \
.set(FloatingIPFactory.ROUTER_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_CREATE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_ASSOCIATE, clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE, clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_DELETE, clients=user1) \
.produce() \
.run(context=self.context)
def test_cirt_different_domain_different_user_floatingip(self):
user1 = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'cirt'
)
FloatingIPFactory(bu_admin) \
.set(FloatingIPFactory.NETWORK_CREATE,
clients=user1) \
.set(FloatingIPFactory.SUBNET_CREATE,
clients=user1) \
.set(FloatingIPFactory.PORT_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_CREATE,
clients=user1) \
.set(FloatingIPFactory.ROUTER_ADD_INTERFACE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_CREATE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_SHOW,
expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_ASSOCIATE,
clients=user1) \
.set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE,
expected_exceptions=[KeystoneUnauthorized]) \
.set(FloatingIPFactory.FLOATINGIP_DELETE,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_cirt_different_domain_different_user_secgroup_add_to_server(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
user1 = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'cirt'
)
SecgroupAddFactory(bu_admin) \
.set(SecgroupAddFactory.NETWORK_CREATE,
clients=creator) \
.set(SecgroupAddFactory.NETWORK_SHOW,
expected_exceptions = [KeystoneUnauthorized]) \
.set(SecgroupAddFactory.SUBNET_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SUBNET_SHOW,
expected_exceptions = [KeystoneUnauthorized]) \
.set(SecgroupAddFactory.SERVER_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SERVER_WAIT,
clients=user1) \
.set(SecgroupAddFactory.SECURITY_GROUP_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SECURITY_GROUP_SHOW,
expected_exceptions = [KeystoneUnauthorized]) \
.set(SecgroupAddFactory.SECURITY_GROUP_RULE_CREATE,
clients=creator) \
.set(SecgroupAddFactory.SECURITY_GROUP_ADD_TO_SERVER,
expected_exceptions = [KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_cirt_different_domain_different_user_add_interface_to_server(self):
    """Cross-domain 'cirt' may rely on delegated clients for the create
    steps but is denied (401) on delete/show/add-interface steps."""
    # NOTE(review): 'creator' is fetched but never referenced below;
    # kept to preserve the original credential lookups — confirm before
    # removing.
    creator = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    worker = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    actor = self.km.find_user_credentials(
        'Domain2', self.project, 'cirt'
    )
    (AddInterfaceFactory(actor)
        .set(AddInterfaceFactory.NETWORK_CREATE, clients=worker)
        .set(AddInterfaceFactory.SUBNET_CREATE, clients=worker)
        .set(AddInterfaceFactory.SERVER_CREATE, clients=worker)
        .set(AddInterfaceFactory.SERVER_WAIT, clients=worker)
        .set(AddInterfaceFactory.SECURITY_GROUP_CREATE, clients=worker)
        .set(AddInterfaceFactory.SECURITY_GROUP_RULE_CREATE, clients=worker)
        .set(AddInterfaceFactory.SECURITY_GROUP_ADD_TO_SERVER, clients=worker)
        .set(AddInterfaceFactory.SECURITY_GROUP_REMOVE_FROM_SERVER,
             expected_exceptions=[KeystoneUnauthorized])
        .set(AddInterfaceFactory.SECURITY_GROUP_RULE_DELETE,
             expected_exceptions=[KeystoneUnauthorized])
        .set(AddInterfaceFactory.SECURITY_GROUP_DELETE,
             expected_exceptions=[KeystoneUnauthorized])
        .set(AddInterfaceFactory.SERVER_DELETE, clients=worker)
        .set(AddInterfaceFactory.ROUTER_CREATE, clients=worker)
        .set(AddInterfaceFactory.ROUTER_SHOW,
             expected_exceptions=[KeystoneUnauthorized])
        .set(AddInterfaceFactory.ROUTER_ADD_INTERFACE,
             expected_exceptions=[KeystoneUnauthorized])
        .produce()
        .run(context=self.context))
def test_cirt_different_domain_different_user_subnet_delete(self):
    """A 'cirt' user from another domain must not delete a subnet (401)."""
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    outsider = self.km.find_user_credentials(
        'Domain2', self.project, 'cirt'
    )
    (SubnetDeleteFactory(owner)
        .set(SubnetDeleteFactory.SUBNET_DELETE, clients=outsider,
             expected_exceptions=[KeystoneUnauthorized])
        .produce()
        .run(context=self.context))
def test_cirt_different_domain_different_user_network_delete(self):
    """A 'cirt' user from another domain must not delete a network (401)."""
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    outsider = self.km.find_user_credentials(
        'Domain2', self.project, 'cirt'
    )
    (NetworkDeleteFactory(owner)
        .set(NetworkDeleteFactory.NETWORK_DELETE, clients=outsider,
             expected_exceptions=[KeystoneUnauthorized])
        .produce()
        .run(context=self.context))
def test_cirt_subnet_update(self):
    """Same-domain 'cirt' must not update a subnet (Neutron 403)."""
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    restricted = self.km.find_user_credentials(
        'Default', self.project, 'cirt'
    )
    (SubnetUpdateFactory(owner)
        .set(SubnetUpdateFactory.SUBNET_UPDATE, clients=restricted,
             expected_exceptions=[NeutronForbidden])
        .produce()
        .run(context=self.context))
def test_cirt_router_update(self):
    """Same-domain 'cirt' must not update a router (Neutron 403)."""
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    restricted = self.km.find_user_credentials(
        'Default', self.project, 'cirt'
    )
    (RouterUpdateFactory(owner)
        .set(RouterUpdateFactory.ROUTER_UPDATE, clients=restricted,
             expected_exceptions=[NeutronForbidden])
        .produce()
        .run(context=self.context))
# ---- cloud-support role ----
# TODO: test all these
def test_cloud_support_all(self):
    """'cloud-support' may perform the read-only show steps on resources
    created by 'cloud-admin'."""
    reader = self.km.find_user_credentials(
        'Default', self.project, 'cloud-support'
    )
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    (SampleFactory(owner)
        .set(SampleFactory.NETWORK_SHOW, clients=reader)
        .set(SampleFactory.SUBNET_SHOW, clients=reader)
        .set(SampleFactory.ROUTER_SHOW, clients=reader)
        .set(SampleFactory.SECURITY_GROUP_SHOW, clients=reader)
        .produce()
        .run(context=self.context))
def test_cloud_support_floatingip(self):
    """'cloud-support' drives the floating-IP scenario while delegating
    the create/delete steps to 'cloud-admin'."""
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    actor = self.km.find_user_credentials(
        'Default', self.project, 'cloud-support'
    )
    (FloatingIPFactory(actor)
        .set(FloatingIPFactory.NETWORK_CREATE, clients=owner)
        .set(FloatingIPFactory.SUBNET_CREATE, clients=owner)
        .set(FloatingIPFactory.ROUTER_CREATE, clients=owner)
        .set(FloatingIPFactory.ROUTER_ADD_INTERFACE, clients=owner)
        .set(FloatingIPFactory.FLOATINGIP_DELETE, clients=owner)
        .produce()
        .run(context=self.context))
def test_cloud_support_same_domain_different_user(self):
    """'cloud-support' runs the full sample scenario with every step
    delegated to a same-domain 'cloud-admin' user."""
    # NOTE(review): 'creator' is fetched but never referenced below;
    # kept to preserve the original credential lookups — confirm before
    # removing.
    creator = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    worker = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    actor = self.km.find_user_credentials(
        'Default', self.project, 'cloud-support'
    )
    # Every step is delegated to the same worker, in the original order.
    steps = (
        SampleFactory.NETWORK_CREATE,
        SampleFactory.SUBNET_CREATE,
        SampleFactory.SERVER_CREATE,
        SampleFactory.SERVER_WAIT,
        SampleFactory.SECURITY_GROUP_CREATE,
        SampleFactory.SECURITY_GROUP_RULE_CREATE,
        SampleFactory.SECURITY_GROUP_ADD_TO_SERVER,
        SampleFactory.SECURITY_GROUP_REMOVE_FROM_SERVER,
        SampleFactory.SECURITY_GROUP_RULE_DELETE,
        SampleFactory.SECURITY_GROUP_DELETE,
        SampleFactory.SERVER_DELETE,
        SampleFactory.ROUTER_CREATE,
        SampleFactory.ROUTER_ADD_INTERFACE,
        SampleFactory.ROUTER_REMOVE_INTERFACE,
        SampleFactory.ROUTER_DELETE,
        SampleFactory.SUBNET_DELETE,
        SampleFactory.NETWORK_DELETE,
    )
    factory = SampleFactory(actor)
    for step in steps:
        factory = factory.set(step, clients=worker)
    factory.produce().run(context=self.context)
def test_cloud_support_same_domain_different_user_floatingip(self):
    """'cloud-support' runs the floating-IP scenario with every step
    delegated to a same-domain 'cloud-admin' user."""
    worker = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    actor = self.km.find_user_credentials(
        'Default', self.project, 'cloud-support'
    )
    steps = (
        FloatingIPFactory.NETWORK_CREATE,
        FloatingIPFactory.SUBNET_CREATE,
        FloatingIPFactory.PORT_CREATE,
        FloatingIPFactory.ROUTER_CREATE,
        FloatingIPFactory.ROUTER_ADD_INTERFACE,
        FloatingIPFactory.FLOATINGIP_CREATE,
        FloatingIPFactory.FLOATINGIP_DELETE,
    )
    factory = FloatingIPFactory(actor)
    for step in steps:
        factory = factory.set(step, clients=worker)
    factory.produce().run(context=self.context)
def test_cloud_support_different_domain_different_user_floatingip(self):
    """Cross-domain 'cloud-support' is denied (401) on the floating-IP
    show/disassociate/delete steps; creates are delegated."""
    worker = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    actor = self.km.find_user_credentials(
        'Domain2', self.project, 'cloud-support'
    )
    (FloatingIPFactory(actor)
        .set(FloatingIPFactory.NETWORK_CREATE, clients=worker)
        .set(FloatingIPFactory.SUBNET_CREATE, clients=worker)
        .set(FloatingIPFactory.PORT_CREATE, clients=worker)
        .set(FloatingIPFactory.ROUTER_CREATE, clients=worker)
        .set(FloatingIPFactory.ROUTER_ADD_INTERFACE, clients=worker)
        .set(FloatingIPFactory.FLOATINGIP_CREATE, clients=worker)
        .set(FloatingIPFactory.FLOATINGIP_SHOW,
             expected_exceptions=[KeystoneUnauthorized])
        .set(FloatingIPFactory.FLOATINGIP_ASSOCIATE, clients=worker)
        .set(FloatingIPFactory.FLOATINGIP_DISASSOCIATE,
             expected_exceptions=[KeystoneUnauthorized])
        .set(FloatingIPFactory.FLOATINGIP_DELETE,
             expected_exceptions=[KeystoneUnauthorized])
        .produce()
        .run(context=self.context))
def test_cloud_support_different_domain_different_user_add_interface_to_server(self):
    """Cross-domain 'cloud-support' may rely on delegated clients for the
    create steps but is denied (401) on delete/show/add-interface steps."""
    # NOTE(review): 'creator' is fetched but never referenced below;
    # kept to preserve the original credential lookups — confirm before
    # removing.
    creator = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    worker = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    actor = self.km.find_user_credentials(
        'Domain2', self.project, 'cloud-support'
    )
    (AddInterfaceFactory(actor)
        .set(AddInterfaceFactory.NETWORK_CREATE, clients=worker)
        .set(AddInterfaceFactory.SUBNET_CREATE, clients=worker)
        .set(AddInterfaceFactory.SERVER_CREATE, clients=worker)
        .set(AddInterfaceFactory.SERVER_WAIT, clients=worker)
        .set(AddInterfaceFactory.SECURITY_GROUP_CREATE, clients=worker)
        .set(AddInterfaceFactory.SECURITY_GROUP_RULE_CREATE, clients=worker)
        .set(AddInterfaceFactory.SECURITY_GROUP_ADD_TO_SERVER, clients=worker)
        .set(AddInterfaceFactory.SECURITY_GROUP_REMOVE_FROM_SERVER,
             expected_exceptions=[KeystoneUnauthorized])
        .set(AddInterfaceFactory.SECURITY_GROUP_RULE_DELETE,
             expected_exceptions=[KeystoneUnauthorized])
        .set(AddInterfaceFactory.SECURITY_GROUP_DELETE,
             expected_exceptions=[KeystoneUnauthorized])
        .set(AddInterfaceFactory.SERVER_DELETE, clients=worker)
        .set(AddInterfaceFactory.ROUTER_CREATE, clients=worker)
        .set(AddInterfaceFactory.ROUTER_SHOW,
             expected_exceptions=[KeystoneUnauthorized])
        .set(AddInterfaceFactory.ROUTER_ADD_INTERFACE,
             expected_exceptions=[KeystoneUnauthorized])
        .produce()
        .run(context=self.context))
def test_cloud_support_different_domain_different_user_subnet_delete(self):
    """Cross-domain 'cloud-support' must not delete a subnet (401)."""
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    outsider = self.km.find_user_credentials(
        'Domain2', self.project, 'cloud-support'
    )
    (SubnetDeleteFactory(owner)
        .set(SubnetDeleteFactory.SUBNET_DELETE, clients=outsider,
             expected_exceptions=[KeystoneUnauthorized])
        .produce()
        .run(context=self.context))
def test_cloud_support_different_domain_different_user_network_delete(self):
    """Cross-domain 'cloud-support' must not delete a network (401)."""
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    outsider = self.km.find_user_credentials(
        'Domain2', self.project, 'cloud-support'
    )
    (NetworkDeleteFactory(owner)
        .set(NetworkDeleteFactory.NETWORK_DELETE, clients=outsider,
             expected_exceptions=[KeystoneUnauthorized])
        .produce()
        .run(context=self.context))
# TODO: retest
def test_cloud_support_subnet_update(self):
    """Same-domain 'cloud-support' must not update a subnet (Neutron 403)."""
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    restricted = self.km.find_user_credentials(
        'Default', self.project, 'cloud-support'
    )
    (SubnetUpdateFactory(owner)
        .set(SubnetUpdateFactory.SUBNET_UPDATE, clients=restricted,
             expected_exceptions=[NeutronForbidden])
        .produce()
        .run(context=self.context))
def test_cloud_support_router_update(self):
    """Same-domain 'cloud-support' must not update a router (Neutron 403)."""
    owner = self.km.find_user_credentials(
        'Default', self.project, 'cloud-admin'
    )
    restricted = self.km.find_user_credentials(
        'Default', self.project, 'cloud-support'
    )
    (RouterUpdateFactory(owner)
        .set(RouterUpdateFactory.ROUTER_UPDATE, clients=restricted,
             expected_exceptions=[NeutronForbidden])
        .produce()
        .run(context=self.context))
|
|
#############################################
# Image Processing Constants
#############################################
# Smoothing operation types (cvSmooth)
CV_BLUR_NO_SCALE = 0
CV_BLUR = 1
CV_GAUSSIAN = 2
CV_MEDIAN = 3
CV_BILATERAL = 4
# Termination-criteria flags; NUMBER and ITER are aliases of the same bit
CV_TERMCRIT_NUMBER = 1
CV_TERMCRIT_ITER = 1
CV_TERMCRIT_EPS = 2
# Interpolation modes for resize/warp operations
CV_INTER_NN = 0
CV_INTER_LINEAR = 1
CV_INTER_CUBIC = 2
CV_INTER_AREA = 3
# Warp flags, OR-ed together with an interpolation mode
CV_WARP_FILL_OUTLIERS = 8
CV_WARP_INVERSE_MAP = 16
# Structuring-element shapes for morphology kernels
CV_SHAPE_RECT = 0
CV_SHAPE_CROSS = 1
CV_SHAPE_ELLIPSE = 2
CV_SHAPE_CUSTOM = 100
# Thresholding types (cvThreshold); MASK covers the type bits
CV_THRESH_BINARY = 0
CV_THRESH_BINARY_INV = 1
CV_THRESH_TRUNC = 2
CV_THRESH_TOZERO = 3
CV_THRESH_TOZERO_INV = 4
CV_THRESH_MASK = 7
# Adaptive-threshold methods (cvAdaptiveThreshold)
CV_ADAPTIVE_THRESH_MEAN_C = 0
CV_ADAPTIVE_THRESH_GAUSSIAN_C = 1
# Morphological operation codes (cvMorphologyEx)
CV_MOP_OPEN = 2
CV_MOP_CLOSE = 3
CV_MOP_GRADIENT = 4
CV_MOP_TOPHAT = 5
CV_MOP_BLACKHAT = 6
#-------------------------------------------------------------------------------
# Color Conversion
#-------------------------------------------------------------------------------
# Conversion codes for cvCvtColor. Where source/destination channel order is
# simply mirrored, the RGB-named constant aliases the equivalent BGR code.
CV_BGR2BGRA = 0
CV_RGB2RGBA = CV_BGR2BGRA
CV_BGRA2BGR = 1
CV_RGBA2RGB = CV_BGRA2BGR
CV_BGR2RGBA = 2
CV_RGB2BGRA = CV_BGR2RGBA
CV_RGBA2BGR = 3
CV_BGRA2RGB = CV_RGBA2BGR
CV_BGR2RGB = 4
CV_RGB2BGR = CV_BGR2RGB
CV_BGRA2RGBA = 5
CV_RGBA2BGRA = CV_BGRA2RGBA
# Grayscale conversions
CV_BGR2GRAY = 6
CV_RGB2GRAY = 7
CV_GRAY2BGR = 8
CV_GRAY2RGB = CV_GRAY2BGR
CV_GRAY2BGRA = 9
CV_GRAY2RGBA = CV_GRAY2BGRA
CV_BGRA2GRAY = 10
CV_RGBA2GRAY = 11
# 16-bit packed 5-6-5 formats
CV_BGR2BGR565 = 12
CV_RGB2BGR565 = 13
CV_BGR5652BGR = 14
CV_BGR5652RGB = 15
CV_BGRA2BGR565 = 16
CV_RGBA2BGR565 = 17
CV_BGR5652BGRA = 18
CV_BGR5652RGBA = 19
CV_GRAY2BGR565 = 20
CV_BGR5652GRAY = 21
# 16-bit packed 5-5-5 formats
CV_BGR2BGR555 = 22
CV_RGB2BGR555 = 23
CV_BGR5552BGR = 24
CV_BGR5552RGB = 25
CV_BGRA2BGR555 = 26
CV_RGBA2BGR555 = 27
CV_BGR5552BGRA = 28
CV_BGR5552RGBA = 29
CV_GRAY2BGR555 = 30
CV_BGR5552GRAY = 31
# CIE XYZ / YCrCb / HSV color spaces (codes 42-43 are unused upstream)
CV_BGR2XYZ = 32
CV_RGB2XYZ = 33
CV_XYZ2BGR = 34
CV_XYZ2RGB = 35
CV_BGR2YCrCb = 36
CV_RGB2YCrCb = 37
CV_YCrCb2BGR = 38
CV_YCrCb2RGB = 39
CV_BGR2HSV = 40
CV_RGB2HSV = 41
CV_BGR2Lab = 44
CV_RGB2Lab = 45
# Bayer demosaicing patterns; the RGB outputs alias the mirrored BGR codes
CV_BayerBG2BGR = 46
CV_BayerGB2BGR = 47
CV_BayerRG2BGR = 48
CV_BayerGR2BGR = 49
CV_BayerBG2RGB = CV_BayerRG2BGR
CV_BayerGB2RGB = CV_BayerGR2BGR
CV_BayerRG2RGB = CV_BayerBG2BGR
CV_BayerGR2RGB = CV_BayerGB2BGR
# Luv / HLS and the inverse conversions back to BGR/RGB
CV_BGR2Luv = 50
CV_RGB2Luv = 51
CV_BGR2HLS = 52
CV_RGB2HLS = 53
CV_HSV2BGR = 54
CV_HSV2RGB = 55
CV_Lab2BGR = 56
CV_Lab2RGB = 57
CV_Luv2BGR = 58
CV_Luv2RGB = 59
CV_HLS2BGR = 60
CV_HLS2RGB = 61
#########################
# Calibration Constants #
#########################
# Flags for cvCalibrateCamera2
CV_CALIB_USE_INTRINSIC_GUESS = 1
CV_CALIB_FIX_ASPECT_RATIO = 2
CV_CALIB_FIX_PRINCIPAL_POINT = 4
CV_CALIB_ZERO_TANGENT_DIST = 8
# Flags for cvFindChessboardCorners
CV_CALIB_CB_ADAPTIVE_THRESH = 1
CV_CALIB_CB_NORMALIZE_IMAGE = 2
CV_CALIB_CB_FILTER_QUADS = 4
################################
# Fundamental Matrix Constants #
################################
# Estimation methods for cvFindFundamentalMat
CV_FM_7POINT = 1
CV_FM_8POINT = 2
CV_FM_LMEDS = 4
CV_FM_RANSAC = 8
####################
# cvMat TypeValues #
####################
# A mat type packs a depth code in the low bits and (channels - 1)
# shifted left by CV_CN_SHIFT (see _CV_MAKETYPE below).
CV_CN_MAX = 4
CV_CN_SHIFT = 3
CV_DEPTH_MAX = (1 << CV_CN_SHIFT)
# Element depth codes
CV_8U = 0
CV_8S = 1
CV_16U = 2
CV_16S = 3
CV_32S = 4
CV_32F = 5
CV_64F = 6
CV_USRTYPE1 = 7
def _CV_MAKETYPE(depth, cn):
    """Pack a depth code and a channel count into an OpenCV mat type value.

    Mirrors OpenCV's CV_MAKETYPE macro: the depth occupies the low bits
    and (cn - 1) is shifted left by CV_CN_SHIFT.
    """
    return depth + ((cn - 1) << CV_CN_SHIFT)
# Convenience aliases, one per depth/channel combination: CV_<depth>C<channels>
CV_8UC1 = _CV_MAKETYPE(CV_8U,1)
CV_8UC2 = _CV_MAKETYPE(CV_8U,2)
CV_8UC3 = _CV_MAKETYPE(CV_8U,3)
CV_8UC4 = _CV_MAKETYPE(CV_8U,4)
CV_8SC1 = _CV_MAKETYPE(CV_8S,1)
CV_8SC2 = _CV_MAKETYPE(CV_8S,2)
CV_8SC3 = _CV_MAKETYPE(CV_8S,3)
CV_8SC4 = _CV_MAKETYPE(CV_8S,4)
CV_16UC1 = _CV_MAKETYPE(CV_16U,1)
CV_16UC2 = _CV_MAKETYPE(CV_16U,2)
CV_16UC3 = _CV_MAKETYPE(CV_16U,3)
CV_16UC4 = _CV_MAKETYPE(CV_16U,4)
CV_16SC1 = _CV_MAKETYPE(CV_16S,1)
CV_16SC2 = _CV_MAKETYPE(CV_16S,2)
CV_16SC3 = _CV_MAKETYPE(CV_16S,3)
CV_16SC4 = _CV_MAKETYPE(CV_16S,4)
CV_32SC1 = _CV_MAKETYPE(CV_32S,1)
CV_32SC2 = _CV_MAKETYPE(CV_32S,2)
CV_32SC3 = _CV_MAKETYPE(CV_32S,3)
CV_32SC4 = _CV_MAKETYPE(CV_32S,4)
CV_32FC1 = _CV_MAKETYPE(CV_32F,1)
CV_32FC2 = _CV_MAKETYPE(CV_32F,2)
CV_32FC3 = _CV_MAKETYPE(CV_32F,3)
CV_32FC4 = _CV_MAKETYPE(CV_32F,4)
CV_64FC1 = _CV_MAKETYPE(CV_64F,1)
CV_64FC2 = _CV_MAKETYPE(CV_64F,2)
CV_64FC3 = _CV_MAKETYPE(CV_64F,3)
CV_64FC4 = _CV_MAKETYPE(CV_64F,4)
#-------------------------------------------------------------------------------
# Template Matching
#-------------------------------------------------------------------------------
# Comparison methods for cvMatchTemplate
CV_TM_SQDIFF = 0
CV_TM_SQDIFF_NORMED = 1
CV_TM_CCORR = 2
CV_TM_CCORR_NORMED = 3
CV_TM_CCOEFF = 4
CV_TM_CCOEFF_NORMED = 5
|
|
#!/usr/bin/env python
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2017 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs a full scan against an API defined by OpenAPI/Swagger or SOAP
# using ZAP
#
# It can either be run 'standalone', in which case it depends on
# https://pypi.python.org/pypi/python-owasp-zap-v2.4 and Docker, or it can be run
# inside one of the ZAP docker containers. It automatically detects if it is
# running in docker so the parameters are the same.
#
# It currently supports APIs defined by:
# OpenAPI/Swagger URL
# OpenAPI/Swagger file
# SOAP URL
# SOAP File
# It will exit with codes of:
# 0: Success
# 1: At least 1 FAIL
# 2: At least one WARN and no FAILs
# 3: Any other failure
# By default all alerts found by ZAP will be treated as WARNings.
# You can use the -c or -u parameters to specify a configuration file to override
# this.
# You can generate a template configuration file using the -g parameter. You will
# then need to change 'WARN' to 'FAIL', 'INFO' or 'IGNORE' for the rules you want
# to be handled differently.
# You can also add your own messages for the rules by appending them after a tab
# at the end of each line.
# By default the active scan rules run are hardcoded in the API-Minimal.policy
# file but you can change them by supplying a configuration file with the rules
# you don't want to be run set to IGNORE.
import getopt
import json
import logging
import os
import os.path
import subprocess
import sys
import time
from datetime import datetime
from six.moves.urllib.parse import urljoin
from zapv2 import ZAPv2
from zap_common import *
class NoUrlsException(Exception):
    """Raised when the API import yields no URLs, so there is nothing to scan."""
    pass
# Per-rule action (WARN/FAIL/INFO/IGNORE) keyed by rule id; filled by load_config().
config_dict = {}
# Optional custom message per rule id, also filled by load_config().
config_msg = {}
# Out-of-scope URL patterns per rule id, also filled by load_config().
out_of_scope_dict = {}
# Index into zap_conf_lvls of the minimum level to report (set via -l).
min_level = 0
# Scan rules that aren't really relevant, eg the examples rules in the alpha set
blacklist = ['-1', '50003', '60000', '60001']
# Scan rules that are being addressed
in_progress_issues = {}
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# Hide "Starting new HTTP connection" messages
logging.getLogger("requests").setLevel(logging.WARNING)
def usage():
    """Print the command-line help for zap-api-scan.py to stdout."""
    help_lines = (
        'Usage: zap-api-scan.py -t <target> -f <format> [options]',
        ' -t target target API definition, OpenAPI or SOAP, local file or URL, eg https://www.example.com/openapi.json',
        ' -f format either openapi or soap',
        'Options:',
        ' -h print this help message',
        ' -c config_file config file to use to INFO, IGNORE or FAIL warnings',
        ' -u config_url URL of config file to use to INFO, IGNORE or FAIL warnings',
        ' -g gen_file generate default config file(all rules set to WARN)',
        ' -r report_html file to write the full ZAP HTML report',
        ' -w report_md file to write the full ZAP Wiki(Markdown) report',
        ' -x report_xml file to write the full ZAP XML report',
        ' -J report_json file to write the full ZAP JSON document',
        ' -a include the alpha passive scan rules as well',
        ' -d show debug messages',
        ' -P specify listen port',
        ' -D delay in seconds to wait for passive scanning ',
        ' -i default rules not in the config file to INFO',
        ' -l level minimum level to show: PASS, IGNORE, INFO, WARN or FAIL, use with -s to hide example URLs',
        ' -n context_file context file which will be loaded prior to scanning the target',
        ' -p progress_file progress file which specifies issues that are being addressed',
        ' -s short output format - dont show PASSes or example URLs',
        ' -T max time in minutes to wait for ZAP to start and the passive scan to run',
        ' -O the hostname to override in the (remote) OpenAPI spec',
        ' -z zap_options ZAP command line options e.g. -z "-config aaa=bbb -config ccc=ddd"',
        ' --hook path to python file that define your custom hooks',
        '',
        'For more details see https://github.com/zaproxy/zaproxy/wiki/ZAP-API-Scan',
    )
    for help_line in help_lines:
        print(help_line)
def main(argv):
    """Run a ZAP API scan driven by command-line arguments.

    Parses the options, starts ZAP (directly when already inside docker,
    otherwise in a new docker container), imports the OpenAPI/SOAP API
    definition, active-scans the target, prints per-rule results and
    writes any requested reports.

    Exit codes: 0 success, 1 at least one FAIL, 2 at least one WARN and
    no FAILs, 3 any other failure.

    Py3 fixes in this revision: ``dict.iteritems()``/``dict.has_key()``
    (removed in Python 3) replaced with ``items()``/``in``, and IOError
    unpacking now goes through ``e.args``.
    """
    global min_level
    global in_progress_issues

    cid = ''
    context_file = ''
    progress_file = ''
    config_file = ''
    config_url = ''
    generate = ''
    port = 0
    detailed_output = True
    report_html = ''
    report_md = ''
    report_xml = ''
    report_json = ''
    target = ''
    target_file = ''
    target_url = ''
    host_override = ''
    # 'openapi' or 'soap'; renamed from 'format' to avoid shadowing the builtin.
    api_format = ''
    zap_alpha = False
    info_unspecified = False
    base_dir = ''
    zap_ip = 'localhost'
    zap_options = ''
    delay = 0
    timeout = 0
    hook_file = None

    pass_count = 0
    warn_count = 0
    fail_count = 0
    info_count = 0
    ignore_count = 0
    warn_inprog_count = 0
    fail_inprog_count = 0

    try:
        opts, args = getopt.getopt(argv, "t:f:c:u:g:m:n:r:J:w:x:l:hdaijp:sz:P:D:T:O:", ["hook="])
    except getopt.GetoptError as exc:
        logging.warning('Invalid option ' + exc.opt + ' : ' + exc.msg)
        usage()
        sys.exit(3)

    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit(0)
        elif opt == '-t':
            target = arg
            logging.debug('Target: ' + target)
        elif opt == '-f':
            api_format = arg
        elif opt == '-c':
            config_file = arg
        elif opt == '-u':
            config_url = arg
        elif opt == '-g':
            generate = arg
        elif opt == '-d':
            logging.getLogger().setLevel(logging.DEBUG)
        elif opt == '-P':
            port = int(arg)
        elif opt == '-D':
            delay = int(arg)
        elif opt == '-n':
            context_file = arg
        elif opt == '-p':
            progress_file = arg
        elif opt == '-r':
            report_html = arg
        elif opt == '-J':
            report_json = arg
        elif opt == '-w':
            report_md = arg
        elif opt == '-x':
            report_xml = arg
        elif opt == '-a':
            zap_alpha = True
        elif opt == '-i':
            info_unspecified = True
        elif opt == '-l':
            try:
                min_level = zap_conf_lvls.index(arg)
            except ValueError:
                logging.warning('Level must be one of ' + str(zap_conf_lvls))
                usage()
                sys.exit(3)
        elif opt == '-z':
            zap_options = arg
        elif opt == '-s':
            detailed_output = False
        elif opt == '-T':
            timeout = int(arg)
        elif opt == '-O':
            host_override = arg
        elif opt == '--hook':
            hook_file = arg

    check_zap_client_version()

    load_custom_hooks(hook_file)
    trigger_hook('cli_opts', opts)

    # Check target supplied and ok
    if len(target) == 0:
        usage()
        sys.exit(3)

    if api_format != 'openapi' and api_format != 'soap':
        logging.warning('Format must be either \'openapi\' or \'soap\'')
        usage()
        sys.exit(3)

    if running_in_docker():
        base_dir = '/zap/wrk/'
        if config_file or generate or report_html or report_xml or report_json or progress_file or context_file or target_file:
            # Check directory has been mounted
            if not os.path.exists(base_dir):
                logging.warning('A file based option has been specified but the directory \'/zap/wrk\' is not mounted ')
                usage()
                sys.exit(3)

    if target.startswith('http://') or target.startswith('https://'):
        target_url = target
    else:
        # assume its a file
        if not os.path.exists(base_dir + target):
            logging.warning('Target must either start with \'http://\' or \'https://\' or be a local file')
            logging.warning('File does not exist: ' + base_dir + target)
            usage()
            sys.exit(3)
        else:
            target_file = target

    # Choose a random 'ephemeral' port and check its available if it wasn't specified with -P option
    if port == 0:
        port = get_free_port()

    logging.debug('Using port: ' + str(port))

    if config_file:
        # load config file from filestore
        with open(base_dir + config_file) as f:
            try:
                load_config(f, config_dict, config_msg, out_of_scope_dict)
            except ValueError as e:
                logging.warning("Failed to load config file " + base_dir + config_file + " " + str(e))
                sys.exit(3)
    elif config_url:
        # load config file from url
        try:
            config_data = urlopen(config_url).read().decode('UTF-8').splitlines()
            load_config(config_data, config_dict, config_msg, out_of_scope_dict)
        except ValueError as e:
            logging.warning("Failed to read configs from " + config_url + " " + str(e))
            sys.exit(3)
        except:
            # Deliberately broad: any network/decode failure is fatal here.
            logging.warning('Failed to read configs from ' + config_url)
            sys.exit(3)

    if progress_file:
        # load progress file from filestore
        with open(base_dir + progress_file) as f:
            progress = json.load(f)
            # parse into something more useful...
            # in_prog_issues = map of vulnid -> {object with everything in}
            for issue in progress["issues"]:
                if issue["state"] == "inprogress":
                    in_progress_issues[issue["id"]] = issue

    if running_in_docker():
        try:
            params = [
                '-addonupdate',
                '-addoninstall', 'pscanrulesBeta']  # In case we're running in the stable container

            if zap_alpha:
                params.append('-addoninstall')
                params.append('pscanrulesAlpha')

            add_zap_options(params, zap_options)

            start_zap(port, params)
        except OSError:
            logging.warning('Failed to start ZAP :(')
            sys.exit(3)
    else:
        # Not running in docker, so start one
        mount_dir = ''
        if context_file:
            mount_dir = os.path.dirname(os.path.abspath(context_file))

        params = ['-addonupdate']
        if zap_alpha:
            params.extend(['-addoninstall', 'pscanrulesAlpha'])

        add_zap_options(params, zap_options)

        try:
            cid = start_docker_zap('owasp/zap2docker-weekly', port, params, mount_dir)
            zap_ip = ipaddress_for_cid(cid)
            logging.debug('Docker ZAP IP Addr: ' + zap_ip)

            # Copy across the files that may not be in all of the docker images
            try:
                subprocess.check_output(['docker', 'exec', '-t', cid, 'mkdir', '-p', '/home/zap/.ZAP_D/scripts/scripts/httpsender/'])
                cp_to_docker(cid, 'scripts/scripts/httpsender/Alert_on_HTTP_Response_Code_Errors.js', '/home/zap/.ZAP_D/')
                cp_to_docker(cid, 'scripts/scripts/httpsender/Alert_on_Unexpected_Content_Types.js', '/home/zap/.ZAP_D/')
                cp_to_docker(cid, 'policies/API-Minimal.policy', '/home/zap/.ZAP_D/')
                if target_file:
                    cp_to_docker(cid, target_file, '/zap/')
            except OSError:
                logging.warning('Failed to copy one of the required files')
                sys.exit(3)
        except OSError:
            logging.warning('Failed to start ZAP in docker :(')
            sys.exit(3)

    try:
        zap = ZAPv2(proxies={'http': 'http://' + zap_ip + ':' + str(port), 'https': 'http://' + zap_ip + ':' + str(port)})

        wait_for_zap_start(zap, timeout * 60)
        trigger_hook('zap_started', zap, target)

        if context_file:
            # handle the context file, cant use base_dir as it might not have been set up
            zap_import_context(zap, '/zap/wrk/' + os.path.basename(context_file))

        # Enable scripts
        zap.script.load('Alert_on_HTTP_Response_Code_Errors.js', 'httpsender', 'Oracle Nashorn', '/home/zap/.ZAP_D/scripts/scripts/httpsender/Alert_on_HTTP_Response_Code_Errors.js')
        zap.script.enable('Alert_on_HTTP_Response_Code_Errors.js')
        zap.script.load('Alert_on_Unexpected_Content_Types.js', 'httpsender', 'Oracle Nashorn', '/home/zap/.ZAP_D/scripts/scripts/httpsender/Alert_on_Unexpected_Content_Types.js')
        zap.script.enable('Alert_on_Unexpected_Content_Types.js')

        # Import the API defn
        if api_format == 'openapi':
            trigger_hook('importing_openapi', target_url, target_file)
            if target_url:
                logging.debug('Import OpenAPI URL ' + target_url)
                res = zap.openapi.import_url(target, host_override)
                urls = zap.core.urls()
                if host_override:
                    target = urljoin(target_url, '//' + host_override)
                    logging.info('Using host override, new target: {0}'.format(target))
            else:
                logging.debug('Import OpenAPI File ' + target_file)
                res = zap.openapi.import_file(base_dir + target_file)
                urls = zap.core.urls()
                if len(urls) > 0:
                    # Choose the first one - will be striping off the path below
                    target = urls[0]
                    logging.debug('Using target from imported file: {0}'.format(target))
        else:
            trigger_hook('importing_soap', target_url, target_file)
            if target_url:
                logging.debug('Import SOAP URL ' + target_url)
                res = zap._request(zap.base + 'soap/action/importUrl/', {'url': target})
                urls = zap.core.urls()
            else:
                logging.debug('Import SOAP File ' + target_file)
                res = zap._request(zap.base + 'soap/action/importFile/', {'file': base_dir + target_file})
                urls = zap.core.urls()
                if len(urls) > 0:
                    # Choose the first one - will be striping off the path below
                    target = urls[0]
                    logging.debug('Using target from imported file: {0}'.format(target))

        logging.info('Number of Imported URLs: ' + str(len(urls)))
        logging.debug('Import warnings: ' + str(res))

        if len(urls) == 0:
            logging.warning('Failed to import any URLs')
            # No point continue, there's nothing to scan.
            raise NoUrlsException()

        if target.count('/') > 2:
            old_target = target
            # The url can include a valid path, but always reset to scan the host
            target = target[0:target.index('/', 8) + 1]
            logging.debug('Normalised target from {0} to {1}'.format(old_target, target))

        # Wait for a delay if specified with -D option
        if delay:
            start_scan = datetime.now()
            while (datetime.now() - start_scan).seconds < delay:
                time.sleep(5)
                logging.debug('Delay active scan ' + str(delay - (datetime.now() - start_scan).seconds) + ' seconds')

        # Set up the scan policy
        scan_policy = 'API-Minimal'
        if config_dict:
            # They have supplied a config file, use this to define the ascan rules
            # Use the default one as the script might not have write access to the one just copied across
            scan_policy = 'Default Policy'
            zap.ascan.enable_all_scanners(scanpolicyname=scan_policy)
            for scanner, state in config_dict.items():
                if state == 'IGNORE':
                    # Dont bother checking the result - this will fail for pscan rules
                    zap.ascan.set_scanner_alert_threshold(id=scanner, alertthreshold='OFF', scanpolicyname=scan_policy)

        zap_active_scan(zap, target, scan_policy)

        zap_wait_for_passive_scan(zap, timeout * 60)

        # Print out a count of the number of urls
        num_urls = len(zap.core.urls())
        if num_urls == 0:
            logging.warning('No URLs found - is the target URL accessible? Local services may not be accessible from the Docker container')
        else:
            if detailed_output:
                print('Total of ' + str(num_urls) + ' URLs')

            alert_dict = zap_get_alerts(zap, target, blacklist, out_of_scope_dict)

            all_ascan_rules = zap.ascan.scanners('Default Policy')
            all_pscan_rules = zap.pscan.scanners
            all_dict = {}
            for rule in all_pscan_rules:
                plugin_id = rule.get('id')
                if plugin_id in blacklist:
                    continue
                all_dict[plugin_id] = rule.get('name') + ' - Passive/' + rule.get('quality')
            for rule in all_ascan_rules:
                plugin_id = rule.get('id')
                if plugin_id in blacklist:
                    continue
                all_dict[plugin_id] = rule.get('name') + ' - Active/' + rule.get('quality')

            if generate:
                # Create the config file
                with open(base_dir + generate, 'w') as f:
                    f.write('# zap-api-scan rule configuration file\n')
                    f.write('# Change WARN to IGNORE to ignore rule or FAIL to fail if rule matches\n')
                    f.write('# Active scan rules set to IGNORE will not be run which will speed up the scan\n')
                    f.write('# Only the rule identifiers are used - the names are just for info\n')
                    f.write('# You can add your own messages to each rule by appending them after a tab on each line.\n')
                    # Py3 fix: iteritems() -> items()
                    for key, rule in sorted(all_dict.items()):
                        f.write(key + '\tWARN\t(' + rule + ')\n')

            # print out the passing rules
            pass_dict = {}
            for rule in all_pscan_rules:
                plugin_id = rule.get('id')
                if plugin_id in blacklist:
                    continue
                # Py3 fix: has_key() -> in
                if plugin_id not in alert_dict:
                    pass_dict[plugin_id] = rule.get('name')
            for rule in all_ascan_rules:
                plugin_id = rule.get('id')
                if plugin_id in blacklist:
                    continue
                if plugin_id not in alert_dict and not (plugin_id in config_dict and config_dict[plugin_id] == 'IGNORE'):
                    pass_dict[plugin_id] = rule.get('name')

            if min_level == zap_conf_lvls.index("PASS") and detailed_output:
                for key, rule in sorted(pass_dict.items()):
                    print('PASS: ' + rule + ' [' + key + ']')

            pass_count = len(pass_dict)

            if detailed_output:
                # print out the ignored ascan rules(there will be no alerts for these as they were not run)
                for rule in all_ascan_rules:
                    plugin_id = rule.get('id')
                    if plugin_id in blacklist:
                        continue
                    if plugin_id in config_dict and config_dict[plugin_id] == 'IGNORE':
                        print('SKIP: ' + rule.get('name') + ' [' + plugin_id + ']')

            # print out the ignored rules
            ignore_count, not_used = print_rules(alert_dict, 'IGNORE', config_dict, config_msg, min_level,
                                                 inc_ignore_rules, True, detailed_output, {})

            # print out the info rules
            info_count, not_used = print_rules(alert_dict, 'INFO', config_dict, config_msg, min_level,
                                               inc_info_rules, info_unspecified, detailed_output, in_progress_issues)

            # print out the warning rules
            warn_count, warn_inprog_count = print_rules(alert_dict, 'WARN', config_dict, config_msg, min_level,
                                                        inc_warn_rules, not info_unspecified, detailed_output, in_progress_issues)

            # print out the failing rules
            fail_count, fail_inprog_count = print_rules(alert_dict, 'FAIL', config_dict, config_msg, min_level,
                                                        inc_fail_rules, True, detailed_output, in_progress_issues)

            if report_html:
                # Save the report
                write_report(base_dir + report_html, zap.core.htmlreport())

            if report_json:
                # Save the report
                write_report(base_dir + report_json, zap.core.jsonreport())

            if report_md:
                # Save the report
                write_report(base_dir + report_md, zap.core.mdreport())

            if report_xml:
                # Save the report
                write_report(base_dir + report_xml, zap.core.xmlreport())

            print('FAIL-NEW: ' + str(fail_count) + '\tFAIL-INPROG: ' + str(fail_inprog_count) +
                  '\tWARN-NEW: ' + str(warn_count) + '\tWARN-INPROG: ' + str(warn_inprog_count) +
                  '\tINFO: ' + str(info_count) + '\tIGNORE: ' + str(ignore_count) + '\tPASS: ' + str(pass_count))

        trigger_hook('zap_pre_shutdown', zap)
        # Stop ZAP
        zap.core.shutdown()

    except IOError as e:
        if hasattr(e, 'args') and len(e.args) > 1:
            # Py3 fix: exceptions are no longer iterable; unpack via e.args.
            errno, strerror = e.args[:2]
            print("ERROR " + str(strerror))
            logging.warning('I/O error(' + str(errno) + '): ' + str(strerror))
        else:
            print("ERROR %s" % e)
            logging.warning('I/O error: ' + str(e))
        dump_log_file(cid)

    except NoUrlsException:
        dump_log_file(cid)

    except:
        # Last-resort guard so the ZAP log is always dumped before exiting.
        print("ERROR " + str(sys.exc_info()[0]))
        logging.warning('Unexpected error: ' + str(sys.exc_info()[0]))
        dump_log_file(cid)

    if not running_in_docker():
        stop_docker(cid)

    trigger_hook('pre_exit', fail_count, warn_count, pass_count)

    if fail_count > 0:
        sys.exit(1)
    elif warn_count > 0:
        sys.exit(2)
    elif pass_count > 0:
        sys.exit(0)
    else:
        sys.exit(3)
# Script entry point: pass the CLI arguments (excluding the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
|
|
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
import re
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LW, _LE
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import rest_client
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
class AbsReplicaOp(object):
    """Abstract base for replication operations against an array client.

    Subclasses override the lifecycle hooks (create/delete/sync/split/...);
    the status helpers defined here are shared by all implementations.
    """

    def __init__(self, client):
        # REST client used to talk to the storage array.
        self.client = client

    def create(self, **kwargs):
        """Create a replication object on the array. Overridden by subclasses."""
        pass

    def delete(self, replica_id):
        pass

    def protect_second(self, replica_id):
        pass

    def unprotect_second(self, replica_id):
        pass

    def sync(self, replica_id):
        pass

    def split(self, replica_id):
        pass

    def switch(self, replica_id):
        pass

    def is_primary(self, replica_info):
        """Return True if *replica_info* marks this side as the primary."""
        flag = replica_info.get('ISPRIMARY')
        return bool(flag) and flag.lower() == 'true'

    def get_replica_info(self, replica_id):
        """Return the replica's info dict from the array. Overridden."""
        return {}

    def _is_status(self, status_key, status, replica_info):
        """Check whether replica_info[status_key] matches *status*.

        *status* may be a single string or a list/tuple of acceptable
        values; any other type yields False.
        """
        current = replica_info.get(status_key, '')
        # isinstance() instead of type() comparison: idiomatic, and also
        # accepts subclasses of list/tuple/str.
        if isinstance(status, (list, tuple)):
            return current in status
        if isinstance(status, str):
            return current == status
        return False

    def is_running_status(self, status, replica_info):
        """True if the replica's running status matches *status*."""
        return self._is_status(constants.REPLICA_RUNNING_STATUS_KEY,
                               status, replica_info)

    def is_health_status(self, status, replica_info):
        """True if the replica's health status matches *status*."""
        return self._is_status(constants.REPLICA_HEALTH_STATUS_KEY,
                               status, replica_info)
class PairOp(AbsReplicaOp):
    """Replication-pair operations backed by the array REST client."""

    def create(self, local_lun_id, rmt_lun_id, rmt_dev_id,
               rmt_dev_name, replica_model,
               speed=constants.REPLICA_SPEED,
               period=constants.REPLICA_PERIOD,
               **kwargs):
        """Create a replication pair and return the array's pair info."""
        super(PairOp, self).create(**kwargs)

        pair_params = {
            "LOCALRESID": local_lun_id,
            "LOCALRESTYPE": '11',
            "REMOTEDEVICEID": rmt_dev_id,
            "REMOTEDEVICENAME": rmt_dev_name,
            "REMOTERESID": rmt_lun_id,
            "REPLICATIONMODEL": replica_model,
            # recovery policy. 1: auto, 2: manual
            "RECOVERYPOLICY": '2',
            "SPEED": speed,
        }

        if replica_model == constants.REPLICA_ASYNC_MODEL:
            # Synchronize type values:
            # 1, manual
            # 2, timed wait when synchronization begins
            # 3, timed wait when synchronization ends
            pair_params['SYNCHRONIZETYPE'] = '2'
            pair_params['TIMINGVAL'] = period

        try:
            created_pair = self.client.create_pair(pair_params)
        except Exception as err:
            msg = _('Create replication pair failed. Error: %s.') % err
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return created_pair

    def delete(self, pair_id, force=False):
        """Delete the pair; *force* removes it even when not cleanly split."""
        self.client.delete_pair(pair_id, force)

    def split(self, pair_id):
        self.client.split_pair(pair_id)

    def sync(self, pair_id):
        self.client.sync_pair(pair_id)

    def switch(self, pair_id):
        self.client.switch_pair(pair_id)

    def protect_second(self, pair_id):
        # Secondary LUN becomes read-only.
        self.client.set_pair_second_access(pair_id,
                                           constants.REPLICA_SECOND_RO)

    def unprotect_second(self, pair_id):
        # Secondary LUN becomes read-write.
        self.client.set_pair_second_access(pair_id,
                                           constants.REPLICA_SECOND_RW)

    def get_replica_info(self, pair_id):
        """Return the pair's info dict as reported by the array."""
        return self.client.get_pair_by_id(pair_id)
class CGOp(AbsReplicaOp):
    """Consistency-group replication operations.

    Placeholder: inherits the no-op lifecycle hooks from AbsReplicaOp.
    """
    pass
class ReplicaCommonDriver(object):
    """Drive replication state transitions and wait for them to settle.

    Wraps an AbsReplicaOp implementation (e.g. PairOp) and adds the
    idempotency checks and polling loops required around each array call.
    """

    def __init__(self, conf, replica_op):
        self.conf = conf
        self.op = replica_op

    def protect_second(self, replica_id):
        """Make the secondary LUN read-only and wait for it to take effect."""
        info = self.op.get_replica_info(replica_id)
        if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RO:
            return

        self.op.protect_second(replica_id)
        self.wait_second_access(replica_id, constants.REPLICA_SECOND_RO)

    def unprotect_second(self, replica_id):
        """Make the secondary LUN read-write and wait for it to take effect."""
        info = self.op.get_replica_info(replica_id)
        if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RW:
            return

        self.op.unprotect_second(replica_id)
        self.wait_second_access(replica_id, constants.REPLICA_SECOND_RW)

    def sync(self, replica_id, wait_complete=False):
        """Start synchronization of the pair.

        :param wait_complete: when True, block until the pair reports a
            synchronized and healthy state.
        """
        self.protect_second(replica_id)

        expect_status = (constants.REPLICA_RUNNING_STATUS_NORMAL,
                         constants.REPLICA_RUNNING_STATUS_SYNC,
                         constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC)
        info = self.op.get_replica_info(replica_id)

        # When running status is synchronizing or normal,
        # it's not necessary to do synchronize again.
        if (info.get('REPLICATIONMODEL') == constants.REPLICA_SYNC_MODEL
                and self.op.is_running_status(expect_status, info)):
            return

        self.op.sync(replica_id)
        self.wait_expect_state(replica_id, expect_status)
        if wait_complete:
            self.wait_replica_ready(replica_id)

    def split(self, replica_id):
        """Split the pair; a no-op if it is already split or invalid."""
        running_status = (constants.REPLICA_RUNNING_STATUS_SPLIT,
                          constants.REPLICA_RUNNING_STATUS_INVALID)
        info = self.op.get_replica_info(replica_id)
        if self.op.is_running_status(running_status, info):
            return

        try:
            self.op.split(replica_id)
        except Exception as err:
            # Best effort: the state check below decides whether to fail.
            LOG.warning(_LW('Split replication exception: %s.'), err)

        try:
            self.wait_expect_state(replica_id, running_status)
        except Exception:
            msg = _('Split replication failed.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    def enable(self, replica_id, wait_sync_complete=False):
        """Enable replication: become primary if needed, then synchronize.

        BUG FIX: wait_sync_complete was previously accepted but never
        forwarded to sync(), so callers requesting a blocking sync (e.g.
        enable_replica() for sync-model pairs) returned before the
        synchronization actually finished.
        """
        info = self.op.get_replica_info(replica_id)
        if not self.op.is_primary(info):
            self.switch(replica_id)
        self.sync(replica_id, wait_sync_complete)
        return None

    def disable(self, replica_id):
        """Disable replication by splitting the pair."""
        self.split(replica_id)
        return None

    def switch(self, replica_id):
        """Switch roles so this side becomes primary; waits for the switch."""
        self.split(replica_id)
        self.unprotect_second(replica_id)
        self.op.switch(replica_id)

        # Wait to be primary
        def _wait_switch_to_primary():
            info = self.op.get_replica_info(replica_id)
            if self.op.is_primary(info):
                return True
            return False

        interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL
        timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT
        huawei_utils.wait_for_condition(_wait_switch_to_primary,
                                        interval,
                                        timeout)

    def failover(self, replica_id):
        """Failover replication.

        Purpose:
            1. Split replication.
            2. Set secondary access read & write.
        """
        info = self.op.get_replica_info(replica_id)
        if self.op.is_primary(info):
            msg = _('We should not do switch over on primary array.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        sync_status_set = (constants.REPLICA_RUNNING_STATUS_SYNC,
                           constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC)
        # Let an in-flight synchronization finish so the secondary is
        # consistent before we split.
        if self.op.is_running_status(sync_status_set, info):
            self.wait_replica_ready(replica_id)

        self.split(replica_id)
        self.op.unprotect_second(replica_id)

    def wait_replica_ready(self, replica_id, interval=None, timeout=None):
        """Block until the pair is synchronized and healthy.

        Raises VolumeBackendAPIException as soon as the pair leaves the
        synchronizing states without reaching a normal state.
        """
        LOG.debug('Wait synchronize complete.')
        running_status_normal = (constants.REPLICA_RUNNING_STATUS_NORMAL,
                                 constants.REPLICA_RUNNING_STATUS_SYNCED)
        running_status_sync = (constants.REPLICA_RUNNING_STATUS_SYNC,
                               constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC)
        health_status_normal = constants.REPLICA_HEALTH_STATUS_NORMAL

        def _replica_ready():
            info = self.op.get_replica_info(replica_id)
            if (self.op.is_running_status(running_status_normal, info)
                    and self.op.is_health_status(health_status_normal, info)):
                return True
            if not self.op.is_running_status(running_status_sync, info):
                msg = (_('Wait synchronize failed. Running status: %s.') %
                       info.get(constants.REPLICA_RUNNING_STATUS_KEY))
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
            return False

        if not interval:
            interval = constants.DEFAULT_WAIT_INTERVAL
        if not timeout:
            timeout = constants.DEFAULT_WAIT_TIMEOUT
        huawei_utils.wait_for_condition(_replica_ready,
                                        interval,
                                        timeout)

    def wait_second_access(self, replica_id, access_level):
        """Block until the secondary's access mode equals *access_level*."""
        def _check_access():
            info = self.op.get_replica_info(replica_id)
            if info.get('SECRESACCESS') == access_level:
                return True
            return False

        interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL
        timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT
        huawei_utils.wait_for_condition(_check_access,
                                        interval,
                                        timeout)

    def wait_expect_state(self, replica_id,
                          running_status, health_status=None,
                          interval=None, timeout=None):
        """Block until the pair reaches *running_status* (and, when given,
        *health_status*)."""
        def _check_state():
            info = self.op.get_replica_info(replica_id)
            if self.op.is_running_status(running_status, info):
                if (not health_status
                        or self.op.is_health_status(health_status, info)):
                    return True
            return False

        if not interval:
            interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL
        if not timeout:
            timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT
        huawei_utils.wait_for_condition(_check_state, interval, timeout)
def get_replication_driver_data(volume):
    """Deserialize the volume's replication_driver_data JSON field.

    Returns an empty dict when the field is missing or empty.
    """
    raw = volume.get('replication_driver_data')
    return json.loads(raw) if raw else {}
def to_string(dict_data):
    """Serialize *dict_data* to a JSON string; falsy input yields ''."""
    return json.dumps(dict_data) if dict_data else ''
class ReplicaPairManager(object):
    """Manage replication pairs between the local array and the single
    remote replication_device configured for this backend.
    """

    def __init__(self, local_client, conf):
        self.local_client = local_client
        self.conf = conf

        self.replica_device = self.conf.safe_get('replication_device')
        if not self.replica_device:
            # Replication is not configured; leave the manager inert.
            return

        # managed_backed_name format: host_name@backend_name#pool_name
        self.rmt_backend = self.replica_device[0]['managed_backend_name']
        self.rmt_pool = volume_utils.extract_host(self.rmt_backend,
                                                  level='pool')
        self.target_dev_id = self.replica_device[0]['target_device_id']

        self._init_rmt_client()
        self.local_op = PairOp(self.local_client)
        self.local_driver = ReplicaCommonDriver(self.conf, self.local_op)
        self.rmt_op = PairOp(self.rmt_client)
        self.rmt_driver = ReplicaCommonDriver(self.conf, self.rmt_op)

        self.try_login_remote_array()

    def try_login_remote_array(self):
        """Best-effort login to the remote array; failures are only logged."""
        try:
            self.rmt_client.login()
        except Exception as err:
            LOG.warning(_LW('Remote array login failed. Error: %s.'), err)

    def try_get_remote_wwn(self):
        """Return the remote array's WWN, or None on any error."""
        try:
            info = self.rmt_client.get_array_info()
            return info.get('wwn')
        except Exception as err:
            LOG.warning(_LW('Get remote array wwn failed. Error: %s.'), err)
            return None

    def get_remote_device_by_wwn(self, wwn):
        """Return the local array's record of the remote device with *wwn*,
        or {} when not found or on error."""
        devices = {}
        try:
            devices = self.local_client.get_remote_devices()
        except Exception as err:
            LOG.warning(_LW('Get remote devices failed. Error: %s.'), err)

        for device in devices:
            if device.get('WWN') == wwn:
                return device
        return {}

    def check_remote_available(self):
        """Return True when the configured remote device is usable."""
        if not self.replica_device:
            return False

        # We get device wwn in every check time.
        # If remote array changed, we can run normally.
        wwn = self.try_get_remote_wwn()
        if not wwn:
            return False

        device = self.get_remote_device_by_wwn(wwn)
        # Check remote device is available to use.
        # If array type is replication, 'ARRAYTYPE' == '1'.
        # If health status is normal, 'HEALTHSTATUS' == '1'.
        if (device and device.get('ARRAYTYPE') == '1'
                and device.get('HEALTHSTATUS') == '1'
                and device.get('RUNNINGSTATUS') == constants.STATUS_RUNNING):
            return True

        return False

    def update_replica_capability(self, stats):
        """Annotate pool stats with replication capability flags."""
        is_rmt_dev_available = self.check_remote_available()
        if not is_rmt_dev_available:
            if self.replica_device:
                LOG.warning(_LW('Remote device is unavailable. '
                                'Remote backend: %s.'),
                            self.rmt_backend)
            return stats

        for pool in stats['pools']:
            pool['replication_enabled'] = True
            pool['replication_type'] = ['sync', 'async']

        return stats

    def _init_rmt_client(self):
        """Create the REST client for the remote array."""
        # Multiple addresses support.
        rmt_addrs = self.replica_device[0]['san_address'].split(';')
        rmt_addrs = list(set([x.strip() for x in rmt_addrs if x.strip()]))
        rmt_user = self.replica_device[0]['san_user']
        rmt_password = self.replica_device[0]['san_password']
        self.rmt_client = rest_client.RestClient(self.conf,
                                                 rmt_addrs,
                                                 rmt_user,
                                                 rmt_password)

    def get_rmt_dev_info(self):
        """Return (device_id, device_name) of the remote device, or
        (None, None) when it cannot be determined."""
        wwn = self.try_get_remote_wwn()
        if not wwn:
            return None, None

        device = self.get_remote_device_by_wwn(wwn)
        if not device:
            return None, None

        return device.get('ID'), device.get('NAME')

    def build_rmt_lun_params(self, local_lun_info):
        """Build creation parameters for a remote LUN mirroring the local
        LUN's name, capacity, and the configured cache/transfer policies."""
        params = {
            'TYPE': '11',
            'NAME': local_lun_info['NAME'],
            'PARENTTYPE': '216',
            'PARENTID': self.rmt_client.get_pool_id(self.rmt_pool),
            'DESCRIPTION': local_lun_info['DESCRIPTION'],
            'ALLOCTYPE': local_lun_info['ALLOCTYPE'],
            'CAPACITY': local_lun_info['CAPACITY'],
            'WRITEPOLICY': self.conf.lun_write_type,
            'MIRRORPOLICY': self.conf.lun_mirror_switch,
            'PREFETCHPOLICY': self.conf.lun_prefetch_type,
            'PREFETCHVALUE': self.conf.lun_prefetch_value,
            'DATATRANSFERPOLICY': self.conf.lun_policy,
            'READCACHEPOLICY': self.conf.lun_read_cache_policy,
            'WRITECACHEPOLICY': self.conf.lun_write_cache_policy,
        }
        LOG.debug('Remote lun params: %s.', params)
        return params

    def wait_volume_online(self, client, lun_info,
                           interval=None, timeout=None):
        """Poll until the LUN's running status reports online."""
        online_status = constants.STATUS_VOLUME_READY
        if lun_info.get('RUNNINGSTATUS') == online_status:
            return

        lun_id = lun_info['ID']

        def _wait_online():
            info = client.get_lun_info(lun_id)
            return info.get('RUNNINGSTATUS') == online_status

        if not interval:
            interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL
        if not timeout:
            timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT
        huawei_utils.wait_for_condition(_wait_online,
                                        interval,
                                        timeout)

    def create_rmt_lun(self, local_lun_info):
        """Create the remote LUN and wait for it to come online.

        The LUN is deleted again if it never reaches the online state.
        """
        # Create on rmt array. If failed, raise exception.
        lun_params = self.build_rmt_lun_params(local_lun_info)
        lun_info = self.rmt_client.create_lun(lun_params)
        try:
            self.wait_volume_online(self.rmt_client, lun_info)
        except exception.VolumeBackendAPIException:
            with excutils.save_and_reraise_exception():
                self.rmt_client.delete_lun(lun_info['ID'])

        return lun_info

    def create_replica(self, local_lun_info, replica_model):
        """Create remote LUN and replication pair.

        Purpose:
            1. create remote lun
            2. create replication pair
            3. enable replication pair
        """
        LOG.debug(('Create replication, local lun info: %(info)s, '
                   'replication model: %(model)s.'),
                  {'info': local_lun_info, 'model': replica_model})

        local_lun_id = local_lun_info['ID']
        self.wait_volume_online(self.local_client, local_lun_info)

        # step1, create remote lun
        rmt_lun_info = self.create_rmt_lun(local_lun_info)
        rmt_lun_id = rmt_lun_info['ID']

        # step2, get remote device info
        rmt_dev_id, rmt_dev_name = self.get_rmt_dev_info()
        # BUG FIX: validate rmt_dev_id here. The original checked
        # rmt_lun_id, which was just created above and is always truthy,
        # so a failed device lookup (rmt_dev_id=None) slipped through to
        # the pair-creation call.
        if not rmt_dev_id or not rmt_dev_name:
            self._delete_rmt_lun(rmt_lun_id)
            msg = _('Get remote device info failed.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # step3, create replication pair
        try:
            pair_info = self.local_op.create(local_lun_id,
                                             rmt_lun_id, rmt_dev_id,
                                             rmt_dev_name, replica_model)
            pair_id = pair_info['ID']
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Create pair failed. Error: %s.'), err)
                self._delete_rmt_lun(rmt_lun_id)

        # step4, start sync manually. If replication type is sync,
        # then wait for sync complete.
        wait_complete = (replica_model == constants.REPLICA_SYNC_MODEL)
        try:
            self.local_driver.sync(pair_id, wait_complete)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Start synchronization failed. Error: %s.'), err)
                self._delete_pair(pair_id)
                self._delete_rmt_lun(rmt_lun_id)

        model_update = {}
        driver_data = {'pair_id': pair_id,
                       'rmt_lun_id': rmt_lun_id}
        model_update['replication_driver_data'] = to_string(driver_data)
        model_update['replication_status'] = 'enabled'
        LOG.debug('Create replication, return info: %s.', model_update)
        return model_update

    def _delete_pair(self, pair_id):
        """Split and delete the pair if it still exists on the array."""
        if (not pair_id
                or not self.local_client.check_pair_exist(pair_id)):
            return

        self.local_driver.split(pair_id)
        self.local_op.delete(pair_id)

    def _delete_rmt_lun(self, lun_id):
        """Delete the remote LUN if it still exists."""
        if lun_id and self.rmt_client.check_lun_exist(lun_id):
            self.rmt_client.delete_lun(lun_id)

    def delete_replica(self, volume):
        """Delete replication pair and remote lun.

        Purpose:
            1. delete replication pair
            2. delete remote_lun
        """
        LOG.debug('Delete replication, volume: %s.', volume['id'])
        info = get_replication_driver_data(volume)

        pair_id = info.get('pair_id')
        if pair_id:
            self._delete_pair(pair_id)

        # Delete remote_lun
        rmt_lun_id = info.get('rmt_lun_id')
        if rmt_lun_id:
            self._delete_rmt_lun(rmt_lun_id)

    def enable_replica(self, volume):
        """Enable replication.

        Purpose:
            1. If local backend's array is secondary, switch to primary
            2. Synchronize data
        """
        LOG.debug('Enable replication, volume: %s.', volume['id'])
        info = get_replication_driver_data(volume)
        pair_id = info.get('pair_id')
        if not pair_id:
            msg = _('No pair id in volume replication_driver_data.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        info = self.local_op.get_replica_info(pair_id)
        if not info:
            msg = _('Pair does not exist on array. Pair id: %s.') % pair_id
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Sync-model pairs must block until synchronization completes.
        wait_sync_complete = False
        if info.get('REPLICATIONMODEL') == constants.REPLICA_SYNC_MODEL:
            wait_sync_complete = True
        return self.local_driver.enable(pair_id, wait_sync_complete)

    def disable_replica(self, volume):
        """We consider that all abnormal states is disabled."""
        LOG.debug('Disable replication, volume: %s.', volume['id'])
        info = get_replication_driver_data(volume)
        pair_id = info.get('pair_id')
        if not pair_id:
            LOG.warning(_LW('No pair id in volume replication_driver_data.'))
            return None
        return self.local_driver.disable(pair_id)

    def failover_replica(self, volume):
        """Just make the secondary available."""
        LOG.debug('Failover replication, volume: %s.', volume['id'])
        info = get_replication_driver_data(volume)
        pair_id = info.get('pair_id')
        if not pair_id:
            msg = _('No pair id in volume replication_driver_data.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        rmt_lun_id = info.get('rmt_lun_id')
        if not rmt_lun_id:
            msg = _('No remote LUN id in volume replication_driver_data.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Remote array must be available. So we can get the real pool info.
        lun_info = self.rmt_client.get_lun_info(rmt_lun_id)
        lun_wwn = lun_info.get('WWN')
        lun_pool = lun_info.get('PARENTNAME')
        # Rewrite only the pool part (after '#') of the managed backend name.
        new_backend = re.sub(r'(?<=#).*$', lun_pool, self.rmt_backend)

        self.rmt_driver.failover(pair_id)

        metadata = huawei_utils.get_volume_metadata(volume)
        metadata.update({'lun_wwn': lun_wwn})

        # Roles swap after failover: the old local LUN becomes the remote
        # side in the driver data, and the remote LUN becomes the volume's
        # provider location.
        new_driver_data = {'pair_id': pair_id,
                           'rmt_lun_id': volume['provider_location']}
        new_driver_data = to_string(new_driver_data)
        return {'host': new_backend,
                'provider_location': rmt_lun_id,
                'replication_driver_data': new_driver_data,
                'metadata': metadata}

    def list_replica_targets(self, volume):
        """Return the configured replication target(s) for *volume*."""
        info = get_replication_driver_data(volume)
        if not info:
            LOG.warning(_LW('Replication driver data does not exist. '
                            'Volume: %s'), volume['id'])

        targets = [{'target_device_id': self.target_dev_id}]
        return {'volume_id': volume['id'],
                'targets': targets}
def get_replication_opts(opts):
    """Map the 'sync'/'async' replication_type option to array model codes."""
    model = (constants.REPLICA_SYNC_MODEL
             if opts.get('replication_type') == 'sync'
             else constants.REPLICA_ASYNC_MODEL)
    opts['replication_type'] = model
    return opts
|
|
# Copyright 2017 Brandon T. Gorman
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# BUILT USING PYTHON 3.6.0
import ctypes as ct
import pandas as pd
import numpy as np
import random, csv, sys, os
import math
import classes_water as ENC
import classes_power as ODC
import classes_interconnection as ICC
import grb_solvers
from comtypes import automation
import win32com.client
# from win32com.client import makepy
# import sys
# sys.argv = ['makepy', 'OpenDSSEngine.DSS']
# makepy.main()
def main(dss_debug, write_cols):
    """Build the power/water network from CSV definitions, run OpenDSS,
    and compare PTDF-based branch-flow estimates against actual flows
    from repeated power-flow solutions.

    :param dss_debug: debug flag forwarded to each component's DSS setup.
    :param write_cols: column-name export switch; accepted for interface
        compatibility with the __main__ driver, not used in this routine.
    """
    os_username = os.getlogin()

    # --------------
    # READ CSV FILES
    # --------------
    csv_xycurve = pd.read_csv('./data_power/network-power/1000xycurve.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_regcontrol = pd.read_csv('./data_power/network-power/1100regcontrol.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_wiredata = pd.read_csv('./data_power/network-power/1200wiredata.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_linecode = pd.read_csv('./data_power/network-power/1201linecode.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_bus = pd.read_csv('./data_power/network-power/1300bus.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_vsource = pd.read_csv('./data_power/network-power/1301vsource.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    # this uses a custom generator data file
    csv_generator = pd.read_csv('./data_power/network-power/1302generator_ptdf.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_load = pd.read_csv('./data_power/network-power/1303load.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_solarpv = pd.read_csv('./data_power/network-power/1304solarpv.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_windturbine = pd.read_csv('./data_power/network-power/1305windturbine.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_directconnection = pd.read_csv('./data_power/network-power/1400directconnection.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_cable = pd.read_csv('./data_power/network-power/1401cable.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_overheadline = pd.read_csv('./data_power/network-power/1402overheadline.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_twowindingtransformer = pd.read_csv('./data_power/network-power/1403twowindingtransformer.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_capacitor = pd.read_csv('./data_power/network-power/1404capacitor.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_reactor = pd.read_csv('./data_power/network-power/1405reactor.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_pumpload = pd.read_csv('./data_interconnection/network-interconnection/9000pump-load.csv', sep=',', header=1, index_col=None, dtype=np.float64)
    csv_tankgenerator = pd.read_csv('./data_interconnection/network-interconnection/9001tank-generator.csv', sep=',', header=1, index_col=None, dtype=np.float64)

    # -----------------
    # CREATE COMPONENTS
    # -----------------
    object_xycurve = ODC.XYCurve(csv_xycurve)
    object_regcontrol = ODC.RegControl(csv_regcontrol)
    object_wiredata = ODC.WireData(csv_wiredata)
    object_linecode = ODC.LineCode(csv_linecode)
    object_bus = ODC.Bus(csv_bus)
    object_vsource = ODC.VSource(csv_vsource)
    object_generator = ODC.Generator(csv_generator)
    object_load = ODC.Load(csv_load)
    object_solarpv = ODC.SolarPV(csv_solarpv)
    object_windturbine = ODC.WindTurbine(csv_windturbine, object_xycurve)
    object_directconnection = ODC.DirectConnection(csv_directconnection)
    object_cable = ODC.Cable(csv_cable)
    object_overheadline = ODC.OverheadLine(csv_overheadline)
    object_twowindingtransformer = ODC.TwoWindingTransformer(csv_twowindingtransformer)
    object_capacitor = ODC.Capacitor(csv_capacitor)
    object_reactor = ODC.Reactor(csv_reactor)
    object_pumpload = ICC.PumpLoad(csv_pumpload)
    object_tankgenerator = ICC.TankGenerator(csv_tankgenerator)

    # -----------------------
    # ADD COMPONENTS TO LISTS
    # -----------------------
    object_list = [object_vsource, object_bus, object_generator, object_load, object_solarpv, object_windturbine, #NODES
    object_xycurve, object_wiredata, object_linecode, #OTHERS
    object_directconnection, object_cable, object_overheadline, object_twowindingtransformer, object_capacitor, object_reactor, # CONNECTIONS
    object_regcontrol] # CONTROLS

    interconn_dict = {'pumpload': object_pumpload, 'tankgenerator': object_tankgenerator,
    'load': object_load,
    'generator': object_generator}

    # -----------
    # RUN OPENDSS
    # -----------
    def run_OpenDSS(dss_debug, solverFlag):
        """Run one OpenDSS solve over the current component state.

        Returns the input/output tensors when solverFlag is False, or the
        total real losses in kW when solverFlag is True.
        """
        # SET SOURCEBUS
        # VsourceClass.sourcebus = vsourceobj.id[1]
        dssObj = win32com.client.Dispatch('OpenDSSEngine.DSS') # OPENDSS COMPORT
        dssObj.AllowForms = False
        dssText = dssObj.Text
        dssCkt = dssObj.ActiveCircuit
        dssSolution = dssCkt.Solution
        dssActvElem = dssCkt.ActiveCktElement
        dssActvBus = dssCkt.ActiveBus

        dssText.Command = 'Clear'
        dssText.Command = 'Set DataPath=\'C:\\Users\\'+os_username+'\\Documents\\OpenDSS'
        dssText.Command = 'Set DefaultBaseFrequency=60'

        # 'component' rather than 'object' to avoid shadowing the builtin.
        for component in object_list:
            component.createAllDSS(dssText, interconn_dict, dss_debug)

        set_voltagebase = set()
        for component in object_list:
            set_voltagebase = set_voltagebase | component.voltagesToSets()

        dssText.Command = 'Set VoltageBases={}'.format(list(set_voltagebase))
        dssText.Command = 'CalcVoltageBases'
        dssText.Command = 'Solve BaseFrequency=60 MaxIter=300'

        variant_buses = automation.VARIANT()
        variant_voltages_mag = automation.VARIANT()
        variant_voltages_pu = automation.VARIANT()
        variant_currents = automation.VARIANT()
        variant_powers = automation.VARIANT()
        for component in object_list:
            component.readAllDSSOutputs(dssCkt, dssActvElem, dssActvBus, variant_buses, variant_voltages_mag, variant_voltages_pu, variant_currents, variant_powers)

        if not solverFlag:
            # dssText.Command = 'Save Circuit'
            # dssText.Command = 'Export Summary (summary.csv)'
            # dssText.Command = 'Export Currents (currents.csv)'
            # dssText.Command = 'Export Voltages (voltages.csv)'
            # dssText.Command = 'Export Overloads (overloads.csv)'
            # dssText.Command = 'Export Powers kVA (powers.csv)'

            input_list_continuous = []
            input_list_categorical = []
            input_tensor_continuous = np.empty([0,0], dtype=np.float64).flatten()
            input_tensor_categorical = np.empty([0,0], dtype=np.float64).flatten()
            for component in object_list:
                list_continuous, list_categorical, tensor_continuous, tensor_categorical = component.convertToInputTensor()
                input_list_continuous = input_list_continuous + list_continuous
                input_list_categorical = input_list_categorical + list_categorical
                input_tensor_continuous = np.concatenate((input_tensor_continuous, tensor_continuous), axis=0)
                input_tensor_categorical = np.concatenate((input_tensor_categorical, tensor_categorical), axis=0)

            output_list = []
            output_tensor = np.empty([0,0], dtype=np.float64).flatten()
            for component in object_list:
                o_list, o_tensor = component.convertToOutputTensor()
                output_list = output_list + o_list
                output_tensor = np.concatenate((output_tensor, o_tensor), axis=0)
            return input_list_continuous, input_list_categorical, output_list, input_tensor_continuous, input_tensor_categorical, output_tensor
        else:
            losses = dssCkt.Losses
            return float(losses[0])*0.001 # kW

    # SIM STEP 1: SET LOAD CURVES
    # ---------------------------
    # ensures power factor of 0.95
    power_factor_factor = (math.sqrt(1.0**2 - 0.95**2) / 0.95)
    real_load_factor = 0.5
    reactive_load_factor = 0.0  # kept for documentation; not used below
    object_load.multiplyLoadFactor(real_load_factor, power_factor_factor)

    # SIM STEP 2: SET GENERATOR DISPATCH
    # ----------------------------------
    object_generator.matrix[:, ODC.Generator.REAL_GENERATION] = real_load_factor * object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MAX_RATING]
    # almost zeros out reactive power in the network
    object_generator.matrix[:, ODC.Generator.REACTIVE_GENERATION] = object_generator.matrix[:, ODC.Generator.REAL_GENERATION] * power_factor_factor * 0.8467

    # SIM STEP 3: RUN POWER-WATER SIMULATION
    # --------------------------------------
    # BUG FIX: pd.DataFrame.from_csv was deprecated in pandas 0.21 and
    # removed in 1.0; pd.read_csv with index_col=0 reads the same tables.
    # Forward slashes replace the fragile '\l'/'\p' backslash sequences
    # and are accepted by Windows file APIs.
    lodf_tab = pd.read_csv('tables/lodf.csv', header=0, index_col=0)
    ptdf_tab = pd.read_csv('tables/ptdf.csv', header=0, index_col=0)

    # Baseline: push the checked generator to its max rating and solve.
    ptdf_gen_id_check = 323.0
    for gen in object_generator.matrix:
        if gen[ODC.Generator.ID] == ptdf_gen_id_check:
            gen[ODC.Generator.REAL_GENERATION] = gen[ODC.Generator.REAL_GENERATION_MAX_RATING]
    run_OpenDSS(dss_debug, False)

    branch_id_base = object_cable.matrix[:, ODC.Cable.ID]
    # Branch flow taken as the mean of the two endpoint measurements.
    branch_real_power_base = np.array(0.5*(object_cable.matrix[:, ODC.Cable.REAL_POWER_2] - object_cable.matrix[:, ODC.Cable.REAL_POWER_1]), copy=True)
    est_branch_power_base = 0.5 * (object_cable.matrix[1-1, ODC.Cable.REAL_POWER_2] - object_cable.matrix[1-1, ODC.Cable.REAL_POWER_1])

    # Perturb the checked generator and compare the observed branch-flow
    # deltas against the PTDF table's prediction.
    countcount = 0
    ptdf_estimator = np.array(object_cable.matrix[:, ODC.Cable.ID], copy=True)*0
    for gen in object_generator.matrix:
        if gen[ODC.Generator.ID] == ptdf_gen_id_check:
            delta = -13330. #0.3 * gen[ODC.Generator.REAL_GENERATION_MAX_RATING]
            print('projected delta is', delta)
            print('actual delta is', max(gen[ODC.Generator.REAL_GENERATION_MIN_RATING]-gen[ODC.Generator.REAL_GENERATION], delta))
            object_generator.matrix[:, ODC.Generator.REAL_GENERATION] = real_load_factor * object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MAX_RATING]
            gen[ODC.Generator.REAL_GENERATION] = gen[ODC.Generator.REAL_GENERATION_MAX_RATING]
            gen[ODC.Generator.REAL_GENERATION] += delta
            run_OpenDSS(dss_debug, False)
            branch_real_power = np.array(0.5*(object_cable.matrix[:, ODC.Cable.REAL_POWER_2] - object_cable.matrix[:, ODC.Cable.REAL_POWER_1]), copy=True)
            branch_delta = branch_real_power - branch_real_power_base
            ptdf_estimator = np.array(ptdf_tab.loc[int(gen[ODC.Generator.ID])])
            est_branch_power = 0.5 * (object_cable.matrix[1-1, ODC.Cable.REAL_POWER_2] - object_cable.matrix[1-1, ODC.Cable.REAL_POWER_1])
            if countcount >= 0:
                print(gen[ODC.Generator.ID])
                print(max(np.absolute(branch_delta - delta*ptdf_estimator)))
                print('branch 1 base', est_branch_power_base)
                print('branch 1 new', est_branch_power)
                print('branch 1 error', est_branch_power - (est_branch_power_base + delta*ptdf_tab.loc[int(ptdf_gen_id_check)]['1']))
                print('')
            countcount += 1

    # Earlier LODF / OTDF validation experiments, retained for reference:
    # countcount = 0
    # lodf_estimator = np.array(object_cable.matrix[:, ODC.Cable.ID], copy=True)*0
    # for cable in object_cable.matrix:
    # 	if cable[ODC.Cable.ID] != 100.0:
    # 		object_cable.matrix[:, ODC.Cable.OPERATIONAL_STATUS_A] = 1.0
    # 		cable[ODC.Cable.OPERATIONAL_STATUS_A] = 0.0
    # 		for iditer in range(0, len(branch_id_base)):
    # 			if cable[ODC.Cable.ID] == branch_id_base[iditer]:
    # 				delta = math.fabs(branch_real_power_base[iditer])
    # 		run_OpenDSS(dss_debug, False)
    # 		branch_real_power = np.array(0.5*(object_cable.matrix[:, ODC.Cable.REAL_POWER_2] - object_cable.matrix[:, ODC.Cable.REAL_POWER_1]), copy=True)
    # 		branch_delta = branch_real_power - branch_real_power_base
    # 		lodf_estimator = np.array(lodf_tab.loc[int(cable[ODC.Cable.ID])])
    # 		if countcount >= 0:
    # 			print(max(np.absolute(branch_delta - delta*lodf_estimator)))
    # 			print(max((branch_delta - delta*lodf_estimator)/(0.5* branch_real_power+branch_real_power_base)))
    # 			print(min((branch_delta - delta*lodf_estimator)/(0.5* branch_real_power+branch_real_power_base)))
    # 			print('')
    # 		countcount +=1

    # countcount = 0
    # delta_percent = 0.5
    # lodf_estimator = np.array(object_cable.matrix[:, ODC.Cable.ID], copy=True)*0
    # ptdf_estimator = np.array(object_cable.matrix[:, ODC.Cable.ID], copy=True)*0
    # for cable in object_cable.matrix:
    # 	if cable[ODC.Cable.ID] != 100.0:
    # 		object_generator.matrix[:, ODC.Generator.REAL_GENERATION] = real_load_factor * object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MAX_RATING]
    # 		object_cable.matrix[:, ODC.Cable.OPERATIONAL_STATUS_A] = 1.0
    # 		cable[ODC.Cable.OPERATIONAL_STATUS_A] = 0.0
    # 		run_OpenDSS(dss_debug, False)
    # 		branch_real_power_line_base = np.array(0.5*(object_cable.matrix[:, ODC.Cable.REAL_POWER_2] - object_cable.matrix[:, ODC.Cable.REAL_POWER_1]), copy=True)
    # 		print(cable[ODC.Cable.ID])
    # 		maxerror = 0.0
    # 		for gen in object_generator.matrix:
    # 			delta_kw = delta_percent * gen[ODC.Generator.REAL_GENERATION_MAX_RATING]
    # 			object_generator.matrix[:, ODC.Generator.REAL_GENERATION] = real_load_factor * object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MAX_RATING]
    # 			gen[ODC.Generator.REAL_GENERATION] += delta_kw
    # 			run_OpenDSS(dss_debug, False)
    # 			branch_real_power = np.array(0.5*(object_cable.matrix[:, ODC.Cable.REAL_POWER_2] - object_cable.matrix[:, ODC.Cable.REAL_POWER_1]), copy=True)
    # 			branch_delta = branch_real_power - branch_real_power_line_base
    # 			ptdf_estimator = np.array(ptdf_tab.loc[int(gen[ODC.Generator.ID])])
    # 			lodf_estimator = np.array(lodf_tab.loc[int(cable[ODC.Cable.ID])])
    # 			lodf_val = - lodf_tab.loc[int(cable[ODC.Cable.ID])][str(int(cable[ODC.Cable.ID]))]
    # 			otdf_val = ptdf_tab.loc[int(gen[ODC.Generator.ID])][str(int(cable[ODC.Cable.ID]))]
    # 			if countcount >= 0:
    # 				maxerror = max(maxerror, max(np.absolute(branch_delta - delta_kw*(ptdf_estimator + lodf_val*lodf_estimator*otdf_val))))
    # 			countcount += 1
    # 		print(maxerror)
    # 		print('')

    # for b in range(0, branches):
    # 	for g in range(0, generators):
    # 		otdf[g, b] = ptdf_tab.loc[float(g)][str(b)] + lodf_tab.loc[float(o)][str(b)] * ptdf_tab[float(g)][str(o)]

    ###############################
    # END
    # ---
if __name__ == '__main__':
    dss_debug = 0
    write_cols = False  # write column names to a separate file
    main(dss_debug, write_cols)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.